Merge branch 'chandan/prep-subpage-blocksize' into for-chris-4.6

# Conflicts:
#	fs/btrfs/file.c
Author: David Sterba
Date:   2016-02-26 15:38:28 +01:00
7 changed files with 321 additions and 165 deletions

fs/btrfs/inode.c

@@ -263,7 +263,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
data_len = compressed_size;
if (start > 0 ||
- actual_end > PAGE_CACHE_SIZE ||
+ actual_end > root->sectorsize ||
data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
(!compressed_size &&
(actual_end & (root->sectorsize - 1)) == 0) ||
@@ -2002,7 +2002,8 @@ again:
if (PagePrivate2(page))
goto out;
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
+ ordered = btrfs_lookup_ordered_range(inode, page_start,
+ PAGE_CACHE_SIZE);
if (ordered) {
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
page_end, &cached_state, GFP_NOFS);
@@ -4248,7 +4249,8 @@ static int truncate_inline_extent(struct inode *inode,
* read the extent item from disk (data not in the page cache).
*/
btrfs_release_path(path);
- return btrfs_truncate_page(inode, offset, page_end - offset, 0);
+ return btrfs_truncate_block(inode, offset, page_end - offset,
+ 0);
}
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
@@ -4601,17 +4603,17 @@ error:
}
/*
- * btrfs_truncate_page - read, zero a chunk and write a page
+ * btrfs_truncate_block - read, zero a chunk and write a block
* @inode - inode that we're zeroing
* @from - the offset to start zeroing
* @len - the length to zero, 0 to zero the entire range respective to the
* offset
* @front - zero up to the offset instead of from the offset on
*
- * This will find the page for the "from" offset and cow the page and zero the
+ * This will find the block for the "from" offset and cow the block and zero the
* part we want to zero. This is used with truncate and hole punching.
*/
- int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
int front)
{
struct address_space *mapping = inode->i_mapping;
@@ -4622,18 +4624,19 @@ int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
char *kaddr;
u32 blocksize = root->sectorsize;
pgoff_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ unsigned offset = from & (blocksize - 1);
struct page *page;
gfp_t mask = btrfs_alloc_write_mask(mapping);
int ret = 0;
- u64 page_start;
- u64 page_end;
+ u64 block_start;
+ u64 block_end;
if ((offset & (blocksize - 1)) == 0 &&
(!len || ((len & (blocksize - 1)) == 0)))
goto out;
ret = btrfs_delalloc_reserve_space(inode,
- round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
+ round_down(from, blocksize), blocksize);
if (ret)
goto out;
@@ -4641,14 +4644,14 @@ again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
btrfs_delalloc_release_space(inode,
- round_down(from, PAGE_CACHE_SIZE),
- PAGE_CACHE_SIZE);
+ round_down(from, blocksize),
+ blocksize);
ret = -ENOMEM;
goto out;
}
- page_start = page_offset(page);
- page_end = page_start + PAGE_CACHE_SIZE - 1;
+ block_start = round_down(from, blocksize);
+ block_end = block_start + blocksize - 1;
if (!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
@@ -4665,12 +4668,12 @@ again:
}
wait_on_page_writeback(page);
- lock_extent_bits(io_tree, page_start, page_end, &cached_state);
+ lock_extent_bits(io_tree, block_start, block_end, &cached_state);
set_page_extent_mapped(page);
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
+ ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
- unlock_extent_cached(io_tree, page_start, page_end,
+ unlock_extent_cached(io_tree, block_start, block_end,
&cached_state, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
@@ -4679,39 +4682,41 @@ again:
goto again;
}
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
- ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+ ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
&cached_state);
if (ret) {
- unlock_extent_cached(io_tree, page_start, page_end,
+ unlock_extent_cached(io_tree, block_start, block_end,
&cached_state, GFP_NOFS);
goto out_unlock;
}
- if (offset != PAGE_CACHE_SIZE) {
+ if (offset != blocksize) {
if (!len)
- len = PAGE_CACHE_SIZE - offset;
+ len = blocksize - offset;
kaddr = kmap(page);
if (front)
- memset(kaddr, 0, offset);
+ memset(kaddr + (block_start - page_offset(page)),
+ 0, offset);
else
- memset(kaddr + offset, 0, len);
+ memset(kaddr + (block_start - page_offset(page)) + offset,
+ 0, len);
flush_dcache_page(page);
kunmap(page);
}
ClearPageChecked(page);
set_page_dirty(page);
- unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
+ unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
GFP_NOFS);
out_unlock:
if (ret)
- btrfs_delalloc_release_space(inode, page_start,
- PAGE_CACHE_SIZE);
+ btrfs_delalloc_release_space(inode, block_start,
+ blocksize);
unlock_page(page);
page_cache_release(page);
out:
@@ -4782,11 +4787,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
int err = 0;
/*
- * If our size started in the middle of a page we need to zero out the
- * rest of the page before we expand the i_size, otherwise we could
+ * If our size started in the middle of a block we need to zero out the
+ * rest of the block before we expand the i_size, otherwise we could
* expose stale data.
*/
- err = btrfs_truncate_page(inode, oldsize, 0, 0);
+ err = btrfs_truncate_block(inode, oldsize, 0, 0);
if (err)
return err;
@@ -4895,7 +4900,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
}
if (newsize > oldsize) {
- truncate_pagecache(inode, newsize);
/*
* Don't do an expanding truncate while snapshoting is ongoing.
* This is to ensure the snapshot captures a fully consistent
@@ -4918,6 +4922,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+ pagecache_isize_extended(inode, oldsize, newsize);
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_write_no_snapshoting(root);
btrfs_end_transaction(trans, root);
@@ -7752,9 +7757,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
}
static int dio_read_error(struct inode *inode, struct bio *failed_bio,
- struct page *page, u64 start, u64 end,
- int failed_mirror, bio_end_io_t *repair_endio,
- void *repair_arg)
+ struct page *page, unsigned int pgoff,
+ u64 start, u64 end, int failed_mirror,
+ bio_end_io_t *repair_endio, void *repair_arg)
{
struct io_failure_record *failrec;
struct bio *bio;
@@ -7775,7 +7780,9 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
return -EIO;
}
- if (failed_bio->bi_vcnt > 1)
+ if ((failed_bio->bi_vcnt > 1)
+ || (failed_bio->bi_io_vec->bv_len
+ > BTRFS_I(inode)->root->sectorsize))
read_mode = READ_SYNC | REQ_FAILFAST_DEV;
else
read_mode = READ_SYNC;
@@ -7783,7 +7790,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
isector = start - btrfs_io_bio(failed_bio)->logical;
isector >>= inode->i_sb->s_blocksize_bits;
bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
- 0, isector, repair_endio, repair_arg);
+ pgoff, isector, repair_endio, repair_arg);
if (!bio) {
free_io_failure(inode, failrec);
return -EIO;
@@ -7813,12 +7820,17 @@ struct btrfs_retry_complete {
static void btrfs_retry_endio_nocsum(struct bio *bio)
{
struct btrfs_retry_complete *done = bio->bi_private;
+ struct inode *inode;
struct bio_vec *bvec;
int i;
if (bio->bi_error)
goto end;
+ ASSERT(bio->bi_vcnt == 1);
+ inode = bio->bi_io_vec->bv_page->mapping->host;
+ ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
done->uptodate = 1;
bio_for_each_segment_all(bvec, bio, i)
clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
@@ -7830,25 +7842,35 @@ end:
static int __btrfs_correct_data_nocsum(struct inode *inode,
struct btrfs_io_bio *io_bio)
{
+ struct btrfs_fs_info *fs_info;
struct bio_vec *bvec;
struct btrfs_retry_complete done;
u64 start;
+ unsigned int pgoff;
+ u32 sectorsize;
+ int nr_sectors;
int i;
int ret;
+ fs_info = BTRFS_I(inode)->root->fs_info;
+ sectorsize = BTRFS_I(inode)->root->sectorsize;
start = io_bio->logical;
done.inode = inode;
bio_for_each_segment_all(bvec, &io_bio->bio, i) {
- try_again:
+ nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+ pgoff = bvec->bv_offset;
+ next_block_or_try_again:
done.uptodate = 0;
done.start = start;
init_completion(&done.done);
- ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
- start + bvec->bv_len - 1,
- io_bio->mirror_num,
- btrfs_retry_endio_nocsum, &done);
+ ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+ pgoff, start, start + sectorsize - 1,
+ io_bio->mirror_num,
+ btrfs_retry_endio_nocsum, &done);
if (ret)
return ret;
@@ -7856,10 +7878,15 @@ try_again:
if (!done.uptodate) {
/* We might have another mirror, so try again */
- goto try_again;
+ goto next_block_or_try_again;
}
- start += bvec->bv_len;
+ start += sectorsize;
+ if (nr_sectors--) {
+ pgoff += sectorsize;
+ goto next_block_or_try_again;
+ }
}
return 0;
@@ -7869,7 +7896,9 @@ static void btrfs_retry_endio(struct bio *bio)
{
struct btrfs_retry_complete *done = bio->bi_private;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+ struct inode *inode;
struct bio_vec *bvec;
+ u64 start;
int uptodate;
int ret;
int i;
@@ -7878,13 +7907,20 @@ static void btrfs_retry_endio(struct bio *bio)
goto end;
uptodate = 1;
+ start = done->start;
+ ASSERT(bio->bi_vcnt == 1);
+ inode = bio->bi_io_vec->bv_page->mapping->host;
+ ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
bio_for_each_segment_all(bvec, bio, i) {
ret = __readpage_endio_check(done->inode, io_bio, i,
- bvec->bv_page, 0,
- done->start, bvec->bv_len);
+ bvec->bv_page, bvec->bv_offset,
+ done->start, bvec->bv_len);
if (!ret)
clean_io_failure(done->inode, done->start,
- bvec->bv_page, 0);
+ bvec->bv_page, bvec->bv_offset);
else
uptodate = 0;
}
@@ -7898,20 +7934,34 @@ end:
static int __btrfs_subio_endio_read(struct inode *inode,
struct btrfs_io_bio *io_bio, int err)
{
+ struct btrfs_fs_info *fs_info;
struct bio_vec *bvec;
struct btrfs_retry_complete done;
u64 start;
u64 offset = 0;
+ u32 sectorsize;
+ int nr_sectors;
+ unsigned int pgoff;
+ int csum_pos;
int i;
int ret;
+ fs_info = BTRFS_I(inode)->root->fs_info;
+ sectorsize = BTRFS_I(inode)->root->sectorsize;
err = 0;
start = io_bio->logical;
done.inode = inode;
bio_for_each_segment_all(bvec, &io_bio->bio, i) {
- ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
- 0, start, bvec->bv_len);
+ nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+ pgoff = bvec->bv_offset;
+ next_block:
+ csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
+ ret = __readpage_endio_check(inode, io_bio, csum_pos,
+ bvec->bv_page, pgoff, start,
+ sectorsize);
if (likely(!ret))
goto next;
try_again:
@@ -7919,10 +7969,10 @@ try_again:
done.start = start;
init_completion(&done.done);
- ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
- start + bvec->bv_len - 1,
- io_bio->mirror_num,
- btrfs_retry_endio, &done);
+ ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+ pgoff, start, start + sectorsize - 1,
+ io_bio->mirror_num,
+ btrfs_retry_endio, &done);
if (ret) {
err = ret;
goto next;
@@ -7935,8 +7985,15 @@ try_again:
goto try_again;
}
next:
- offset += bvec->bv_len;
- start += bvec->bv_len;
+ offset += sectorsize;
+ start += sectorsize;
+ ASSERT(nr_sectors);
+ if (--nr_sectors) {
+ pgoff += sectorsize;
+ goto next_block;
+ }
}
return err;
@@ -8188,9 +8245,11 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
u64 file_offset = dip->logical_offset;
u64 submit_len = 0;
u64 map_length;
- int nr_pages = 0;
- int ret;
+ u32 blocksize = root->sectorsize;
int async_submit = 0;
+ int nr_sectors;
+ int ret;
+ int i;
map_length = orig_bio->bi_iter.bi_size;
ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
@@ -8220,9 +8279,12 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
atomic_inc(&dip->pending_bios);
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
- if (map_length < submit_len + bvec->bv_len ||
- bio_add_page(bio, bvec->bv_page, bvec->bv_len,
- bvec->bv_offset) < bvec->bv_len) {
+ nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
+ i = 0;
+ next_block:
+ if (unlikely(map_length < submit_len + blocksize ||
+ bio_add_page(bio, bvec->bv_page, blocksize,
+ bvec->bv_offset + (i * blocksize)) < blocksize)) {
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
@@ -8243,7 +8305,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
file_offset += submit_len;
submit_len = 0;
- nr_pages = 0;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
start_sector, GFP_NOFS);
@@ -8261,9 +8322,14 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
bio_put(bio);
goto out_err;
}
+ goto next_block;
} else {
- submit_len += bvec->bv_len;
- nr_pages++;
+ submit_len += blocksize;
+ if (--nr_sectors) {
+ i++;
+ goto next_block;
+ }
bvec++;
}
}
@@ -8628,6 +8694,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+ u64 start;
+ u64 end;
int inode_evicting = inode->i_state & I_FREEING;
/*
@@ -8647,14 +8715,18 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
if (!inode_evicting)
lock_extent_bits(tree, page_start, page_end, &cached_state);
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
+ again:
+ start = page_start;
+ ordered = btrfs_lookup_ordered_range(inode, start,
+ page_end - start + 1);
if (ordered) {
+ end = min(page_end, ordered->file_offset + ordered->len - 1);
/*
* IO on this page will never be started, so we need
* to account for any ordered extents now
*/
if (!inode_evicting)
- clear_extent_bit(tree, page_start, page_end,
+ clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 0, &cached_state,
@@ -8671,22 +8743,26 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
spin_lock_irq(&tree->lock);
set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
- new_len = page_start - ordered->file_offset;
+ new_len = start - ordered->file_offset;
if (new_len < ordered->truncated_len)
ordered->truncated_len = new_len;
spin_unlock_irq(&tree->lock);
if (btrfs_dec_test_ordered_pending(inode, &ordered,
- page_start,
- PAGE_CACHE_SIZE, 1))
+ start,
+ end - start + 1, 1))
btrfs_finish_ordered_io(ordered);
}
btrfs_put_ordered_extent(ordered);
if (!inode_evicting) {
cached_state = NULL;
- lock_extent_bits(tree, page_start, page_end,
+ lock_extent_bits(tree, start, end,
&cached_state);
}
+ start = end + 1;
+ if (start < page_end)
+ goto again;
}
/*
@@ -8747,15 +8823,28 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
loff_t size;
int ret;
int reserved = 0;
+ u64 reserved_space;
u64 page_start;
u64 page_end;
+ u64 end;
+ reserved_space = PAGE_CACHE_SIZE;
sb_start_pagefault(inode->i_sb);
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
+ end = page_end;
+ /*
+ * Reserving delalloc space after obtaining the page lock can lead to
+ * deadlock. For example, if a dirty page is locked by this function
+ * and the call to btrfs_delalloc_reserve_space() ends up triggering
+ * dirty page write out, then the btrfs_writepage() function could
+ * end up waiting indefinitely to get a lock on the page currently
+ * being processed by btrfs_page_mkwrite() function.
+ */
ret = btrfs_delalloc_reserve_space(inode, page_start,
- PAGE_CACHE_SIZE);
+ reserved_space);
if (!ret) {
ret = file_update_time(vma->vm_file);
reserved = 1;
@@ -8789,7 +8878,7 @@ again:
* we can't set the delalloc bits if there are pending ordered
* extents. Drop our locks and wait for them to finish
*/
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
+ ordered = btrfs_lookup_ordered_range(inode, page_start, page_end);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
@@ -8799,6 +8888,18 @@ again:
goto again;
}
+ if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+ reserved_space = round_up(size - page_start, root->sectorsize);
+ if (reserved_space < PAGE_CACHE_SIZE) {
+ end = page_start + reserved_space - 1;
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ btrfs_delalloc_release_space(inode, page_start,
+ PAGE_CACHE_SIZE - reserved_space);
+ }
+ }
/*
* XXX - page_mkwrite gets called every time the page is dirtied, even
* if it was already dirty, so for space accounting reasons we need to
@@ -8806,12 +8907,12 @@ again:
* is probably a better way to do this, but for now keep consistent with
* prepare_pages in the normal write path.
*/
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
- ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+ ret = btrfs_set_extent_delalloc(inode, page_start, end,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
@@ -8850,7 +8951,7 @@ out_unlock:
}
unlock_page(page);
out:
- btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
+ btrfs_delalloc_release_space(inode, page_start, reserved_space);
out_noreserve:
sb_end_pagefault(inode->i_sb);
return ret;
@@ -9236,7 +9337,6 @@ static int btrfs_getattr(struct vfsmount *mnt,
generic_fillattr(inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
- stat->blksize = PAGE_CACHE_SIZE;
spin_lock(&BTRFS_I(inode)->lock);
delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;