/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

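/*
 * Read completion: on success the page becomes uptodate; on failure it is
 * marked with PageError. Either way the page is unlocked so that waiting
 * readers can proceed.
 */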
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

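/*
 * Write completion: a failed write re-dirties the page, flags the mapping
 * with AS_EIO, and stops further checkpointing, since the on-disk state
 * can no longer be trusted. Checkpoint waiters are woken once the last
 * in-flight writeback page completes.
 */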
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

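/*
 * Submit the bio cached in this f2fs_bio_info, if any, emitting the
 * matching read/write tracepoint first. Callers hold io->io_rwsem.
 */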
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with the data located at the given block address.
 * The page is unlocked by the I/O completion handler.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

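/*
 * Merged-bio submission: pages bound for consecutive block addresses with
 * identical I/O attributes are batched into a single bio, which is only
 * submitted once merging becomes impossible (or via an explicit
 * f2fs_submit_merged_bio() call).
 */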
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

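/*
 * Reserve one data block for @dn: charge the inode's valid-block count and
 * record NEW_ADDR in the node page; a physical block is assigned later,
 * when the page is actually written back.
 */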
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	__set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

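/*
 * Look up @pgofs in the inode's one-extent cache. On a hit, map @bh_result
 * onto the cached physical range and return 1; on a miss return 0 so the
 * caller falls back to a node page lookup.
 */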
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

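/*
 * Record the new block address in the node page, then grow, shrink, or
 * split the cached extent to keep it consistent with the updated mapping.
 * An extent that becomes too fragmented is dropped, and FI_NO_EXTENT
 * disables the cache for this inode.
 */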
void update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (dn->data_blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = dn->data_blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

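/*
 * Find the data page for @index, issuing a read if it is not cached and
 * uptodate. With @sync the read is READ_SYNC and waited for; otherwise it
 * is submitted as readahead and the page may not be uptodate on return.
 */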
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR but no cached page */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error: the callers (the
 * directory code in dir.c and GC) need to know whether the page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

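/*
 * Allocate a physical block for @dn immediately (rather than deferring to
 * writeback): pick a data segment, write the address into the node page,
 * and push i_size forward if the block lies beyond it. Used on the direct
 * I/O and preallocation paths.
 */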
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);

	/* direct IO does not use the extent cache, to maximize performance */
	__set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
	return 0;
}

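/*
 * Preallocate blocks for a direct write spanning @count bytes at @offset:
 * walk the covered node pages and allocate every block that is still
 * NULL_ADDR, so blockdev_direct_IO() later sees fully mapped blocks.
 */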
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			if (dn.data_blkaddr == NULL_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If the original data blocks are allocated, then give them to the blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

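/*
 * Buffered read entry points. Inline-data inodes are served directly from
 * the inode page where possible; everything else goes through the generic
 * mpage helpers driven by get_data_block().
 */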
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

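/*
 * Write one dirty data page: look up its block address and either rewrite
 * the block in place (when SSR makes in-place updates preferable) or take
 * the out-of-place write path and update the extent cache.
 */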
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, in-place writes are
	 * preferable for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		update_extent_cache(&dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

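/*
 * ->writepage entry point. Pages entirely beyond i_size are skipped, the
 * tail page is zero-padded, dentry pages are written under checkpoint
 * control, and regular data tries an inline write before falling back to
 * do_write_data_page().
 */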
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is beyond the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

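/*
 * ->writepages entry point. Small batches of dirty dentry pages are left
 * for the checkpoint to flush; regular files are serialized through
 * sbi->writepages, presumably so concurrent writers do not interleave
 * their block allocations.
 */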
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

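/* On a failed or short write, drop page cache and blocks beyond i_size. */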
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

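/*
 * Direct writes must be block-aligned in both file offset and memory;
 * a misaligned request makes f2fs_direct_IO() return 0, falling back to
 * buffered I/O. Reads carry no alignment restriction here.
 */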
static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

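/*
 * Direct I/O entry point: convert away inline data first, preallocate
 * blocks for writes, then hand the transfer to blockdev_direct_IO() with
 * get_data_block() as the block mapper.
 */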
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw & WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

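/*
 * Keep the dirty-page accounting for meta, node, and data pages in sync
 * when a page is invalidated; partial invalidation of a regular page
 * leaves the accounting and PagePrivate untouched.
 */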
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			  unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};