fs: kill block_prepare_write

__block_write_begin and block_prepare_write are identical except for slightly
different calling conventions.  Convert all callers to the __block_write_begin
calling conventions and drop block_prepare_write.
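
The conversion is mechanical: a caller that used to pass a (from, to) byte range within the page now passes the position and length of the write, and __block_write_begin derives from/to itself.  Because only the low bits of pos are used, passing the in-page offset works just as well as the absolute file position, which is what several of the converted callers do.  As a minimal, illustrative userspace sketch of the argument mapping (not kernel code; PAGE_CACHE_SIZE is hard-coded to 4096 and the pos/len values are made up purely for the example):

    /* Sketch of the old (from, to) vs. new (pos, len) calling conventions. */
    #include <stdio.h>

    #define PAGE_CACHE_SIZE 4096UL	/* assumed page size, illustration only */

    int main(void)
    {
        unsigned long pos = 12345;	/* file offset of the write (example value) */
        unsigned int len = 512;	/* number of bytes to write (example value) */

        /* __block_write_begin derives the old from/to pair internally: */
        unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned int to = from + len;

        printf("pos=%lu len=%u -> from=%u to=%u\n", pos, len, from, to);
        return 0;
    }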

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Christoph Hellwig, 2010-10-06 10:47:23 +02:00 (committed by Al Viro)
parent 56b0dacfa2
commit ebdec241d5
14 changed files with 39 additions and 73 deletions


@@ -1834,9 +1834,11 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
 }
 EXPORT_SYMBOL(page_zero_new_buffers);
 
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block)
 {
+	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned to = from + len;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start, block_end;
 	sector_t block;
@@ -1916,7 +1918,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
 	}
 	return err;
 }
-EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(__block_write_begin);
 
 static int __block_commit_write(struct inode *inode, struct page *page,
 		unsigned from, unsigned to)
@@ -1953,15 +1955,6 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 	return 0;
 }
 
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
-		get_block_t *get_block)
-{
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
-
-	return block_prepare_write(page, start, start + len, get_block);
-}
-EXPORT_SYMBOL(__block_write_begin);
-
 /*
  * block_write_begin takes care of the basic task of block allocation and
  * bringing partial write blocks uptodate first.
@@ -2379,7 +2372,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	else
 		end = PAGE_CACHE_SIZE;
 
-	ret = block_prepare_write(page, 0, end, get_block);
+	ret = __block_write_begin(page, 0, end, get_block);
 	if (!ret)
 		ret = block_commit_write(page, 0, end);


@@ -1696,7 +1696,7 @@ static int ext3_journalled_writepage(struct page *page,
 	 * doesn't seem much point in redirtying the page here.
 	 */
 	ClearPageChecked(page);
-	ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
+	ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
 				ext3_get_block);
 	if (ret != 0) {
 		ext3_journal_stop(handle);


@@ -1538,10 +1538,10 @@ static int do_journal_get_write_access(handle_t *handle,
 	if (!buffer_mapped(bh) || buffer_freed(bh))
 		return 0;
 	/*
-	 * __block_prepare_write() could have dirtied some buffers. Clean
+	 * __block_write_begin() could have dirtied some buffers. Clean
 	 * the dirty bit as jbd2_journal_get_write_access() could complain
 	 * otherwise about fs integrity issues. Setting of the dirty bit
-	 * by __block_prepare_write() isn't a real problem here as we clear
+	 * by __block_write_begin() isn't a real problem here as we clear
 	 * the bit before releasing a page lock and thus writeback cannot
 	 * ever write the buffer.
 	 */
@@ -2550,8 +2550,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 		if (buffer_delay(bh))
 			return 0; /* Not sure this could or should happen */
 		/*
-		 * XXX: __block_prepare_write() unmaps passed block,
-		 * is it OK?
+		 * XXX: __block_write_begin() unmaps passed block, is it OK?
 		 */
 		ret = ext4_da_reserve_space(inode, iblock);
 		if (ret)
@@ -2583,7 +2582,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 /*
  * This function is used as a standard get_block_t calback function
  * when there is no desire to allocate any blocks. It is used as a
- * callback function for block_prepare_write() and block_write_full_page().
+ * callback function for block_write_begin() and block_write_full_page().
  * These functions should only try to map a single block at a time.
  *
  * Since this function doesn't do block allocations even if the caller
@@ -2743,7 +2742,7 @@ static int ext4_writepage(struct page *page,
 		 * all are mapped and non delay. We don't want to
 		 * do block allocation here.
 		 */
-		ret = block_prepare_write(page, 0, len,
+		ret = __block_write_begin(page, 0, len,
 					  noalloc_get_block_write);
 		if (!ret) {
 			page_bufs = page_buffers(page);


@@ -618,7 +618,6 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	struct gfs2_alloc *al = NULL;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
-	unsigned to = from + len;
 	struct page *page;
 
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -691,7 +690,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	}
 
 prepare_write:
-	error = block_prepare_write(page, from, to, gfs2_block_map);
+	error = __block_write_begin(page, from, len, gfs2_block_map);
 out:
 	if (error == 0)
 		return 0;


@@ -1294,7 +1294,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
 	int error;
 
 	if (!page_has_buffers(page)) {
-		error = block_prepare_write(page, from, to, gfs2_block_map);
+		error = __block_write_begin(page, from, to - from, gfs2_block_map);
 		if (unlikely(error))
 			return error;
@@ -1313,7 +1313,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
 		next += bh->b_size;
 		if (buffer_mapped(bh)) {
 			if (end) {
-				error = block_prepare_write(page, start, end,
+				error = __block_write_begin(page, start, end - start,
 							    gfs2_block_map);
 				if (unlikely(error))
 					return error;
@@ -1328,7 +1328,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
 	} while (next < to);
 
 	if (end) {
-		error = block_prepare_write(page, start, end, gfs2_block_map);
+		error = __block_write_begin(page, start, end - start, gfs2_block_map);
 		if (unlikely(error))
 			return error;
 		empty_write_end(page, start, end);


@@ -165,7 +165,7 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
 		 * ocfs2 never allocates in this function - the only time we
 		 * need to use BH_New is when we're extending i_size on a file
 		 * system which doesn't support holes, in which case BH_New
-		 * allows block_prepare_write() to zero.
+		 * allows __block_write_begin() to zero.
 		 *
 		 * If we see this on a sparse file system, then a truncate has
 		 * raced us and removed the cluster. In this case, we clear
@@ -407,21 +407,6 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
 	return ret;
 }
 
-/*
- * This is called from ocfs2_write_zero_page() which has handled it's
- * own cluster locking and has ensured allocation exists for those
- * blocks to be written.
- */
-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
-			       unsigned from, unsigned to)
-{
-	int ret;
-
-	ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
-	return ret;
-}
-
 /* Taken from ext3. We don't necessarily need the full blown
  * functionality yet, but IMHO it's better to cut and paste the whole
  * thing so we can avoid introducing our own bugs (and easily pick up
@@ -732,7 +717,7 @@ static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
 }
 
 /*
- * Some of this taken from block_prepare_write(). We already have our
+ * Some of this taken from __block_write_begin(). We already have our
  * mapping by now though, and the entire write will be allocating or
  * it won't, so not much need to use BH_New.
 *


@@ -22,9 +22,6 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H
 
-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
-			       unsigned from, unsigned to);
-
 handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
 					  struct page *page,
 					  unsigned from,


@@ -796,13 +796,12 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 		block_end = block_start + (1 << inode->i_blkbits);
 
 		/*
-		 * block_start is block-aligned. Bump it by one to
-		 * force ocfs2_{prepare,commit}_write() to zero the
+		 * block_start is block-aligned. Bump it by one to force
+		 * __block_write_begin and block_commit_write to zero the
 		 * whole block.
 		 */
-		ret = ocfs2_prepare_write_nolock(inode, page,
-						 block_start + 1,
-						 block_start + 1);
+		ret = __block_write_begin(page, block_start + 1, 0,
+					  ocfs2_get_block);
 		if (ret < 0) {
 			mlog_errno(ret);
 			goto out_unlock;


@@ -22,8 +22,6 @@
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
-			   unsigned from, unsigned to);
 
 void reiserfs_evict_inode(struct inode *inode)
 {
@@ -165,7 +163,7 @@ inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
 ** but tail is still sitting in a direct item, and we can't write to
 ** it. So, look through this page, and check all the mapped buffers
 ** to make sure they have valid block numbers. Any that don't need
-** to be unmapped, so that block_prepare_write will correctly call
+** to be unmapped, so that __block_write_begin will correctly call
 ** reiserfs_get_block to convert the tail into an unformatted node
 */
 static inline void fix_tail_page_for_writing(struct page *page)
@@ -439,13 +437,13 @@ static int reiserfs_bmap(struct inode *inode, sector_t block,
 }
 
 /* special version of get_block that is only used by grab_tail_page right
-** now. It is sent to block_prepare_write, and when you try to get a
+** now. It is sent to __block_write_begin, and when you try to get a
 ** block past the end of the file (or a block from a hole) it returns
-** -ENOENT instead of a valid buffer. block_prepare_write expects to
+** -ENOENT instead of a valid buffer. __block_write_begin expects to
 ** be able to do i/o on the buffers returned, unless an error value
 ** is also returned.
 **
-** So, this allows block_prepare_write to be used for reading a single block
+** So, this allows __block_write_begin to be used for reading a single block
 ** in a page. Where it does not produce a valid page for holes, or past the
 ** end of the file. This turns out to be exactly what we need for reading
 ** tails for conversion.
@@ -558,11 +556,12 @@ static int convert_tail_for_hole(struct inode *inode,
 	**
 	** We must fix the tail page for writing because it might have buffers
 	** that are mapped, but have a block number of 0. This indicates tail
-	** data that has been read directly into the page, and block_prepare_write
-	** won't trigger a get_block in this case.
+	** data that has been read directly into the page, and
+	** __block_write_begin won't trigger a get_block in this case.
 	*/
 	fix_tail_page_for_writing(tail_page);
-	retval = reiserfs_prepare_write(NULL, tail_page, tail_start, tail_end);
+	retval = __reiserfs_write_begin(tail_page, tail_start,
+					tail_end - tail_start);
 	if (retval)
 		goto unlock;
@@ -2033,7 +2032,7 @@ static int grab_tail_page(struct inode *inode,
 	/* start within the page of the last block in the file */
 	start = (offset / blocksize) * blocksize;
 
-	error = block_prepare_write(page, start, offset,
+	error = __block_write_begin(page, start, offset - start,
 				    reiserfs_get_block_create_0);
 	if (error)
 		goto unlock;
@@ -2628,8 +2627,7 @@ static int reiserfs_write_begin(struct file *file,
 	return ret;
 }
 
-int reiserfs_prepare_write(struct file *f, struct page *page,
-			   unsigned from, unsigned to)
+int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
 {
 	struct inode *inode = page->mapping->host;
 	int ret;
@@ -2650,7 +2648,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
 		th->t_refcount++;
 	}
 
-	ret = block_prepare_write(page, from, to, reiserfs_get_block);
+	ret = __block_write_begin(page, from, len, reiserfs_get_block);
 	if (ret && reiserfs_transaction_running(inode->i_sb)) {
 		struct reiserfs_transaction_handle *th = current->journal_info;
 		/* this gets a little ugly. If reiserfs_get_block returned an


@@ -160,8 +160,6 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
-			   unsigned from, unsigned to);
 
 /*
 ** reiserfs_unpack
 ** Function try to convert tail from direct item into indirect.
@@ -200,7 +198,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 	}
 
 	/* we unpack by finding the page with the tail, and calling
-	** reiserfs_prepare_write on that page. This will force a
+	** __reiserfs_write_begin on that page. This will force a
 	** reiserfs_get_block to unpack the tail for us.
 	*/
 	index = inode->i_size >> PAGE_CACHE_SHIFT;
@@ -210,7 +208,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 	if (!page) {
 		goto out;
 	}
 
-	retval = reiserfs_prepare_write(NULL, page, write_from, write_from);
+	retval = __reiserfs_write_begin(page, write_from, 0);
 	if (retval)
 		goto out_unlock;


@@ -418,8 +418,6 @@ static inline __u32 xattr_hash(const char *msg, int len)
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
-			   unsigned from, unsigned to);
 
 static void update_ctime(struct inode *inode)
 {
@@ -532,8 +530,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
 			rxh->h_hash = cpu_to_le32(xahash);
 		}
 
-		err = reiserfs_prepare_write(NULL, page, page_offset,
-					     page_offset + chunk + skip);
+		err = __reiserfs_write_begin(page, page_offset, chunk + skip);
 		if (!err) {
 			if (buffer)
 				memcpy(data + skip, buffer + buffer_pos, chunk);


@@ -576,7 +576,7 @@ xfs_max_file_offset(
 	/* Figure out maximum filesize, on Linux this can depend on
 	 * the filesystem blocksize (on 32 bit platforms).
-	 * __block_prepare_write does this in an [unsigned] long...
+	 * __block_write_begin does this in an [unsigned] long...
 	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
 	 * So, for page sized blocks (4K on 32 bit platforms),
 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is


@@ -212,7 +212,6 @@ int generic_write_end(struct file *, struct address_space *,
 			loff_t, unsigned, unsigned,
 			struct page *, void *);
 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
-int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
 int cont_write_begin(struct file *, struct address_space *, loff_t,
 			unsigned, unsigned, struct page **, void **,
 			get_block_t *, loff_t *);


@@ -2072,6 +2072,8 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
 
+int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
+
 /* namei.c */
 void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
 int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,