fs/buffer: Combine two submit_bh() and ll_rw_block() arguments
Both submit_bh() and ll_rw_block() accept a request operation type and
request flags as their first two arguments. Micro-optimize these two
functions by combining these first two arguments into a single argument.
This patch does not change the behavior of any of the modified code.

Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Jan Kara <jack@suse.cz>
Acked-by: Song Liu <song@kernel.org> (for the md changes)
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-48-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3ae7286943
commit 1420c4a549
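For orientation before the per-file hunks: the patch folds the old (enum req_op, blk_opf_t) argument pair into a single blk_opf_t value, from which the operation is recoverable via REQ_OP_MASK. Below is a minimal standalone sketch of that convention; the typedef, enum values, and flag bits are illustrative stand-ins, not the kernel's actual definitions (those live in include/linux/blk_types.h).

```c
#include <stdio.h>

/* Illustrative stand-ins only: the kernel's real definitions use
 * different bit values. The point is the packing scheme: the op sits
 * in the low bits, the request flags in the remaining bits. */
typedef unsigned int blk_opf_t;
enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define REQ_OP_MASK ((blk_opf_t)0xff)    /* low bits: the operation  */
#define REQ_SYNC    ((blk_opf_t)1 << 8)  /* high bits: request flags */

int main(void)
{
	/* Old calling convention: submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
	 * New calling convention: submit_bh(REQ_OP_WRITE | REQ_SYNC, bh); */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC;

	/* The callee recovers the operation by masking, as the patched
	 * submit_bh_wbc() and ll_rw_block() do in the hunks below. */
	enum req_op op = opf & REQ_OP_MASK;

	printf("op=%d sync=%d\n", (int)op, !!(opf & REQ_SYNC));
	return 0;
}
```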
@@ -302,7 +302,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
 		atomic_inc(&bitmap->pending_writes);
 		set_buffer_locked(bh);
 		set_buffer_mapped(bh);
-		submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+		submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 		bh = bh->b_this_page;
 	}

@@ -394,7 +394,7 @@ static int read_page(struct file *file, unsigned long index,
 			atomic_inc(&bitmap->pending_writes);
 			set_buffer_locked(bh);
 			set_buffer_mapped(bh);
-			submit_bh(REQ_OP_READ, 0, bh);
+			submit_bh(REQ_OP_READ, bh);
 		}
 		blk_cur++;
 		bh = bh->b_this_page;
fs/buffer.c (53 changed lines)
@@ -52,8 +52,8 @@
 #include "internal.h"

 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
-			 struct buffer_head *bh, struct writeback_control *wbc);
+static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+			 struct writeback_control *wbc);

 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

@@ -562,7 +562,7 @@ void write_boundary_block(struct block_device *bdev,
 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
 	if (bh) {
 		if (buffer_dirty(bh))
-			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+			ll_rw_block(REQ_OP_WRITE, 1, &bh);
 		put_bh(bh);
 	}
 }
@@ -1174,7 +1174,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
 	} else {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(REQ_OP_READ, 0, bh);
+		submit_bh(REQ_OP_READ, bh);
 		wait_on_buffer(bh);
 		if (buffer_uptodate(bh))
 			return bh;
@@ -1342,7 +1342,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 	if (likely(bh)) {
-		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+		ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
 		brelse(bh);
 	}
 }
@@ -1353,7 +1353,7 @@ void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
 {
 	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
 	if (likely(bh)) {
-		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+		ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
 		brelse(bh);
 	}
 }
@@ -1804,7 +1804,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
+			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -1858,7 +1858,7 @@ recover:
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			clear_buffer_dirty(bh);
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
+			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -2033,7 +2033,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 1, &bh);
 			*wait_bh++=bh;
 		}
 	}
@@ -2334,7 +2334,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 		if (buffer_uptodate(bh))
 			end_buffer_async_read(bh, 1);
 		else
-			submit_bh(REQ_OP_READ, 0, bh);
+			submit_bh(REQ_OP_READ, bh);
 	}
 	return 0;
 }
@@ -2665,7 +2665,7 @@ int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 		if (block_start < from || block_end > to) {
 			lock_buffer(bh);
 			bh->b_end_io = end_buffer_read_nobh;
-			submit_bh(REQ_OP_READ, 0, bh);
+			submit_bh(REQ_OP_READ, bh);
 			nr_reads++;
 		}
 	}
@@ -2915,7 +2915,7 @@ int block_truncate_page(struct address_space *mapping,

 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
 		err = -EIO;
-		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 1, &bh);
 		wait_on_buffer(bh);
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
@@ -2994,9 +2994,10 @@ static void end_bio_bh_io_sync(struct bio *bio)
 	bio_put(bio);
 }

-static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
-			 struct buffer_head *bh, struct writeback_control *wbc)
+static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+			 struct writeback_control *wbc)
 {
+	const enum req_op op = opf & REQ_OP_MASK;
 	struct bio *bio;

 	BUG_ON(!buffer_locked(bh));
@@ -3012,11 +3013,11 @@ static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
 		clear_buffer_write_io_error(bh);

 	if (buffer_meta(bh))
-		op_flags |= REQ_META;
+		opf |= REQ_META;
 	if (buffer_prio(bh))
-		op_flags |= REQ_PRIO;
+		opf |= REQ_PRIO;

-	bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO);
+	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);

 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);

@@ -3040,9 +3041,9 @@ static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
 	return 0;
 }

-int submit_bh(enum req_op op, blk_opf_t op_flags, struct buffer_head *bh)
+int submit_bh(blk_opf_t opf, struct buffer_head *bh)
 {
-	return submit_bh_wbc(op, op_flags, bh, NULL);
+	return submit_bh_wbc(opf, bh, NULL);
 }
 EXPORT_SYMBOL(submit_bh);

@@ -3072,9 +3073,9 @@ EXPORT_SYMBOL(submit_bh);
  * All of the buffers must be for the same device, and must also be a
  * multiple of the current approved size for the device.
  */
-void ll_rw_block(enum req_op op, blk_opf_t op_flags, int nr,
-		 struct buffer_head *bhs[])
+void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
 {
+	const enum req_op op = opf & REQ_OP_MASK;
 	int i;

 	for (i = 0; i < nr; i++) {
@@ -3086,14 +3087,14 @@ void ll_rw_block(enum req_op op, blk_opf_t op_flags, int nr,
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				submit_bh(op, op_flags, bh);
+				submit_bh(opf, bh);
 				continue;
 			}
 		} else {
 			if (!buffer_uptodate(bh)) {
 				bh->b_end_io = end_buffer_read_sync;
 				get_bh(bh);
-				submit_bh(op, op_flags, bh);
+				submit_bh(opf, bh);
 				continue;
 			}
 		}
@@ -3111,7 +3112,7 @@ void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
 	}
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
-	submit_bh(REQ_OP_WRITE, op_flags, bh);
+	submit_bh(REQ_OP_WRITE | op_flags, bh);
 }
 EXPORT_SYMBOL(write_dirty_buffer);

@@ -3138,7 +3139,7 @@ int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)

 	get_bh(bh);
 	bh->b_end_io = end_buffer_write_sync;
-	ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
+	ret = submit_bh(REQ_OP_WRITE | op_flags, bh);
 	wait_on_buffer(bh);
 	if (!ret && !buffer_uptodate(bh))
 		ret = -EIO;
@@ -3366,7 +3367,7 @@ int bh_submit_read(struct buffer_head *bh)

 	get_bh(bh);
 	bh->b_end_io = end_buffer_read_sync;
-	submit_bh(REQ_OP_READ, 0, bh);
+	submit_bh(REQ_OP_READ, bh);
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return 0;
@@ -668,7 +668,7 @@ static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
 	set_buffer_dirty(bh);
 	set_buffer_uptodate(bh);
 	bh->b_end_io = ext4_end_buffer_io_sync;
-	submit_bh(REQ_OP_WRITE, write_flags, bh);
+	submit_bh(REQ_OP_WRITE | write_flags, bh);
 	EXT4_SB(sb)->s_fc_bh = NULL;
 }

@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
 	lock_buffer(bh);
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
-	submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
+	submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
 	wait_on_buffer(bh);
 	sb_end_write(sb);
 	if (unlikely(!buffer_uptodate(bh)))
@@ -171,7 +171,7 @@ static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,

 	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
 	get_bh(bh);
-	submit_bh(REQ_OP_READ, op_flags, bh);
+	submit_bh(REQ_OP_READ | op_flags, bh);
 }

 void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
@@ -5939,8 +5939,8 @@ static int ext4_commit_super(struct super_block *sb)
 	/* Clear potential dirty bit if it was journalled update */
 	clear_buffer_dirty(sbh);
 	sbh->b_end_io = end_buffer_write_sync;
-	submit_bh(REQ_OP_WRITE,
-		  REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
+	submit_bh(REQ_OP_WRITE | REQ_SYNC |
+		  (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
 	wait_on_buffer(sbh);
 	if (buffer_write_io_error(sbh)) {
 		ext4_msg(sb, KERN_ERR, "I/O error while writing "
@@ -310,9 +310,8 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
 		if (trylock_buffer(rabh)) {
 			if (!buffer_uptodate(rabh)) {
 				rabh->b_end_io = end_buffer_read_sync;
-				submit_bh(REQ_OP_READ,
-					  REQ_RAHEAD | REQ_META | REQ_PRIO,
-					  rabh);
+				submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+					  REQ_PRIO, rabh);
 				continue;
 			}
 			unlock_buffer(rabh);
@@ -1508,9 +1508,8 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
 			continue;
 		}
 		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(REQ_OP_READ,
-			  REQ_RAHEAD | REQ_META | REQ_PRIO,
-			  bh);
+		submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+			  REQ_PRIO, bh);
 		continue;
 	}
 	brelse(bh);
@@ -75,7 +75,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh(REQ_OP_WRITE, write_flags, bh);
+			submit_bh(REQ_OP_WRITE | write_flags, bh);
 			nr_underway++;
 		}
 		bh = next;
@@ -527,7 +527,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);
+		ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &first_bh);

 	dblock++;
 	extlen--;
@@ -536,9 +536,8 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 		bh = gfs2_getbuf(gl, dblock, CREATE);

 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
-			ll_rw_block(REQ_OP_READ,
-				    REQ_RAHEAD | REQ_META | REQ_PRIO,
-				    1, &bh);
+			ll_rw_block(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+				    REQ_PRIO, 1, &bh);
 		brelse(bh);
 		dblock++;
 		extlen--;
@@ -746,7 +746,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 		if (PageUptodate(page))
 			set_buffer_uptodate(bh);
 		if (!buffer_uptodate(bh)) {
-			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
+			ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &bh);
 			wait_on_buffer(bh);
 			if (!buffer_uptodate(bh))
 				goto unlock_out;
@@ -82,7 +82,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 		return 0;
 	}
 	haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
-	ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
+	ll_rw_block(REQ_OP_READ, haveblocks, bhs);

 	curbh = 0;
 	curpage = 0;
@@ -155,10 +155,10 @@ static int journal_submit_commit_record(journal_t *journal,

 	if (journal->j_flags & JBD2_BARRIER &&
 	    !jbd2_has_feature_async_commit(journal))
-		ret = submit_bh(REQ_OP_WRITE,
-				REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
+		ret = submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH |
+				REQ_FUA, bh);
 	else
-		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+		ret = submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);

 	*cbh = bh;
 	return ret;
@@ -763,7 +763,7 @@ start_journal_io:
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			bh->b_end_io = journal_end_buffer_io_sync;
-			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+			submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 		}
 		cond_resched();

@@ -1638,7 +1638,7 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
 	sb->s_checksum = jbd2_superblock_csum(journal, sb);
 	get_bh(bh);
 	bh->b_end_io = end_buffer_write_sync;
-	ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
+	ret = submit_bh(REQ_OP_WRITE | write_flags, bh);
 	wait_on_buffer(bh);
 	if (buffer_write_io_error(bh)) {
 		clear_buffer_write_io_error(bh);
@@ -1900,7 +1900,7 @@ static int journal_get_superblock(journal_t *journal)

 	J_ASSERT(bh != NULL);
 	if (!buffer_uptodate(bh)) {
-		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 1, &bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			printk(KERN_ERR
@@ -100,7 +100,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
 		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
 			bufs[nbufs++] = bh;
 			if (nbufs == MAXBUF) {
-				ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
+				ll_rw_block(REQ_OP_READ, nbufs, bufs);
 				journal_brelse_array(bufs, nbufs);
 				nbufs = 0;
 			}
@@ -109,7 +109,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
 	}

 	if (nbufs)
-		ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
+		ll_rw_block(REQ_OP_READ, nbufs, bufs);
 	err = 0;

 failed:
@@ -122,7 +122,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
 	bh->b_blocknr = pblocknr; /* set block address for read */
 	bh->b_end_io = end_buffer_read_sync;
 	get_bh(bh);
-	submit_bh(mode, mode_flags, bh);
+	submit_bh(mode | mode_flags, bh);
 	bh->b_blocknr = blocknr; /* set back to the given block address */
 	*submit_ptr = pblocknr;
 	err = 0;
@@ -92,7 +92,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 		bh->b_blocknr = pbn;
 	bh->b_end_io = end_buffer_read_sync;
 	get_bh(bh);
-	submit_bh(REQ_OP_READ, 0, bh);
+	submit_bh(REQ_OP_READ, bh);
 	if (vbn)
 		bh->b_blocknr = vbn;
 out:
@@ -148,7 +148,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,

 	bh->b_end_io = end_buffer_read_sync;
 	get_bh(bh);
-	submit_bh(mode, mode_flags, bh);
+	submit_bh(mode | mode_flags, bh);
 	ret = 0;

 	trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);
@@ -342,7 +342,7 @@ handle_zblock:
 	for (i = 0; i < nr; i++) {
 		tbh = arr[i];
 		if (likely(!buffer_uptodate(tbh)))
-			submit_bh(REQ_OP_READ, 0, tbh);
+			submit_bh(REQ_OP_READ, tbh);
 		else
 			ntfs_end_buffer_async_read(tbh, 1);
 	}
@@ -859,7 +859,7 @@ lock_retry_remap:
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh(REQ_OP_WRITE, 0, bh);
+			submit_bh(REQ_OP_WRITE, bh);
 			need_end_writeback = false;
 		}
 		bh = next;
@@ -1187,7 +1187,7 @@ lock_retry_remap:
 		BUG_ON(!buffer_mapped(tbh));
 		get_bh(tbh);
 		tbh->b_end_io = end_buffer_write_sync;
-		submit_bh(REQ_OP_WRITE, 0, tbh);
+		submit_bh(REQ_OP_WRITE, tbh);
 	}
 	/* Synchronize the mft mirror now if not @sync. */
 	if (is_mft && !sync)
@@ -658,7 +658,7 @@ lock_retry_remap:
 		}
 		get_bh(tbh);
 		tbh->b_end_io = end_buffer_read_sync;
-		submit_bh(REQ_OP_READ, 0, tbh);
+		submit_bh(REQ_OP_READ, tbh);
 	}

 	/* Wait for io completion on all buffer heads. */
@@ -537,7 +537,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
 	lock_buffer(bh);
 	get_bh(bh);
 	bh->b_end_io = end_buffer_read_sync;
-	return submit_bh(REQ_OP_READ, 0, bh);
+	return submit_bh(REQ_OP_READ, bh);
 }

 /**
@@ -807,7 +807,7 @@ map_vcn:
 		 * completed ignore errors afterwards as we can assume
 		 * that if one buffer worked all of them will work.
 		 */
-		submit_bh(REQ_OP_WRITE, 0, bh);
+		submit_bh(REQ_OP_WRITE, bh);
 		if (should_wait) {
 			should_wait = false;
 			wait_on_buffer(bh);
@@ -583,7 +583,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 		clear_buffer_dirty(tbh);
 		get_bh(tbh);
 		tbh->b_end_io = end_buffer_write_sync;
-		submit_bh(REQ_OP_WRITE, 0, tbh);
+		submit_bh(REQ_OP_WRITE, tbh);
 	}
 	/* Wait on i/o completion of buffers. */
 	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
@@ -780,7 +780,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
 		clear_buffer_dirty(tbh);
 		get_bh(tbh);
 		tbh->b_end_io = end_buffer_write_sync;
-		submit_bh(REQ_OP_WRITE, 0, tbh);
+		submit_bh(REQ_OP_WRITE, tbh);
 	}
 	/* Synchronize the mft mirror now if not @sync. */
 	if (!sync && ni->mft_no < vol->mftmirr_size)
@@ -242,7 +242,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
 		lock_buffer(bh);
 		bh->b_end_io = end_buffer_read_sync;
 		get_bh(bh);
-		submit_bh(REQ_OP_READ, 0, bh);
+		submit_bh(REQ_OP_READ, bh);

 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
@@ -629,7 +629,7 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
 		bh->b_size = block_size;
 		off = vbo & (PAGE_SIZE - 1);
 		set_bh_page(bh, page, off);
-		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 1, &bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			err = -EIO;
@@ -638,7 +638,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 			   !buffer_new(bh) &&
 			   ocfs2_should_read_blk(inode, page, block_start) &&
 			   (block_start < from || block_end > to)) {
-			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 1, &bh);
 			*wait_bh++=bh;
 		}

@@ -64,7 +64,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,

 	get_bh(bh); /* for end_buffer_write_sync() */
 	bh->b_end_io = end_buffer_write_sync;
-	submit_bh(REQ_OP_WRITE, 0, bh);
+	submit_bh(REQ_OP_WRITE, bh);

 	wait_on_buffer(bh);

@@ -147,7 +147,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,

 		get_bh(bh); /* for end_buffer_read_sync() */
 		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(REQ_OP_READ, 0, bh);
+		submit_bh(REQ_OP_READ, bh);
 	}

 read_failure:
@@ -328,7 +328,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
 			if (validate)
 				set_buffer_needs_validate(bh);
 			bh->b_end_io = end_buffer_read_sync;
-			submit_bh(REQ_OP_READ, 0, bh);
+			submit_bh(REQ_OP_READ, bh);
 			continue;
 		}
 	}
@@ -449,7 +449,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
 	get_bh(bh); /* for end_buffer_write_sync() */
 	bh->b_end_io = end_buffer_write_sync;
 	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
-	submit_bh(REQ_OP_WRITE, 0, bh);
+	submit_bh(REQ_OP_WRITE, bh);

 	wait_on_buffer(bh);

@@ -1785,7 +1785,7 @@ static int ocfs2_get_sector(struct super_block *sb,
 		if (!buffer_dirty(*bh))
 			clear_buffer_uptodate(*bh);
 		unlock_buffer(*bh);
-		ll_rw_block(REQ_OP_READ, 0, 1, bh);
+		ll_rw_block(REQ_OP_READ, 1, bh);
 		wait_on_buffer(*bh);
 		if (!buffer_uptodate(*bh)) {
 			mlog_errno(-EIO);
@@ -2664,7 +2664,7 @@ static int reiserfs_write_full_page(struct page *page,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh(REQ_OP_WRITE, 0, bh);
+			submit_bh(REQ_OP_WRITE, bh);
 			nr++;
 		}
 		put_bh(bh);
@@ -2724,7 +2724,7 @@ fail:
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			clear_buffer_dirty(bh);
-			submit_bh(REQ_OP_WRITE, 0, bh);
+			submit_bh(REQ_OP_WRITE, bh);
 			nr++;
 		}
 		put_bh(bh);
@@ -650,7 +650,7 @@ static void submit_logged_buffer(struct buffer_head *bh)
 		BUG();
 	if (!buffer_uptodate(bh))
 		BUG();
-	submit_bh(REQ_OP_WRITE, 0, bh);
+	submit_bh(REQ_OP_WRITE, bh);
 }

 static void submit_ordered_buffer(struct buffer_head *bh)
@@ -660,7 +660,7 @@ static void submit_ordered_buffer(struct buffer_head *bh)
 	clear_buffer_dirty(bh);
 	if (!buffer_uptodate(bh))
 		BUG();
-	submit_bh(REQ_OP_WRITE, 0, bh);
+	submit_bh(REQ_OP_WRITE, bh);
 }

 #define CHUNK_SIZE 32
@@ -868,7 +868,7 @@ loop_next:
 		 */
 		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
 			spin_unlock(lock);
-			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+			ll_rw_block(REQ_OP_WRITE, 1, &bh);
 			spin_lock(lock);
 		}
 		put_bh(bh);
@@ -1054,7 +1054,7 @@ static int flush_commit_list(struct super_block *s,
 		if (tbh) {
 			if (buffer_dirty(tbh)) {
 				depth = reiserfs_write_unlock_nested(s);
-				ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
+				ll_rw_block(REQ_OP_WRITE, 1, &tbh);
 				reiserfs_write_lock_nested(s, depth);
 			}
 			put_bh(tbh) ;
@@ -2240,7 +2240,7 @@ abort_replay:
 		}
 	}
 	/* read in the log blocks, memcpy to the corresponding real block */
-	ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
+	ll_rw_block(REQ_OP_READ, get_desc_trans_len(desc), log_blocks);
 	for (i = 0; i < get_desc_trans_len(desc); i++) {

 		wait_on_buffer(log_blocks[i]);
@@ -2342,7 +2342,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
 		} else
 			bhlist[j++] = bh;
 	}
-	ll_rw_block(REQ_OP_READ, 0, j, bhlist);
+	ll_rw_block(REQ_OP_READ, j, bhlist);
 	for (i = 1; i < j; i++)
 		brelse(bhlist[i]);
 	bh = bhlist[0];
@@ -579,7 +579,7 @@ static int search_by_key_reada(struct super_block *s,
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
 				depth = reiserfs_write_unlock_nested(s);
-			ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, bh + j);
+			ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, bh + j);
 		}
 		brelse(bh[j]);
 	}
@@ -685,7 +685,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
 		if (!buffer_uptodate(bh) && depth == -1)
 			depth = reiserfs_write_unlock_nested(sb);

-		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 1, &bh);
 		wait_on_buffer(bh);

 		if (depth != -1)
@@ -1702,7 +1702,7 @@ static int read_super_block(struct super_block *s, int offset)
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-	ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
+	ll_rw_block(REQ_OP_READ, 1, &SB_BUFFER_WITH_SB(s));
 	wait_on_buffer(SB_BUFFER_WITH_SB(s));
 	if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
 		reiserfs_warning(s, "reiserfs-2504", "error reading the super");
@@ -130,7 +130,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 				brelse(tmp);
 			}
 			if (num) {
-				ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
+				ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
 				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}
@@ -89,7 +89,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
 			brelse(tmp);
 		}
 		if (num) {
-			ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
+			ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
 			for (i = 0; i < num; i++)
 				brelse(bha[i]);
 		}
@@ -1214,7 +1214,7 @@ struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
 	if (buffer_uptodate(bh))
 		return bh;

-	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+	ll_rw_block(REQ_OP_READ, 1, &bh);

 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
@@ -296,7 +296,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 			if (!buffer_mapped(bh))
 				map_bh(bh, inode->i_sb, oldb + pos);
 			if (!buffer_uptodate(bh)) {
-				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+				ll_rw_block(REQ_OP_READ, 1, &bh);
 				wait_on_buffer(bh);
 				if (!buffer_uptodate(bh)) {
 					ufs_error(inode->i_sb, __func__,
@@ -202,11 +202,11 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(enum req_op, blk_opf_t, int, struct buffer_head * bh[]);
+void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
-int submit_bh(enum req_op, blk_opf_t, struct buffer_head *);
+int submit_bh(blk_opf_t, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);
 int bh_uptodate_or_lock(struct buffer_head *bh);
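Read together, the post-patch entry points look like this; a brief recap of the API surface from the include/linux/buffer_head.h hunk above, with argument names added here for readability (the declarations in the header itself are unnamed):

```c
/* Post-patch prototypes (argument names are illustrative). */
void ll_rw_block(blk_opf_t opf, int nr, struct buffer_head *bhs[]);
int submit_bh(blk_opf_t opf, struct buffer_head *bh);

/* Typical call sites after this patch, as seen in the hunks above:
 *   submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 *   ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
 */
```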