block: pass a block_device and opf to bio_init
Pass the block_device that we plan to use this bio for and the operation to bio_init to optimize the assignment. A NULL block_device can be passed, both for the passthrough case on a raw request_queue and to temporarily avoid refactoring some nasty code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-19-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 49add4966d (parent 07888c665b)
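The API change collapses the old three-step setup (bio_init(), then bio_set_dev(), then a bi_opf assignment) into a single call. A minimal before/after sketch of the migration, modeled on the blkdev_issue_flush() conversion in the diff below; the wrapper function name is illustrative, not part of the patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: flush a device using an on-stack bio. */
static int example_flush(struct block_device *bdev)
{
	struct bio bio;

	/*
	 * Old calling convention (before this patch):
	 *	bio_init(&bio, NULL, 0);
	 *	bio_set_dev(&bio, bdev);
	 *	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	 */

	/*
	 * New calling convention: bdev and opf go straight into bio_init().
	 * bdev may be NULL for passthrough bios on a raw request_queue.
	 */
	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	return submit_bio_wait(&bio);
}

Note that with a non-NULL bdev, bio_init() now also associates the bio with the bdev's blkg under CONFIG_BLK_CGROUP (see the first block/bio.c hunk), which previously happened in bio_set_dev().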
block/bio.c (27 changed lines)

@@ -249,12 +249,12 @@ static void bio_free(struct bio *bio)
  * they must remember to pair any call to bio_init() with bio_uninit()
  * when IO has completed, or when the bio is released.
  */
-void bio_init(struct bio *bio, struct bio_vec *table,
-	      unsigned short max_vecs)
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+	      unsigned short max_vecs, unsigned int opf)
 {
 	bio->bi_next = NULL;
-	bio->bi_bdev = NULL;
-	bio->bi_opf = 0;
+	bio->bi_bdev = bdev;
+	bio->bi_opf = opf;
 	bio->bi_flags = 0;
 	bio->bi_ioprio = 0;
 	bio->bi_write_hint = 0;
@@ -268,6 +268,8 @@ void bio_init(struct bio *bio, struct bio_vec *table,
 #ifdef CONFIG_BLK_CGROUP
 	bio->bi_blkg = NULL;
 	bio->bi_issue.value = 0;
+	if (bdev)
+		bio_associate_blkg(bio);
 #ifdef CONFIG_BLK_CGROUP_IOCOST
 	bio->bi_iocost_cost = 0;
 #endif
@@ -504,17 +506,14 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 		if (unlikely(!bvl))
 			goto err_free;
 
-		bio_init(bio, bvl, nr_vecs);
+		bio_init(bio, bdev, bvl, nr_vecs, opf);
 	} else if (nr_vecs) {
-		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
 	} else {
-		bio_init(bio, NULL, 0);
+		bio_init(bio, bdev, NULL, 0, opf);
 	}
 
 	bio->bi_pool = bs;
-	if (bdev)
-		bio_set_dev(bio, bdev);
-	bio->bi_opf = opf;
 	return bio;
 
 err_free:
@@ -542,7 +541,8 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
 	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
 	if (unlikely(!bio))
 		return NULL;
-	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
+		 0);
 	bio->bi_pool = NULL;
 	return bio;
 }
@@ -1756,9 +1756,8 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
 	cache->free_list = bio->bi_next;
 	cache->nr--;
 	put_cpu();
-	bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
-	bio_set_dev(bio, bdev);
-	bio->bi_opf = opf;
+	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
+		 nr_vecs, opf);
 	bio->bi_pool = bs;
 	bio_set_flag(bio, BIO_PERCPU_CACHE);
 	return bio;
block/blk-flush.c

@@ -460,9 +460,7 @@ int blkdev_issue_flush(struct block_device *bdev)
 {
 	struct bio bio;
 
-	bio_init(&bio, NULL, 0);
-	bio_set_dev(&bio, bdev);
-	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
 	return submit_bio_wait(&bio);
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
block/blk-zoned.c

@@ -238,10 +238,7 @@ static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
 {
 	struct bio bio;
 
-	bio_init(&bio, NULL, 0);
-	bio_set_dev(&bio, bdev);
-	bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
-
+	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
 	return submit_bio_wait(&bio);
 }
block/fops.c (18 changed lines)

@@ -75,8 +75,13 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 		return -ENOMEM;
 	}
 
-	bio_init(&bio, vecs, nr_pages);
-	bio_set_dev(&bio, bdev);
+	if (iov_iter_rw(iter) == READ) {
+		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
+		if (iter_is_iovec(iter))
+			should_dirty = true;
+	} else {
+		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
+	}
 	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 	bio.bi_write_hint = iocb->ki_hint;
 	bio.bi_private = current;
@@ -88,14 +93,9 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 		goto out;
 	ret = bio.bi_iter.bi_size;
 
-	if (iov_iter_rw(iter) == READ) {
-		bio.bi_opf = REQ_OP_READ;
-		if (iter_is_iovec(iter))
-			should_dirty = true;
-	} else {
-		bio.bi_opf = dio_bio_write_op(iocb);
+	if (iov_iter_rw(iter) == WRITE)
 		task_io_account_write(ret);
-	}
+
 	if (iocb->ki_flags & IOCB_NOWAIT)
 		bio.bi_opf |= REQ_NOWAIT;
 	if (iocb->ki_flags & IOCB_HIPRI)
drivers/block/floppy.c

@@ -4129,15 +4129,13 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
 
 	cbdata.drive = drive;
 
-	bio_init(&bio, &bio_vec, 1);
-	bio_set_dev(&bio, bdev);
+	bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ);
 	bio_add_page(&bio, page, block_size(bdev), 0);
 
 	bio.bi_iter.bi_sector = 0;
 	bio.bi_flags |= (1 << BIO_QUIET);
 	bio.bi_private = &cbdata;
 	bio.bi_end_io = floppy_rb0_cb;
-	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 
 	init_completion(&cbdata.complete);
drivers/block/zram/zram_drv.c

@@ -743,10 +743,9 @@ static ssize_t writeback_store(struct device *dev,
 			continue;
 		}
 
-		bio_init(&bio, &bio_vec, 1);
-		bio_set_dev(&bio, zram->bdev);
+		bio_init(&bio, zram->bdev, &bio_vec, 1,
+			 REQ_OP_WRITE | REQ_SYNC);
 		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
-		bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
 
 		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
 				bvec.bv_offset);
drivers/md/bcache/io.c

@@ -26,7 +26,8 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
 	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
 	struct bio *bio = &b->bio;
 
-	bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+	bio_init(bio, NULL, bio->bi_inline_vecs,
+		 meta_bucket_pages(&c->cache->sb), 0);
 
 	return bio;
 }
drivers/md/bcache/journal.c

@@ -611,11 +611,9 @@ static void do_journal_discard(struct cache *ca)
 
 	atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
-	bio_init(bio, bio->bi_inline_vecs, 1);
-	bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
+	bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
 	bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
 					   ca->sb.d[ja->discard_idx]);
-	bio_set_dev(bio, ca->bdev);
 	bio->bi_iter.bi_size	= bucket_bytes(ca);
 	bio->bi_end_io		= journal_discard_endio;
drivers/md/bcache/movinggc.c

@@ -79,8 +79,8 @@ static void moving_init(struct moving_io *io)
 {
 	struct bio *bio = &io->bio.bio;
 
-	bio_init(bio, bio->bi_inline_vecs,
-		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
+	bio_init(bio, NULL, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
drivers/md/bcache/request.c

@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
 {
 	struct bio *bio = &s->bio.bio;
 
-	bio_init(bio, NULL, 0);
+	bio_init(bio, NULL, NULL, 0, 0);
 	__bio_clone_fast(bio, orig_bio);
 	/*
 	 * bi_end_io can be set separately somewhere else, e.g. the
drivers/md/bcache/super.c

@@ -342,8 +342,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 	down(&dc->sb_write_mutex);
 	closure_init(cl, parent);
 
-	bio_init(bio, dc->sb_bv, 1);
-	bio_set_dev(bio, dc->bdev);
+	bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
 	bio->bi_end_io	= write_bdev_super_endio;
 	bio->bi_private = dc;
 
@@ -386,8 +385,7 @@ void bcache_write_super(struct cache_set *c)
 		if (ca->sb.version < version)
 			ca->sb.version = version;
 
-		bio_init(bio, ca->sb_bv, 1);
-		bio_set_dev(bio, ca->bdev);
+		bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
 		bio->bi_end_io	= write_super_endio;
 		bio->bi_private = ca;
 
@@ -2239,7 +2237,7 @@ static int cache_alloc(struct cache *ca)
 	__module_get(THIS_MODULE);
 	kobject_init(&ca->kobj, &bch_cache_ktype);
 
-	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
+	bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
 
 	/*
 	 * when ca->sb.njournal_buckets is not zero, journal exists,
drivers/md/bcache/writeback.c

@@ -292,8 +292,8 @@ static void dirty_init(struct keybuf_key *w)
 	struct dirty_io *io = w->private;
 	struct bio *bio = &io->bio;
 
-	bio_init(bio, bio->bi_inline_vecs,
-		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
+	bio_init(bio, NULL, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
 	if (!io->dc->writeback_percent)
 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
drivers/md/dm.c

@@ -1303,9 +1303,8 @@ static int __send_empty_flush(struct clone_info *ci)
 	 * need to reference it after submit. It's just used as
 	 * the basis for the clone(s).
 	 */
-	bio_init(&flush_bio, NULL, 0);
-	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-	bio_set_dev(&flush_bio, ci->io->md->disk->part0);
+	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
+		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
 
 	ci->bio = &flush_bio;
 	ci->sector_count = 0;
drivers/md/md-multipath.c

@@ -121,7 +121,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
-	bio_init(&mp_bh->bio, NULL, 0);
+	bio_init(&mp_bh->bio, NULL, NULL, 0, 0);
 	__bio_clone_fast(&mp_bh->bio, bio);
 
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
drivers/md/md.c

@@ -998,13 +998,11 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 	struct bio bio;
 	struct bio_vec bvec;
 
-	bio_init(&bio, &bvec, 1);
-
 	if (metadata_op && rdev->meta_bdev)
-		bio_set_dev(&bio, rdev->meta_bdev);
+		bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
 	else
-		bio_set_dev(&bio, rdev->bdev);
-	bio.bi_opf = op | op_flags;
+		bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+
 	if (metadata_op)
 		bio.bi_iter.bi_sector = sector + rdev->sb_start;
 	else if (rdev->mddev->reshape_position != MaxSector &&
drivers/md/raid5-cache.c

@@ -3108,7 +3108,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	INIT_LIST_HEAD(&log->io_end_ios);
 	INIT_LIST_HEAD(&log->flushing_ios);
 	INIT_LIST_HEAD(&log->finished_ios);
-	bio_init(&log->flush_bio, NULL, 0);
+	bio_init(&log->flush_bio, NULL, NULL, 0, 0);
 
 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
 	if (!log->io_kc)
drivers/md/raid5-ppl.c

@@ -250,7 +250,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
 	INIT_LIST_HEAD(&io->stripe_list);
 	atomic_set(&io->pending_stripes, 0);
 	atomic_set(&io->pending_flushes, 0);
-	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
+	bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0);
 
 	pplhdr = page_address(io->header_page);
 	clear_page(pplhdr);
drivers/md/raid5.c

@@ -2310,8 +2310,8 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
 	for (i = 0; i < disks; i++) {
 		struct r5dev *dev = &sh->dev[i];
 
-		bio_init(&dev->req, &dev->vec, 1);
-		bio_init(&dev->rreq, &dev->rvec, 1);
+		bio_init(&dev->req, NULL, &dev->vec, 1, 0);
+		bio_init(&dev->rreq, NULL, &dev->rvec, 1, 0);
 	}
 
 	if (raid5_has_ppl(conf)) {
drivers/nvme/target/io-cmd-bdev.c

@@ -267,9 +267,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 
 	if (nvmet_use_inline_bvec(req)) {
 		bio = &req->b.inline_bio;
-		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-		bio_set_dev(bio, req->ns->bdev);
-		bio->bi_opf = op;
+		bio_init(bio, req->ns->bdev, req->inline_bvec,
+			 ARRAY_SIZE(req->inline_bvec), op);
 	} else {
 		bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
 				GFP_KERNEL);
@@ -328,11 +327,10 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 
-	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-	bio_set_dev(bio, req->ns->bdev);
+	bio_init(bio, req->ns->bdev, req->inline_bvec,
+		 ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
 	bio->bi_private = req;
 	bio->bi_end_io = nvmet_bio_done;
-	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	submit_bio(bio);
 }
drivers/nvme/target/passthru.c

@@ -206,8 +206,8 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
 
 	if (nvmet_use_inline_bvec(req)) {
 		bio = &req->p.inline_bio;
-		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-		bio->bi_opf = req_op(rq);
+		bio_init(bio, NULL, req->inline_bvec,
+			 ARRAY_SIZE(req->inline_bvec), req_op(rq));
 	} else {
 		bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
 				GFP_KERNEL);
drivers/nvme/target/zns.c

@@ -552,8 +552,8 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 
 	if (nvmet_use_inline_bvec(req)) {
 		bio = &req->z.inline_bio;
-		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-		bio->bi_opf = op;
+		bio_init(bio, req->ns->bdev, req->inline_bvec,
+			 ARRAY_SIZE(req->inline_bvec), op);
 	} else {
 		bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
 	}
fs/iomap/buffered-io.c

@@ -549,10 +549,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
 	struct bio_vec bvec;
 	struct bio bio;
 
-	bio_init(&bio, &bvec, 1);
-	bio.bi_opf = REQ_OP_READ;
+	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
 	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
-	bio_set_dev(&bio, iomap->bdev);
 	bio_add_folio(&bio, folio, plen, poff);
 	return submit_bio_wait(&bio);
 }
fs/xfs/xfs_bio_io.c

@@ -36,9 +36,7 @@ xfs_flush_bdev_async(
 		return;
 	}
 
-	bio_init(bio, NULL, 0);
-	bio_set_dev(bio, bdev);
-	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+	bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
 	bio->bi_private = done;
 	bio->bi_end_io = xfs_flush_bdev_async_endio;
 
fs/xfs/xfs_log.c

@@ -1883,19 +1883,19 @@ xlog_write_iclog(
 		return;
 	}
 
-	bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
-	bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
-	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
-	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
-	iclog->ic_bio.bi_private = iclog;
-
 	/*
 	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer the are more
 	 * IOs coming immediately after this one. This prevents the block layer
 	 * writeback throttle from throttling log writes behind background
 	 * metadata writeback and causing priority inversions.
 	 */
-	iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
+	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
+		 howmany(count, PAGE_SIZE),
+		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
+	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
+	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
+	iclog->ic_bio.bi_private = iclog;
+
 	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
 		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
 		/*
fs/zonefs/super.c

@@ -1540,10 +1540,8 @@ static int zonefs_read_super(struct super_block *sb)
 	if (!page)
 		return -ENOMEM;
 
-	bio_init(&bio, &bio_vec, 1);
+	bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ);
 	bio.bi_iter.bi_sector = 0;
-	bio.bi_opf = REQ_OP_READ;
-	bio_set_dev(&bio, sb->s_bdev);
 	bio_add_page(&bio, page, PAGE_SIZE, 0);
 
 	ret = submit_bio_wait(&bio);
include/linux/bio.h

@@ -456,8 +456,8 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
 struct request_queue;
 
 extern int submit_bio_wait(struct bio *bio);
-extern void bio_init(struct bio *bio, struct bio_vec *table,
-		     unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+	      unsigned short max_vecs, unsigned int opf);
 extern void bio_uninit(struct bio *);
 extern void bio_reset(struct bio *);
 void bio_chain(struct bio *, struct bio *);