block: unexport bio_clone_bioset
Now only used by the bounce code, so move it there and mark the function static. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
3ed122e68b
commit
c55183c9aa
77
block/bio.c
77
block/bio.c
@ -646,83 +646,6 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(bio_clone_fast);
|
EXPORT_SYMBOL(bio_clone_fast);
|
||||||
|
|
||||||
/**
|
|
||||||
* bio_clone_bioset - clone a bio
|
|
||||||
* @bio_src: bio to clone
|
|
||||||
* @gfp_mask: allocation priority
|
|
||||||
* @bs: bio_set to allocate from
|
|
||||||
*
|
|
||||||
* Clone bio. Caller will own the returned bio, but not the actual data it
|
|
||||||
* points to. Reference count of returned bio will be one.
|
|
||||||
*/
|
|
||||||
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
|
|
||||||
struct bio_set *bs)
|
|
||||||
{
|
|
||||||
struct bvec_iter iter;
|
|
||||||
struct bio_vec bv;
|
|
||||||
struct bio *bio;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Pre immutable biovecs, __bio_clone() used to just do a memcpy from
|
|
||||||
* bio_src->bi_io_vec to bio->bi_io_vec.
|
|
||||||
*
|
|
||||||
* We can't do that anymore, because:
|
|
||||||
*
|
|
||||||
* - The point of cloning the biovec is to produce a bio with a biovec
|
|
||||||
* the caller can modify: bi_idx and bi_bvec_done should be 0.
|
|
||||||
*
|
|
||||||
* - The original bio could've had more than BIO_MAX_PAGES biovecs; if
|
|
||||||
* we tried to clone the whole thing bio_alloc_bioset() would fail.
|
|
||||||
* But the clone should succeed as long as the number of biovecs we
|
|
||||||
* actually need to allocate is fewer than BIO_MAX_PAGES.
|
|
||||||
*
|
|
||||||
* - Lastly, bi_vcnt should not be looked at or relied upon by code
|
|
||||||
* that does not own the bio - reason being drivers don't use it for
|
|
||||||
* iterating over the biovec anymore, so expecting it to be kept up
|
|
||||||
* to date (i.e. for clones that share the parent biovec) is just
|
|
||||||
* asking for trouble and would force extra work on
|
|
||||||
* __bio_clone_fast() anyways.
|
|
||||||
*/
|
|
||||||
|
|
||||||
bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
|
|
||||||
if (!bio)
|
|
||||||
return NULL;
|
|
||||||
bio->bi_disk = bio_src->bi_disk;
|
|
||||||
bio->bi_opf = bio_src->bi_opf;
|
|
||||||
bio->bi_write_hint = bio_src->bi_write_hint;
|
|
||||||
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
|
|
||||||
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
|
|
||||||
|
|
||||||
switch (bio_op(bio)) {
|
|
||||||
case REQ_OP_DISCARD:
|
|
||||||
case REQ_OP_SECURE_ERASE:
|
|
||||||
case REQ_OP_WRITE_ZEROES:
|
|
||||||
break;
|
|
||||||
case REQ_OP_WRITE_SAME:
|
|
||||||
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
bio_for_each_segment(bv, bio_src, iter)
|
|
||||||
bio->bi_io_vec[bio->bi_vcnt++] = bv;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (bio_integrity(bio_src)) {
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = bio_integrity_clone(bio, bio_src, gfp_mask);
|
|
||||||
if (ret < 0) {
|
|
||||||
bio_put(bio);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bio_clone_blkcg_association(bio, bio_src);
|
|
||||||
|
|
||||||
return bio;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(bio_clone_bioset);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* bio_add_pc_page - attempt to add page to bio
|
* bio_add_pc_page - attempt to add page to bio
|
||||||
* @q: the target queue
|
* @q: the target queue
|
||||||
|
@ -195,6 +195,73 @@ static void bounce_end_io_read_isa(struct bio *bio)
|
|||||||
__bounce_end_io_read(bio, &isa_page_pool);
|
__bounce_end_io_read(bio, &isa_page_pool);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * bounce_clone_bio - clone a bio for the bounce-buffer path.
 *
 * The caller owns the returned bio but not the data it points to; the
 * clone's reference count starts at one.  Returns NULL on allocation
 * failure.
 *
 * We deliberately do NOT memcpy bio_src->bi_io_vec wholesale (the way the
 * pre-immutable-biovec __bio_clone() did), because:
 *
 * - the caller must get a modifiable biovec, so bi_idx and bi_bvec_done
 *   have to be 0 in the clone;
 *
 * - bio_src may carry more than BIO_MAX_PAGES biovecs, which would make
 *   bio_alloc_bioset() fail even when the segments still left to process
 *   would fit;
 *
 * - bi_vcnt may not be looked at by code that does not own the bio, and
 *   keeping it current for clones sharing the parent biovec would force
 *   extra work onto __bio_clone_fast().
 */
static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
		struct bio_set *bs)
{
	struct bio *bio;
	struct bio_vec bv;
	struct bvec_iter iter;

	/* Allocate room only for the segments the iterator still covers. */
	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;

	bio->bi_disk = bio_src->bi_disk;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		/* these ops carry no payload */
		break;
	case REQ_OP_WRITE_SAME:
		/* payload is one biovec, repeated */
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp_mask) < 0) {
		bio_put(bio);
		return NULL;
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
|
||||||
|
|
||||||
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
||||||
mempool_t *pool)
|
mempool_t *pool)
|
||||||
{
|
{
|
||||||
@ -222,7 +289,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
|||||||
generic_make_request(*bio_orig);
|
generic_make_request(*bio_orig);
|
||||||
*bio_orig = bio;
|
*bio_orig = bio;
|
||||||
}
|
}
|
||||||
bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
|
bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
|
||||||
&bounce_bio_set);
|
&bounce_bio_set);
|
||||||
|
|
||||||
bio_for_each_segment_all(to, bio, i) {
|
bio_for_each_segment_all(to, bio, i) {
|
||||||
|
@ -429,7 +429,6 @@ extern void bio_put(struct bio *);
|
|||||||
|
|
||||||
extern void __bio_clone_fast(struct bio *, struct bio *);
|
extern void __bio_clone_fast(struct bio *, struct bio *);
|
||||||
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
|
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
|
||||||
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
|
|
||||||
|
|
||||||
extern struct bio_set fs_bio_set;
|
extern struct bio_set fs_bio_set;
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user