Mirror of https://github.com/torvalds/linux.git
block: move bio_alloc_pages() to bcache
bcache is the only user of bio_alloc_pages(), so move this function into bcache to avoid it being misused in the future. Also rename it to bch_bio_alloc_pages(), since it is now bcache-only.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent c2421edf5f
commit 25d8be77e1
block/bio.c (28 lines removed)
--- a/block/bio.c
+++ b/block/bio.c
@@ -968,34 +968,6 @@ void bio_advance(struct bio *bio, unsigned bytes)
 }
 EXPORT_SYMBOL(bio_advance);
 
-/**
- * bio_alloc_pages - allocates a single page for each bvec in a bio
- * @bio: bio to allocate pages for
- * @gfp_mask: flags for allocation
- *
- * Allocates pages up to @bio->bi_vcnt.
- *
- * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
- * freed.
- */
-int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
-{
-        int i;
-        struct bio_vec *bv;
-
-        bio_for_each_segment_all(bv, bio, i) {
-                bv->bv_page = alloc_page(gfp_mask);
-                if (!bv->bv_page) {
-                        while (--bv >= bio->bi_io_vec)
-                                __free_page(bv->bv_page);
-                        return -ENOMEM;
-                }
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL(bio_alloc_pages);
-
 /**
  * bio_copy_data - copy contents of data buffers from one chain of bios to
  * another
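The helper removed above allocated one page per bvec and, on failure, walked back over the bvecs it had already filled and freed them. With it gone from the exported block-layer API, a caller outside bcache would have to open-code the same thing using primitives that remain exported. A minimal sketch, not part of this commit; the helper name example_alloc_bio_pages and its nr_pages parameter are invented for illustration:

#include <linux/bio.h>
#include <linux/gfp.h>

/* Hypothetical replacement for a non-bcache caller; not part of this patch. */
static int example_alloc_bio_pages(struct bio *bio, unsigned int nr_pages,
                                   gfp_t gfp_mask)
{
        unsigned int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = alloc_page(gfp_mask);

                if (!page)
                        goto free_pages;
                /* bio_add_page() returns the number of bytes added, 0 on failure */
                if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
                        __free_page(page);
                        goto free_pages;
                }
        }
        return 0;

free_pages:
        bio_free_pages(bio);    /* drops every page already added to @bio */
        return -ENOMEM;
}

bio_free_pages(), which stays exported (see the include/linux/bio.h hunk below), releases whatever the loop had already attached.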
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -419,7 +419,7 @@ static void do_btree_node_write(struct btree *b)
                 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
                                bset_sector_offset(&b->keys, i));
 
-        if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+        if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
                 int j;
                 struct bio_vec *bv;
                 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -116,7 +116,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
                 return;
         check->bi_opf = REQ_OP_READ;
 
-        if (bio_alloc_pages(check, GFP_NOIO))
+        if (bch_bio_alloc_pages(check, GFP_NOIO))
                 goto out_put;
 
         submit_bio_wait(check);
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -162,7 +162,7 @@ static void read_moving(struct cache_set *c)
                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
                 bio->bi_end_io = read_moving_endio;
 
-                if (bio_alloc_pages(bio, GFP_KERNEL))
+                if (bch_bio_alloc_pages(bio, GFP_KERNEL))
                         goto err;
 
                 trace_bcache_gc_copy(&w->key);
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -841,7 +841,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
         cache_bio->bi_private = &s->cl;
 
         bch_bio_map(cache_bio, NULL);
-        if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+        if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                 goto out_put;
 
         if (reada)
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -283,6 +283,33 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
         }
 }
 
+/**
+ * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+        int i;
+        struct bio_vec *bv;
+
+        bio_for_each_segment_all(bv, bio, i) {
+                bv->bv_page = alloc_page(gfp_mask);
+                if (!bv->bv_page) {
+                        while (--bv >= bio->bi_io_vec)
+                                __free_page(bv->bv_page);
+                        return -ENOMEM;
+                }
+        }
+
+        return 0;
+}
+
 /*
  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
  * use permitted, subject to terms of PostgreSQL license; see.)
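The function arrives in bcache unchanged apart from the bch_ prefix. Note the error path: when alloc_page() fails, the while (--bv >= bio->bi_io_vec) loop steps backwards through the bvecs that were already populated and frees them, so -ENOMEM is returned with nothing leaked. The call sites in this patch pair it with bch_bio_map(bio, NULL), which sizes the bvecs from bi_iter.bi_size but leaves bv_page NULL. A hypothetical sketch of that pattern; example_make_cache_bio, its parameters, and the GFP choice are assumptions for illustration, not code taken from bcache:

#include <linux/bio.h>

#include "util.h"       /* bch_bio_map(), bch_bio_alloc_pages() */

/* Hypothetical helper, for illustration only. */
static struct bio *example_make_cache_bio(unsigned int sectors, gfp_t gfp_mask)
{
        unsigned int nr_pages = DIV_ROUND_UP(sectors << 9, PAGE_SIZE);
        struct bio *bio = bio_alloc(gfp_mask, nr_pages);

        if (!bio)
                return NULL;

        bio->bi_iter.bi_size = sectors << 9;

        bch_bio_map(bio, NULL);                 /* set bvec lengths/offsets, bv_page stays NULL */
        if (bch_bio_alloc_pages(bio, gfp_mask)) {       /* back each bvec with a page */
                bio_put(bio);
                return NULL;
        }
        return bio;
}

Once the I/O completes and the data is no longer needed, bio_free_pages() followed by bio_put() releases the pages and then the bio itself.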
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -558,6 +558,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 }
 
 void bch_bio_map(struct bio *bio, void *base);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
 
 static inline sector_t bdev_sectors(struct block_device *bdev)
 {
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -278,7 +278,7 @@ static void read_dirty(struct cached_dev *dc)
                 bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
                 io->bio.bi_end_io = read_dirty_endio;
 
-                if (bio_alloc_pages(&io->bio, GFP_KERNEL))
+                if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
                         goto err_free;
 
                 trace_bcache_writeback(&w->key);
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -500,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
 #endif
 
 extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
 extern void bio_free_pages(struct bio *bio);
 
 extern struct bio *bio_copy_user_iov(struct request_queue *,