mirror of https://github.com/torvalds/linux.git
btrfs: factor out allocating an array of pages
Several functions currently populate an array of page pointers one
allocated page at a time. Factor out the common code so as to allow
improvements to all of the sites at once.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit dd137dd1f2
parent 0d031dc4aa
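For orientation before the diff: every converted call site follows the same pattern, handing the new helper a zeroed array of page pointers and, on failure, freeing whichever entries did get filled in. The sketch below only illustrates that calling convention and is not code from this commit; demo_alloc_pages() and its parameter nr are hypothetical names.

/* Hedged sketch: demo_alloc_pages() is hypothetical, not part of this commit. */
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_alloc_pages(unsigned int nr)
{
	struct page **pages;
	unsigned int i;
	int ret;

	/* Zeroed array, so every slot starts out NULL (i.e. free). */
	pages = kcalloc(nr, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	/* Fill every NULL slot; returns -ENOMEM if any allocation fails. */
	ret = btrfs_alloc_page_array(nr, pages);

	/* ... on success, use the pages here ... */

	/* The array may be partially filled on failure; free what was allocated. */
	for (i = 0; i < nr; i++)
		if (pages[i])
			__free_page(pages[i]);
	kfree(pages);
	return ret;
}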
@@ -1552,11 +1552,9 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 		return -ENOMEM;
 	block_ctx->datav = block_ctx->mem_to_free;
 	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
-	for (i = 0; i < num_pages; i++) {
-		block_ctx->pagev[i] = alloc_page(GFP_NOFS);
-		if (!block_ctx->pagev[i])
-			return -1;
-	}
+	ret = btrfs_alloc_page_array(num_pages, block_ctx->pagev);
+	if (ret)
+		return ret;
 
 	dev_bytenr = block_ctx->dev_bytenr;
 	for (i = 0; i < num_pages;) {
@@ -809,8 +809,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	struct extent_map_tree *em_tree;
 	struct compressed_bio *cb;
 	unsigned int compressed_len;
-	unsigned int nr_pages;
-	unsigned int pg_index;
 	struct bio *comp_bio = NULL;
 	const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 	u64 cur_disk_byte = disk_bytenr;
@@ -820,7 +818,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	u64 em_start;
 	struct extent_map *em;
 	blk_status_t ret;
-	int faili = 0;
+	int ret2;
+	int i;
 	u8 *sums;
 
 	em_tree = &BTRFS_I(inode)->extent_tree;
@@ -863,24 +862,18 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	cb->compress_type = extent_compress_type(bio_flags);
 	cb->orig_bio = bio;
 
-	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
-	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
-				       GFP_NOFS);
+	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
+	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
 	if (!cb->compressed_pages) {
 		ret = BLK_STS_RESOURCE;
-		goto fail1;
+		goto fail;
 	}
 
-	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
-		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
-		if (!cb->compressed_pages[pg_index]) {
-			faili = pg_index - 1;
-			ret = BLK_STS_RESOURCE;
-			goto fail2;
-		}
+	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
+	if (ret2) {
+		ret = BLK_STS_RESOURCE;
+		goto fail;
 	}
-	faili = nr_pages - 1;
-	cb->nr_pages = nr_pages;
 
 	add_ra_bio_pages(inode, em_start + em_len, cb);
 
@@ -957,14 +950,15 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	}
 	return BLK_STS_OK;
 
-fail2:
-	while (faili >= 0) {
-		__free_page(cb->compressed_pages[faili]);
-		faili--;
+fail:
+	if (cb->compressed_pages) {
+		for (i = 0; i < cb->nr_pages; i++) {
+			if (cb->compressed_pages[i])
+				__free_page(cb->compressed_pages[i]);
+		}
 	}
 
 	kfree(cb->compressed_pages);
-fail1:
 	kfree(cb);
 out:
 	free_extent_map(em);
@@ -3133,6 +3133,34 @@ readpage_ok:
 	bio_put(bio);
 }
 
+/**
+ * Populate every free slot in a provided array with pages.
+ *
+ * @nr_pages:   number of pages to allocate
+ * @page_array: the array to fill with pages; any existing non-null entries in
+ *              the array will be skipped
+ *
+ * Return: 0        if all pages were able to be allocated;
+ *         -ENOMEM  otherwise, and the caller is responsible for freeing all
+ *                  non-null page pointers in the array.
+ */
+int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page;
+
+		if (page_array[i])
+			continue;
+		page = alloc_page(GFP_NOFS);
+		if (!page)
+			return -ENOMEM;
+		page_array[i] = page;
+	}
+	return 0;
+}
+
 /*
  * Initialize the members up to but not including 'bio'. Use after allocating a
  * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
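A consequence of the kernel-doc above worth spelling out: because the helper leaves non-NULL slots untouched, a caller can point it at just the tail of a partially populated array, which is how the raid56 hunk near the end of this commit handles the parity stripes. A minimal hedged sketch, with a hypothetical function name:

/* Hedged sketch: demo_fill_tail() is hypothetical, not part of this commit. */
static int demo_fill_tail(struct page **pages, unsigned int first,
			  unsigned int nr)
{
	/*
	 * Only slots [first, nr) are touched; slots that are still NULL get a
	 * fresh GFP_NOFS page, and slots that already hold a page are kept.
	 */
	return btrfs_alloc_page_array(nr - first, pages + first);
}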
@@ -5912,9 +5940,9 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
 {
 	int i;
-	struct page *p;
 	struct extent_buffer *new;
 	int num_pages = num_extent_pages(src);
+	int ret;
 
 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
 	if (new == NULL)
@@ -5927,22 +5955,23 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
 	 */
 	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
 
-	for (i = 0; i < num_pages; i++) {
-		int ret;
-
-		p = alloc_page(GFP_NOFS);
-		if (!p) {
-			btrfs_release_extent_buffer(new);
-			return NULL;
-		}
+	memset(new->pages, 0, sizeof(*new->pages) * num_pages);
+	ret = btrfs_alloc_page_array(num_pages, new->pages);
+	if (ret) {
+		btrfs_release_extent_buffer(new);
+		return NULL;
+	}
+
+	for (i = 0; i < num_pages; i++) {
+		int ret;
+		struct page *p = new->pages[i];
+
 		ret = attach_extent_buffer_page(new, p, NULL);
 		if (ret < 0) {
-			put_page(p);
 			btrfs_release_extent_buffer(new);
 			return NULL;
 		}
 		WARN_ON(PageDirty(p));
-		new->pages[i] = p;
 		copy_page(page_address(p), page_address(src->pages[i]));
 	}
 	set_extent_buffer_uptodate(new);
@@ -5956,31 +5985,36 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 	struct extent_buffer *eb;
 	int num_pages;
 	int i;
+	int ret;
 
 	eb = __alloc_extent_buffer(fs_info, start, len);
 	if (!eb)
 		return NULL;
 
 	num_pages = num_extent_pages(eb);
-	for (i = 0; i < num_pages; i++) {
-		int ret;
+	ret = btrfs_alloc_page_array(num_pages, eb->pages);
+	if (ret)
+		goto err;
 
-		eb->pages[i] = alloc_page(GFP_NOFS);
-		if (!eb->pages[i])
-			goto err;
-		ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
+	for (i = 0; i < num_pages; i++) {
+		struct page *p = eb->pages[i];
+
+		ret = attach_extent_buffer_page(eb, p, NULL);
 		if (ret < 0)
 			goto err;
 	}
+
 	set_extent_buffer_uptodate(eb);
 	btrfs_set_header_nritems(eb, 0);
 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
 	return eb;
 err:
-	for (; i > 0; i--) {
-		detach_extent_buffer_page(eb, eb->pages[i - 1]);
-		__free_page(eb->pages[i - 1]);
+	for (i = 0; i < num_pages; i++) {
+		if (eb->pages[i]) {
+			detach_extent_buffer_page(eb, eb->pages[i]);
+			__free_page(eb->pages[i]);
+		}
 	}
 	__free_extent_buffer(eb);
 	return NULL;
@@ -277,6 +277,8 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 				  struct page *locked_page,
 				  u32 bits_to_clear, unsigned long page_ops);
+
+int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array);
 struct bio *btrfs_bio_alloc(unsigned int nr_iovecs);
 struct bio *btrfs_bio_clone(struct bio *bio);
 struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
@@ -10461,13 +10461,11 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
 	if (!pages)
 		return -ENOMEM;
-	for (i = 0; i < nr_pages; i++) {
-		pages[i] = alloc_page(GFP_NOFS);
-		if (!pages[i]) {
-			ret = -ENOMEM;
-			goto out;
-		}
+	ret = btrfs_alloc_page_array(nr_pages, pages);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
 						    disk_io_size, pages);
@@ -1026,37 +1026,16 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 /* allocate pages for all the stripes in the bio, including parity */
 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
 {
-	int i;
-	struct page *page;
-
-	for (i = 0; i < rbio->nr_pages; i++) {
-		if (rbio->stripe_pages[i])
-			continue;
-		page = alloc_page(GFP_NOFS);
-		if (!page)
-			return -ENOMEM;
-		rbio->stripe_pages[i] = page;
-	}
-	return 0;
+	return btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
 }
 
 /* only allocate pages for p/q stripes */
 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
 {
-	int i;
-	struct page *page;
-
-	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
+	int data_pages = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
 
-	for (; i < rbio->nr_pages; i++) {
-		if (rbio->stripe_pages[i])
-			continue;
-		page = alloc_page(GFP_NOFS);
-		if (!page)
-			return -ENOMEM;
-		rbio->stripe_pages[i] = page;
-	}
-	return 0;
+	return btrfs_alloc_page_array(rbio->nr_pages - data_pages,
+				      rbio->stripe_pages + data_pages);
 }
 
 /*