commit cb6fc943b6
parent 0225bdfafd

bcachefs: kill kvpmalloc()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
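Every hunk in this patch makes the same mechanical substitution: kvpmalloc(size, gfp) becomes kvmalloc(size, gfp), kvpfree(ptr, size) becomes kvfree(ptr), and mempool_init_kvpmalloc_pool() becomes mempool_init_kvmalloc_pool(); the private vpmalloc()/vpfree()/kvpmalloc()/kvpfree() helpers and the kvpmalloc mempool glue are then deleted outright in the final hunks. The sketch below shows what the conversion looks like at a typical call site; it is illustrative only, and bch_foo_buf with its alloc/free helpers are hypothetical names, not symbols from this patch.

/* Illustrative sketch of the call-site conversion, not code from the patch. */
#include <linux/mm.h>
#include <linux/slab.h>	/* kvmalloc()/kvfree(); declared in mm.h or slab.h depending on kernel version */

struct bch_foo_buf {		/* hypothetical example structure */
	void	*data;
	size_t	bytes;
};

static int bch_foo_buf_alloc(struct bch_foo_buf *buf, size_t bytes, gfp_t gfp)
{
	/*
	 * Before: buf->data = kvpmalloc(bytes, gfp);
	 * kvpmalloc() was a bcachefs-private helper: kmalloc() for sub-page
	 * sizes, otherwise __get_free_pages() with a __vmalloc() fallback.
	 */
	buf->data = kvmalloc(bytes, gfp);
	if (!buf->data)
		return -ENOMEM;
	buf->bytes = bytes;
	return 0;
}

static void bch_foo_buf_free(struct bch_foo_buf *buf)
{
	/*
	 * Before: kvpfree(buf->data, buf->bytes);
	 * kvfree() picks kfree() vs vfree() from the pointer itself
	 * (is_vmalloc_addr()), so no size argument is needed.
	 */
	kvfree(buf->data);
	buf->data = NULL;
}

Because the free side no longer needs the allocation size, callers stop carrying size expressions around just to free a buffer, which is why several hunks below shrink rather than merely rename the call.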
@@ -60,7 +60,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 
 	clear_btree_node_just_written(b);
 
-	kvpfree(b->data, btree_buf_bytes(b));
+	kvfree(b->data);
 	b->data = NULL;
 #ifdef __KERNEL__
 	kvfree(b->aux_data);
@@ -94,7 +94,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 {
 	BUG_ON(b->data || b->aux_data);
 
-	b->data = kvpmalloc(btree_buf_bytes(b), gfp);
+	b->data = kvmalloc(btree_buf_bytes(b), gfp);
 	if (!b->data)
 		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
@@ -107,7 +107,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 		b->aux_data = NULL;
 #endif
 	if (!b->aux_data) {
-		kvpfree(b->data, btree_buf_bytes(b));
+		kvfree(b->data);
 		b->data = NULL;
 		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 	}
@@ -408,7 +408,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	if (c->verify_data)
 		list_move(&c->verify_data->list, &bc->live);
 
-	kvpfree(c->verify_ondisk, c->opts.btree_node_size);
+	kvfree(c->verify_ondisk);
 
 	for (i = 0; i < btree_id_nr_alive(c); i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
@@ -1193,9 +1193,7 @@ static void bch2_gc_free(struct bch_fs *c)
 	genradix_free(&c->gc_stripes);
 
 	for_each_member_device(c, ca) {
-		kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
-			sizeof(struct bucket_array) +
-			ca->mi.nbuckets * sizeof(struct bucket));
+		kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
 		ca->buckets_gc = NULL;
 
 		free_percpu(ca->usage_gc);
@@ -1494,7 +1492,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 {
 	for_each_member_device(c, ca) {
-		struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
+		struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
 			ca->mi.nbuckets * sizeof(struct bucket),
 			GFP_KERNEL|__GFP_ZERO);
 		if (!buckets) {
@@ -103,7 +103,7 @@ static void btree_bounce_free(struct bch_fs *c, size_t size,
 	if (used_mempool)
 		mempool_free(p, &c->btree_bounce_pool);
 	else
-		vpfree(p, size);
+		kvfree(p);
 }
 
 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
@@ -115,7 +115,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
 	BUG_ON(size > c->opts.btree_node_size);
 
 	*used_mempool = false;
-	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
 	if (!p) {
 		*used_mempool = true;
 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
@@ -447,9 +447,7 @@ void bch2_journal_entries_free(struct bch_fs *c)
 	struct genradix_iter iter;
 
 	genradix_for_each(&c->journal_entries, iter, i)
 		if (*i)
-			kvpfree(*i, offsetof(struct journal_replay, j) +
-				vstruct_bytes(&(*i)->j));
+			kvfree(*i);
 	genradix_free(&c->journal_entries);
 }
 
@@ -1335,7 +1335,7 @@ static void bucket_gens_free_rcu(struct rcu_head *rcu)
 	struct bucket_gens *buckets =
 		container_of(rcu, struct bucket_gens, rcu);
 
-	kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
+	kvfree(buckets);
 }
 
 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
@@ -1345,14 +1345,14 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	bool resize = ca->bucket_gens != NULL;
 	int ret;
 
-	if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
+	if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets,
 				      GFP_KERNEL|__GFP_ZERO))) {
 		ret = -BCH_ERR_ENOMEM_bucket_gens;
 		goto err;
 	}
 
 	if ((c->opts.buckets_nouse &&
-	     !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+	     !(buckets_nouse = kvmalloc(BITS_TO_LONGS(nbuckets) *
 					sizeof(unsigned long),
 					GFP_KERNEL|__GFP_ZERO)))) {
 		ret = -BCH_ERR_ENOMEM_buckets_nouse;
@@ -1397,8 +1397,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 
 	ret = 0;
 err:
-	kvpfree(buckets_nouse,
-		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+	kvfree(buckets_nouse);
 	if (bucket_gens)
 		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
 
@@ -1407,27 +1406,21 @@ err:
 
 void bch2_dev_buckets_free(struct bch_dev *ca)
 {
-	unsigned i;
+	kvfree(ca->buckets_nouse);
+	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
 
-	kvpfree(ca->buckets_nouse,
-		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
-	kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
-		sizeof(struct bucket_gens) + ca->mi.nbuckets);
-
-	for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
+	for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++)
 		free_percpu(ca->usage[i]);
 	kfree(ca->usage_base);
 }
 
 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 {
-	unsigned i;
-
 	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
 	if (!ca->usage_base)
 		return -BCH_ERR_ENOMEM_usage_init;
 
-	for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
+	for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) {
 		ca->usage[i] = alloc_percpu(struct bch_dev_usage);
 		if (!ca->usage[i])
 			return -BCH_ERR_ENOMEM_usage_init;
@@ -601,12 +601,12 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 		return 0;
 
 	if (!mempool_initialized(&c->compression_bounce[READ]) &&
-	    mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+	    mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
 				       1, c->opts.encoded_extent_max))
 		return -BCH_ERR_ENOMEM_compression_bounce_read_init;
 
 	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
-	    mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+	    mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
 				       1, c->opts.encoded_extent_max))
 		return -BCH_ERR_ENOMEM_compression_bounce_write_init;
 
@@ -622,14 +622,14 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 		if (mempool_initialized(&c->compress_workspace[i->type]))
 			continue;
 
-		if (mempool_init_kvpmalloc_pool(
+		if (mempool_init_kvmalloc_pool(
 				&c->compress_workspace[i->type],
 				1, i->compress_workspace))
 			return -BCH_ERR_ENOMEM_compression_workspace_init;
 	}
 
 	if (!mempool_initialized(&c->decompress_workspace) &&
-	    mempool_init_kvpmalloc_pool(&c->decompress_workspace,
+	    mempool_init_kvmalloc_pool(&c->decompress_workspace,
 				       1, decompress_workspace_size))
 		return -BCH_ERR_ENOMEM_decompression_workspace_init;
 
@@ -137,7 +137,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 	mutex_lock(&c->verify_lock);
 
 	if (!c->verify_ondisk) {
-		c->verify_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
+		c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
 		if (!c->verify_ondisk)
 			goto out;
 	}
@@ -199,7 +199,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
 		return;
 	}
 
-	n_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
+	n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
 	if (!n_ondisk) {
 		prt_printf(out, "memory allocation failure\n");
 		goto out;
@@ -293,7 +293,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
 out:
 	if (bio)
 		bio_put(bio);
-	kvpfree(n_ondisk, btree_buf_bytes(b));
+	kvfree(n_ondisk);
 	percpu_ref_put(&ca->io_ref);
 }
 
@@ -504,7 +504,7 @@ static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
 		unsigned i;
 
 		for (i = 0; i < s->v.nr_blocks; i++) {
-			kvpfree(buf->data[i], buf->size << 9);
+			kvfree(buf->data[i]);
 			buf->data[i] = NULL;
 		}
 	}
@@ -531,7 +531,7 @@ static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
 	memset(buf->valid, 0xFF, sizeof(buf->valid));
 
 	for (i = 0; i < v->nr_blocks; i++) {
-		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
+		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
 		if (!buf->data[i])
 			goto err;
 	}
@@ -24,12 +24,12 @@ struct { \
 	(fifo)->mask = (fifo)->size					\
 		? roundup_pow_of_two((fifo)->size) - 1			\
 		: 0;							\
-	(fifo)->data = kvpmalloc(fifo_buf_size(fifo), (_gfp));		\
+	(fifo)->data = kvmalloc(fifo_buf_size(fifo), (_gfp));		\
 })
 
 #define free_fifo(fifo)							\
 do {									\
-	kvpfree((fifo)->data, fifo_buf_size(fifo));			\
+	kvfree((fifo)->data);						\
 	(fifo)->data = NULL;						\
 } while (0)
 
@@ -1343,7 +1343,7 @@ void bch2_fs_journal_exit(struct journal *j)
 	darray_exit(&j->early_journal_entries);
 
 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
-		kvpfree(j->buf[i].data, j->buf[i].buf_size);
+		kvfree(j->buf[i].data);
 	free_fifo(&j->pin);
 }
 
@@ -1372,7 +1372,7 @@ int bch2_fs_journal_init(struct journal *j)
 
 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
 		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
-		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
+		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
 		if (!j->buf[i].data)
 			return -BCH_ERR_ENOMEM_journal_buf;
 		j->buf[i].idx = i;
@@ -84,8 +84,7 @@ static void __journal_replay_free(struct bch_fs *c,
 
 	BUG_ON(*p != i);
 	*p = NULL;
-	kvpfree(i, offsetof(struct journal_replay, j) +
-		vstruct_bytes(&i->j));
+	kvfree(i);
 }
 
 static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
@@ -196,7 +195,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
 		goto out;
 	}
 replace:
-	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
+	i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
 	if (!i)
 		return -BCH_ERR_ENOMEM_journal_entry_add;
 
@@ -965,11 +964,11 @@ static int journal_read_buf_realloc(struct journal_read_buf *b,
 		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
 	new_size = roundup_pow_of_two(new_size);
-	n = kvpmalloc(new_size, GFP_KERNEL);
+	n = kvmalloc(new_size, GFP_KERNEL);
 	if (!n)
 		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
-	kvpfree(b->data, b->size);
+	kvfree(b->data);
 	b->data = n;
 	b->size = new_size;
 	return 0;
@@ -1195,7 +1194,7 @@ found:
 	ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
 	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
-	kvpfree(buf.data, buf.size);
+	kvfree(buf.data);
 	percpu_ref_put(&ca->io_ref);
 	closure_return(cl);
 	return;
@@ -1576,7 +1575,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
 	if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
 		return;
 
-	new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
+	new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
 	if (!new_buf)
 		return;
 
@@ -1587,7 +1586,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
 	swap(buf->buf_size, new_size);
 	spin_unlock(&j->lock);
 
-	kvpfree(new_buf, new_size);
+	kvfree(new_buf);
 }
 
 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
@@ -576,7 +576,7 @@ static void __bch2_fs_free(struct bch_fs *c)
 		destroy_workqueue(c->btree_update_wq);
 
 	bch2_free_super(&c->disk_sb);
-	kvpfree(c, sizeof(*c));
+	kvfree(c);
 	module_put(THIS_MODULE);
 }
 
@@ -715,7 +715,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	unsigned i, iter_size;
 	int ret = 0;
 
-	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
+	c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
 	if (!c) {
 		c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
 		goto out;
@@ -882,7 +882,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 			BIOSET_NEED_BVECS) ||
 	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
 	    !(c->online_reserved = alloc_percpu(u64)) ||
-	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
+	    mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
 					c->opts.btree_node_size) ||
 	    mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
 	    !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
@@ -1007,28 +1007,6 @@ void sort_cmp_size(void *base, size_t num, size_t size,
 	}
 }
 
-static void mempool_free_vp(void *element, void *pool_data)
-{
-	size_t size = (size_t) pool_data;
-
-	vpfree(element, size);
-}
-
-static void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
-{
-	size_t size = (size_t) pool_data;
-
-	return vpmalloc(size, gfp_mask);
-}
-
-int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
-	return size < PAGE_SIZE
-		? mempool_init_kmalloc_pool(pool, min_nr, size)
-		: mempool_init(pool, min_nr, mempool_alloc_vp,
-			       mempool_free_vp, (void *) size);
-}
-
 #if 0
 void eytzinger1_test(void)
 {
@@ -53,38 +53,6 @@ static inline size_t buf_pages(void *p, size_t len)
 			    PAGE_SIZE);
 }
 
-static inline void vpfree(void *p, size_t size)
-{
-	if (is_vmalloc_addr(p))
-		vfree(p);
-	else
-		free_pages((unsigned long) p, get_order(size));
-}
-
-static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
-{
-	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
-					 get_order(size)) ?:
-		__vmalloc(size, gfp_mask);
-}
-
-static inline void kvpfree(void *p, size_t size)
-{
-	if (size < PAGE_SIZE)
-		kfree(p);
-	else
-		vpfree(p, size);
-}
-
-static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
-{
-	return size < PAGE_SIZE
-		? kmalloc(size, gfp_mask)
-		: vpmalloc(size, gfp_mask);
-}
-
-int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
-
 #define HEAP(type)							\
 struct {								\
 	size_t size, used;						\
@@ -97,13 +65,13 @@ struct { \
 ({									\
 	(heap)->used = 0;						\
 	(heap)->size = (_size);						\
-	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
+	(heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
 			 (gfp));					\
 })
 
 #define free_heap(heap)							\
 do {									\
-	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
+	kvfree((heap)->data);						\
 	(heap)->data = NULL;						\
 } while (0)
 