Mirror of https://github.com/torvalds/linux.git
Commit bbe9d71f2c:

The kmem_cache_alloc implementation simply allocates new memory from
malloc() and calls the ctor, which zeroes out the entire object. This
means it cannot spot bugs where the object isn't properly reinitialised
before being freed.

Add a small (11 objects) cache before freeing objects back to malloc.
This is enough to let us write a test to catch it, although the memory
allocator is now aware of the structure of the radix tree node, since it
chains free objects through ->private_data (like the percpu cache does).

Link: http://lkml.kernel.org/r/1481667692-14500-2-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
116 lines | 2.2 KiB | C
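As a rough sketch of the bug class this cache exposes (a hypothetical check, not the actual test from the suite, relying on the shims defined in the file below): the ctor only runs for objects that come fresh from malloc(), so a node freed with stale contents and then recycled from the free list still carries its old values.

/* Hypothetical illustration only -- names are made up for this sketch. */
static void sketch_ctor(void *p)
{
        memset(p, 0, sizeof(struct radix_tree_node));
}

static void sketch_recycling(void)
{
        struct kmem_cache *cache = kmem_cache_create("sketch",
                        sizeof(struct radix_tree_node), 0, 0, sketch_ctor);
        struct radix_tree_node *node = kmem_cache_alloc(cache, 0);

        node->slots[0] = (void *)0x1;   /* dirty a field ...            */
        kmem_cache_free(cache, node);   /* ... and forget to clear it   */

        node = kmem_cache_alloc(cache, 0);      /* recycled; ctor skipped */
        assert(node->slots[0] == NULL);         /* trips now, but always
                                                   passed with malloc-only */
}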
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/mempool.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

/* Allocation and preemption bookkeeping shared with the rest of the test suite. */
int nr_allocated;
int preempt_count;

/*
 * Userspace stand-in for the kernel's kmem_cache.  Recently freed objects
 * are kept on a singly linked free list (->objs), chained through the
 * node's ->private_data pointer.
 */
struct kmem_cache {
        pthread_mutex_t lock;
        int size;
        int nr_objs;
        void *objs;
        void (*ctor)(void *);
};
void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
        return pool->alloc(gfp_mask, pool->data);
}

void mempool_free(void *element, mempool_t *pool)
{
        pool->free(element, pool->data);
}

mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                        mempool_free_t *free_fn, void *pool_data)
{
        mempool_t *ret = malloc(sizeof(*ret));

        ret->alloc = alloc_fn;
        ret->free = free_fn;
        ret->data = pool_data;
        return ret;
}
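/*
 * Illustrative use of the mempool shim above (not part of the upstream
 * file; helper names are made up): mempool_create() only records the two
 * callbacks and the opaque pool_data pointer, and mempool_alloc()/
 * mempool_free() simply forward to them.
 */
static void *example_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
        return malloc((size_t)(unsigned long)pool_data);
}

static void example_pool_free(void *element, void *pool_data)
{
        free(element);
}

static void example_pool_usage(void)
{
        mempool_t *pool = mempool_create(0, example_pool_alloc,
                        example_pool_free, (void *)(unsigned long)128);
        void *buf = mempool_alloc(pool, 0);

        mempool_free(buf, pool);
        free(pool);
}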
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
        struct radix_tree_node *node;

        /* Treat __GFP_NOWARN as a request to fail, so tests can exercise
         * allocation-failure paths. */
        if (flags & __GFP_NOWARN)
                return NULL;

        pthread_mutex_lock(&cachep->lock);
        if (cachep->nr_objs) {
                /* Recycle a cached object; note the ctor is not run again. */
                cachep->nr_objs--;
                node = cachep->objs;
                cachep->objs = node->private_data;
                pthread_mutex_unlock(&cachep->lock);
                node->private_data = NULL;
        } else {
                pthread_mutex_unlock(&cachep->lock);
                node = malloc(cachep->size);
                if (cachep->ctor)
                        cachep->ctor(node);
        }

        uatomic_inc(&nr_allocated);
        return node;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
        assert(objp);
        uatomic_dec(&nr_allocated);
        pthread_mutex_lock(&cachep->lock);
        if (cachep->nr_objs > 10) {
                /* Free list already holds 11 objects: poison and hand back
                 * to malloc. */
                memset(objp, POISON_FREE, cachep->size);
                free(objp);
        } else {
                /* Keep the object for reuse, chained through ->private_data. */
                struct radix_tree_node *node = objp;
                cachep->nr_objs++;
                node->private_data = cachep->objs;
                cachep->objs = node;
        }
        pthread_mutex_unlock(&cachep->lock);
}
void *kmalloc(size_t size, gfp_t gfp)
{
        void *ret = malloc(size);
        uatomic_inc(&nr_allocated);
        return ret;
}

void kfree(void *p)
{
        if (!p)
                return;
        uatomic_dec(&nr_allocated);
        free(p);
}
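/*
 * Illustrative only (not part of the upstream file; helper name is made
 * up): every allocation path above increments nr_allocated and every
 * free path decrements it, so a test can snapshot the counter and check
 * that nothing leaked.
 */
static void example_leak_check(void)
{
        int before = nr_allocated;
        void *p = kmalloc(64, 0);

        kfree(p);
        assert(nr_allocated == before);
}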
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
        unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *ret = malloc(sizeof(*ret));

        /* name, offset and flags are accepted for API compatibility but unused. */
        pthread_mutex_init(&ret->lock, NULL);
        ret->size = size;
        ret->nr_objs = 0;
        ret->objs = NULL;
        ret->ctor = ctor;
        return ret;
}