slub: Extract hooks for memory checkers from hotpaths

Extract the code that memory checkers and other verification tools use out of
the hotpaths into dedicated hook functions. This makes it easier to add new
checkers and reduces disturbance of the hotpaths.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:
Christoph Lameter 2010-08-20 12:37:16 -05:00 committed by Pekka Enberg
parent 51df114281
commit c016b0bdee

View File

@ -790,6 +790,37 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
}
}
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
/*
 * Run the checks that must happen before an object is handed out.
 * Annotates the allocation for lockdep, warns if a sleeping allocation
 * is attempted from an atomic context (__GFP_WAIT), and consults the
 * fault-injection framework. Returns nonzero when should_failslab()
 * wants this allocation to fail; the caller then returns NULL.
 */
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
lockdep_trace_alloc(flags);
might_sleep_if(flags & __GFP_WAIT);
/* Nonzero => fault injection asked us to fail this allocation. */
return should_failslab(s->objsize, flags, s->flags);
}
/*
 * Notify the memory checkers about a freshly allocated object so they
 * can start tracking it: kmemcheck marks the object's bytes, kmemleak
 * records the allocation for leak detection.
 */
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
kmemcheck_slab_alloc(s, flags, object, s->objsize);
kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
}
/*
 * Free-side hook that may run before interrupts are disabled in
 * slab_free(): tell kmemleak the object is gone so it stops tracking it.
 */
static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
kmemleak_free_recursive(x, s->flags);
}
/*
 * Free-side checks that run with interrupts disabled (called from
 * slab_free() after local_irq_save()): kmemcheck marks the object freed,
 * and the debug infrastructure verifies no held locks or live debug
 * objects remain inside the freed memory.
 */
static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
{
kmemcheck_slab_free(s, object, s->objsize);
debug_check_no_locks_freed(object, s->objsize);
/* Skip when the cache's own debug-object handling already covers this. */
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, s->objsize);
}
/*
* Tracking of fully allocated slabs for debugging purposes.
*/
@ -1696,10 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
gfpflags &= gfp_allowed_mask;
lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT);
if (should_failslab(s->objsize, gfpflags, s->flags))
if (slab_pre_alloc_hook(s, gfpflags))
return NULL;
local_irq_save(flags);
@ -1718,8 +1746,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, s->objsize);
kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
slab_post_alloc_hook(s, gfpflags, object);
return object;
}
@ -1849,13 +1876,13 @@ static __always_inline void slab_free(struct kmem_cache *s,
struct kmem_cache_cpu *c;
unsigned long flags;
kmemleak_free_recursive(x, s->flags);
slab_free_hook(s, x);
local_irq_save(flags);
c = __this_cpu_ptr(s->cpu_slab);
kmemcheck_slab_free(s, object, s->objsize);
debug_check_no_locks_freed(object, s->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, s->objsize);
slab_free_hook_irq(s, x);
if (likely(page == c->page && c->node >= 0)) {
set_freepointer(s, object, c->freelist);
c->freelist = object;