From 40323278b557a5909bbecfa181c91a3af7afbbe3 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Tue, 15 Mar 2016 14:54:18 -0700
Subject: [PATCH] mm/slab: use more appropriate condition check for debug_pagealloc

debug_pagealloc debugging is related to SLAB_POISON flag rather than
FORCED_DEBUG option, although FORCED_DEBUG option will enable
SLAB_POISON.  Fix it.

Signed-off-by: Joonsoo Kim
Acked-by: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Cc: Jesper Dangaard Brouer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 4807cf4fe1b4..8bca9be5d557 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2169,7 +2169,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                 else
                         size += BYTES_PER_WORD;
         }
-#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
         /*
          * To activate debug pagealloc, off-slab management is necessary
          * requirement. In early phase of initialization, small sized slab
@@ -2177,14 +2176,13 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
          * to check size >= 256. It guarantees that all necessary small
          * sized slab is initialized in current slab initialization sequence.
          */
-        if (debug_pagealloc_enabled() &&
+        if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
                 !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
                 size >= 256 && cachep->object_size > cache_line_size() &&
                 ALIGN(size, cachep->align) < PAGE_SIZE) {
                 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
                 size = PAGE_SIZE;
         }
-#endif
 #endif
 
         /*