mm: kmemleak: make the tool tolerant to struct scan_area allocation failures
Patch series "mm: kmemleak: Use a memory pool for kmemleak object allocations", v3.

Following the discussions on v2 of this patch(set) [1], this series takes a
slightly different approach:

- it implements its own simple memory pool that does not rely on the slab
  allocator

- drops the early log buffer logic entirely since it can now allocate
  metadata from the memory pool directly before kmemleak is fully
  initialised

- CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option is renamed to
  CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE

- moves the kmemleak_init() call earlier (mm_init())

- to avoid a separate memory pool for struct scan_area, it makes the tool
  robust when such allocations fail as scan areas are rather an
  optimisation

[1] http://lkml.kernel.org/r/20190727132334.9184-1-catalin.marinas@arm.com

This patch (of 3):

Object scan areas are an optimisation aimed to decrease the false positives
and slightly improve the scanning time of large objects known to only have
a few specific pointers.  If a struct scan_area fails to allocate, kmemleak
can still function normally by scanning the full object.

Introduce an OBJECT_FULL_SCAN flag and mark objects as such when scan_area
allocation fails.

Link: http://lkml.kernel.org/r/20190812160642.52134-2-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit dba82d9431
parent b751c52bb5
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -168,6 +168,8 @@ struct kmemleak_object {
 #define OBJECT_REPORTED		(1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN		(1 << 2)
+/* flag set to fully scan the object when scan_area allocation failed */
+#define OBJECT_FULL_SCAN	(1 << 3)
 
 #define HEX_PREFIX		"    "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -773,12 +775,14 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	}
 
 	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
-	if (!area) {
-		pr_warn("Cannot allocate a scan area\n");
-		goto out;
-	}
 
 	spin_lock_irqsave(&object->lock, flags);
+	if (!area) {
+		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
+		/* mark the object for full scan to avoid false positives */
+		object->flags |= OBJECT_FULL_SCAN;
+		goto out_unlock;
+	}
 	if (size == SIZE_MAX) {
 		size = object->pointer + object->size - ptr;
 	} else if (ptr + size > object->pointer + object->size) {
@@ -795,7 +799,6 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
 	spin_unlock_irqrestore(&object->lock, flags);
-out:
 	put_object(object);
 }
 
@@ -1408,7 +1411,8 @@ static void scan_object(struct kmemleak_object *object)
 	if (!(object->flags & OBJECT_ALLOCATED))
 		/* already freed object */
 		goto out;
-	if (hlist_empty(&object->area_list)) {
+	if (hlist_empty(&object->area_list) ||
+	    object->flags & OBJECT_FULL_SCAN) {
 		void *start = (void *)object->pointer;
 		void *end = (void *)(object->pointer + object->size);
 		void *next;
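For illustration only, here is a minimal, self-contained userspace sketch of the fallback pattern this patch introduces: when the metadata for a scan area cannot be allocated, the object is flagged and the scanner simply walks the whole object range. The names used here (struct object, OBJ_FULL_SCAN, add_scan_area, scan_object) are simplified stand-ins and do not match mm/kmemleak.c; the real change is the diff above.

/*
 * Userspace sketch of the "tolerate scan_area allocation failure" pattern.
 * Not kernel code; names and layout are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define OBJ_FULL_SCAN	(1 << 0)	/* fall back to scanning the whole object */

struct scan_area {
	struct scan_area *next;
	unsigned long start;
	size_t size;
};

struct object {
	unsigned long pointer;		/* start of the tracked allocation */
	size_t size;			/* length of the tracked allocation */
	unsigned int flags;
	struct scan_area *areas;	/* optional list of sub-ranges to scan */
};

/* Register a sub-range to scan; tolerate allocation failure. */
static void add_scan_area(struct object *obj, unsigned long start, size_t size)
{
	struct scan_area *area = malloc(sizeof(*area));

	if (!area) {
		/* optimisation lost, correctness kept: scan everything */
		obj->flags |= OBJ_FULL_SCAN;
		return;
	}
	area->start = start;
	area->size = size;
	area->next = obj->areas;
	obj->areas = area;
}

/* Scan the registered sub-ranges or, as a fallback, the full object. */
static void scan_object(const struct object *obj)
{
	if (!obj->areas || (obj->flags & OBJ_FULL_SCAN)) {
		printf("full scan: [%#lx, %#lx)\n",
		       obj->pointer, obj->pointer + (unsigned long)obj->size);
		return;
	}
	for (const struct scan_area *a = obj->areas; a; a = a->next)
		printf("area scan: [%#lx, %#lx)\n",
		       a->start, a->start + (unsigned long)a->size);
}

int main(void)
{
	struct object obj = { .pointer = 0x1000, .size = 256, .flags = 0, .areas = NULL };

	add_scan_area(&obj, 0x1010, 16);	/* normally only this range would be scanned */
	obj.flags |= OBJ_FULL_SCAN;		/* simulate a failed scan_area allocation */
	scan_object(&obj);			/* falls back to the full 256-byte range */

	while (obj.areas) {			/* tidy up the demo allocation */
		struct scan_area *a = obj.areas;
		obj.areas = a->next;
		free(a);
	}
	return 0;
}

In the kernel patch itself, note that the !area check is moved after spin_lock_irqsave(&object->lock, ...), so OBJECT_FULL_SCAN is set with the object lock held, and pr_warn() becomes pr_warn_once() so a failing slab allocation does not flood the log.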