debugobjects: Collect newly allocated objects in a list to reduce lock contention

Collect the newly allocated debug objects in a list outside the lock, so
that the lock hold time and the potential lock contention are reduced.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20240911083521.2257-3-thunder.leizhen@huawei.com
Link: https://lore.kernel.org/all/20241007164913.073653668@linutronix.de
Authored by Zhen Lei on 2024-10-07 18:49:53 +02:00; committed by Thomas Gleixner.
Parent: a0ae950408 — Commit: 813fd07858
(one file changed)

Changed file: lib/debugobjects.c (presumably — TODO confirm against the upstream tree)

@@ -161,23 +161,25 @@ static void fill_pool(void)
 		return;
 
 	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
-		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+		struct debug_obj *new, *last = NULL;
+		HLIST_HEAD(head);
 		int cnt;
 
 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
-			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
-			if (!new[cnt])
+			new = kmem_cache_zalloc(obj_cache, gfp);
+			if (!new)
 				break;
+			hlist_add_head(&new->node, &head);
+			if (!last)
+				last = new;
 		}
 		if (!cnt)
 			return;
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
-		while (cnt) {
-			hlist_add_head(&new[--cnt]->node, &obj_pool);
-			debug_objects_allocated++;
-			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
-		}
+		hlist_splice_init(&head, &last->node, &obj_pool);
+		debug_objects_allocated += cnt;
+		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
 }