debugobjects: Track object usage to avoid premature freeing of objects
Objects on the free list are freed at a constant rate, independent of the actual usage requirements. That's bad in scenarios where usage comes in bursts: the end of a burst puts the objects on the free list, and freeing proceeds even when the next burst, which requires objects, has already started. Keep track of usage with an exponentially weighted moving average and take that into account in the worker function which frees objects from the free list.

This further reduces the kmem_cache allocation/free rate for a full kernel compile:

              kmem_cache_alloc()   kmem_cache_free()
  Baseline:   225k                 173k
  Usage:      170k                 117k

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>
Link: https://lore.kernel.org/all/87bjznhme2.ffs@tglx
parent 13f9ca7239
commit ff8d523cc4
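The "exponentially weighted moving average" in the changelog is implemented with calc_load() from include/linux/sched/loadavg.h, invoked here with the EXP_5 coefficient. As a rough illustration, the self-contained user-space sketch below mirrors that fixed-point update (the constants and arithmetic follow the kernel header as of recent kernels; the burst loop and its numbers are made up) and shows how a usage spike decays gradually instead of vanishing at the next sample:

#include <stdio.h>

/*
 * Fixed-point EWMA in the style of calc_load(): each sample keeps
 * exp/FIXED_1 of the old average and blends in the rest from the new
 * value. EXP_5 = 2014 means roughly 98.3% of the old average survives
 * every update.
 */
#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)
#define EXP_5   2014

static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        unsigned long newload;

        newload = load * exp + active * (FIXED_1 - exp);
        if (active >= load)
                newload += FIXED_1 - 1;
        return newload / FIXED_1;
}

int main(void)
{
        unsigned long avg = 0;

        /* A burst of 10000 in-use objects for 50 samples, then idle. */
        for (int i = 0; i < 200; i++) {
                unsigned long cur = (i < 50) ? 10000 : 0;

                avg = calc_load(avg, EXP_5, cur);
                if (i % 25 == 0)
                        printf("sample %3d: cur=%5lu avg=%5lu\n", i, cur, avg);
        }
        return 0;
}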
lib/debugobjects.c

@@ -13,6 +13,7 @@
 #include <linux/hash.h>
 #include <linux/kmemleak.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/sched/task_stack.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -86,6 +87,7 @@ static struct obj_pool pool_to_free = {
 
 static HLIST_HEAD(pool_boot);
 
+static unsigned long avg_usage;
 static bool obj_freeing;
 
 static int __data_racy debug_objects_maxchain __read_mostly;
@@ -427,11 +429,31 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
         return NULL;
 }
 
+static void calc_usage(void)
+{
+        static DEFINE_RAW_SPINLOCK(avg_lock);
+        static unsigned long avg_period;
+        unsigned long cur, now = jiffies;
+
+        if (!time_after_eq(now, READ_ONCE(avg_period)))
+                return;
+
+        if (!raw_spin_trylock(&avg_lock))
+                return;
+
+        WRITE_ONCE(avg_period, now + msecs_to_jiffies(10));
+        cur = READ_ONCE(pool_global.stats.cur_used) * ODEBUG_FREE_WORK_MAX;
+        WRITE_ONCE(avg_usage, calc_load(avg_usage, EXP_5, cur));
+        raw_spin_unlock(&avg_lock);
+}
+
 static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
                                       const struct debug_obj_descr *descr)
 {
         struct debug_obj *obj;
 
+        calc_usage();
+
         if (static_branch_likely(&obj_cache_enabled))
                 obj = pcpu_alloc();
         else
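The calc_usage() helper added above refreshes the average at most once per 10ms and simply gives up when another CPU is already updating it, so the allocation fast path never waits on the averaging. Below is a rough user-space analogue of that gating pattern; pthread_mutex_trylock() and a monotonic clock stand in for raw_spin_trylock() and jiffies, and the simplified EWMA is only a placeholder for calc_load(). All names and values here are illustrative, not part of the patch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t sample_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic long long next_sample_ns;        /* next time an update is allowed */
static _Atomic unsigned long avg_usage;

static long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void sample_usage(unsigned long cur_used)
{
        long long now = now_ns();

        if (now < atomic_load(&next_sample_ns))
                return;                         /* sampled recently enough */
        if (pthread_mutex_trylock(&sample_lock) != 0)
                return;                         /* someone else is sampling */

        atomic_store(&next_sample_ns, now + 10 * 1000 * 1000LL);       /* 10 ms */

        /* Simplified EWMA: keep 7/8 of the old average, blend in 1/8 new. */
        unsigned long old = atomic_load(&avg_usage);
        atomic_store(&avg_usage, old - (old >> 3) + (cur_used >> 3));

        pthread_mutex_unlock(&sample_lock);
}

int main(void)
{
        /* Only the first call updates; the rest fall inside the 10ms window. */
        for (int i = 1; i <= 5; i++)
                sample_usage(1000 * i);
        printf("avg_usage = %lu\n", (unsigned long)atomic_load(&avg_usage));
        return 0;
}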
@@ -450,14 +472,26 @@ static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
 /* workqueue function to free objects. */
 static void free_obj_work(struct work_struct *work)
 {
-        bool free = true;
+        static unsigned long last_use_avg;
+        unsigned long cur_used, last_used, delta;
+        unsigned int max_free = 0;
 
         WRITE_ONCE(obj_freeing, false);
+
+        /* Rate limit freeing based on current use average */
+        cur_used = READ_ONCE(avg_usage);
+        last_used = last_use_avg;
+        last_use_avg = cur_used;
+
         if (!pool_count(&pool_to_free))
                 return;
 
-        for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
+        if (cur_used <= last_used) {
+                delta = (last_used - cur_used) / ODEBUG_FREE_WORK_MAX;
+                max_free = min(delta, ODEBUG_FREE_WORK_MAX);
+        }
+
+        for (int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
                 HLIST_HEAD(tofree);
 
                 /* Acquire and drop the lock for each batch */
@@ -468,9 +502,10 @@ static void free_obj_work(struct work_struct *work)
                 /* Refill the global pool if possible */
                 if (pool_move_batch(&pool_global, &pool_to_free)) {
                         /* Don't free as there seems to be demand */
-                        free = false;
-                } else if (free) {
+                        max_free = 0;
+                } else if (max_free) {
                         pool_pop_batch(&tofree, &pool_to_free);
+                        max_free--;
                 } else {
                         return;
                 }
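Taken together, the free_obj_work() changes only release memory while the smoothed usage is flat or falling, and cap how many batches one worker invocation may free in proportion to the drop since the previous run. The standalone sketch below replays just that throttle computation; the ODEBUG_FREE_WORK_MAX value and the usage samples are invented for illustration.

#include <stdio.h>

#define ODEBUG_FREE_WORK_MAX    64UL    /* illustrative cap, not the kernel's value */

/* How many batches may be freed, given the EWMA from this run and the last. */
static unsigned int throttle(unsigned long cur_used, unsigned long last_used)
{
        unsigned long delta;

        /* Usage is rising: keep everything cached, free nothing. */
        if (cur_used > last_used)
                return 0;

        /* Usage flat or falling: free in proportion to the drop. */
        delta = (last_used - cur_used) / ODEBUG_FREE_WORK_MAX;
        return delta < ODEBUG_FREE_WORK_MAX ? delta : ODEBUG_FREE_WORK_MAX;
}

int main(void)
{
        /* Made-up avg_usage readings from consecutive worker runs. */
        printf("rising  (6000 -> 9000): %u batches\n", throttle(9000, 6000));
        printf("falling (9000 -> 6000): %u batches\n", throttle(6000, 9000));
        printf("flat    (6000 -> 6000): %u batches\n", throttle(6000, 6000));
        return 0;
}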
@@ -1110,7 +1145,7 @@ static int debug_stats_show(struct seq_file *m, void *v)
         for_each_possible_cpu(cpu)
                 pcp_free += per_cpu(pool_pcpu.cnt, cpu);
 
-        pool_used = data_race(pool_global.stats.cur_used);
+        pool_used = READ_ONCE(pool_global.stats.cur_used);
         pcp_free = min(pool_used, pcp_free);
         pool_used -= pcp_free;
 