mirror of
https://github.com/torvalds/linux.git
lib/stackdepot: rework helpers for depot_alloc_stack
Split code in depot_alloc_stack and depot_init_pool into 3 functions:

1. depot_keep_next_pool that keeps preallocated memory for the next pool
   if required.

2. depot_update_pools that moves on to the next pool if there's no space
   left in the current pool, uses preallocated memory for the new current
   pool if required, and calls depot_keep_next_pool otherwise.

3. depot_alloc_stack that calls depot_update_pools and then allocates
   a stack record as before.

This makes it somewhat easier to follow the logic of depot_alloc_stack
and also serves as a preparation for implementing the eviction of stack
records from the stack depot.

Link: https://lkml.kernel.org/r/71fb144d42b701fcb46708d7f4be6801a4a8270e.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fcccc41ecb
commit 94b7d32870
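Before the diff itself, here is a minimal, user-space C sketch of the control flow the commit message describes; it is not the kernel code. The pool lock, WRITE_ONCE()/smp_store_release()/smp_load_acquire() annotations and handle bookkeeping are omitted, and DEPOT_MAX_POOLS, DEPOT_POOL_SIZE and RECORD_SIZE below are illustrative stand-ins rather than the kernel's values.

/*
 * Simplified sketch of the reworked stack depot pool management.
 * Mirrors the structure of depot_keep_next_pool(), depot_update_pools()
 * and depot_alloc_stack(), with kernel-only details dropped.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define DEPOT_MAX_POOLS	16
#define DEPOT_POOL_SIZE	4096
#define RECORD_SIZE	64	/* stand-in for DEPOT_STACK_RECORD_SIZE */

static void *stack_pools[DEPOT_MAX_POOLS];
static int pool_index;
static size_t pool_offset;
static int next_pool_required = 1;

/* Keep the preallocated buffer for the next pool, if one is still needed. */
static void depot_keep_next_pool(void **prealloc)
{
	if (!next_pool_required)
		return;

	/* Save the buffer for the next pool unless the pool limit is hit. */
	if (pool_index + 1 < DEPOT_MAX_POOLS) {
		stack_pools[pool_index + 1] = *prealloc;
		*prealloc = NULL;
	}

	/* Either the next pool is kept or the limit is reached. */
	next_pool_required = 0;
}

/* Move on to the next pool when the current one is full; place prealloc. */
static bool depot_update_pools(size_t required_size, void **prealloc)
{
	if (pool_offset + required_size > DEPOT_POOL_SIZE) {
		if (pool_index + 1 >= DEPOT_MAX_POOLS)
			return false;	/* limit capacity reached */

		pool_index++;
		pool_offset = 0;
		if (pool_index + 1 < DEPOT_MAX_POOLS)
			next_pool_required = 1;
	}

	/* Use the preallocated buffer for the current pool if needed ... */
	if (*prealloc && stack_pools[pool_index] == NULL) {
		stack_pools[pool_index] = *prealloc;
		*prealloc = NULL;
		return true;
	}

	/* ... otherwise try to keep it for the next pool. */
	if (*prealloc)
		depot_keep_next_pool(prealloc);

	return true;
}

/* Reserve space for one record in the current pool. */
static void *depot_alloc_stack(size_t required_size, void **prealloc)
{
	char *rec;

	if (!depot_update_pools(required_size, prealloc))
		return NULL;

	if (stack_pools[pool_index] == NULL)
		return NULL;

	rec = (char *)stack_pools[pool_index] + pool_offset;
	pool_offset += required_size;
	return rec;
}

int main(void)
{
	void *prealloc = malloc(DEPOT_POOL_SIZE);
	void *rec = depot_alloc_stack(RECORD_SIZE, &prealloc);

	printf("record at %p, preallocated buffer %s\n",
	       rec, prealloc ? "kept unused" : "consumed");
	free(prealloc);		/* no-op when the buffer was consumed */
	return 0;
}

Having depot_update_pools() return a bool lets depot_alloc_stack() bail out as soon as the pool limit is reached, which matches the early-return structure the patch introduces.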
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -225,11 +225,11 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
-/* Uses preallocated memory to initialize a new stack depot pool. */
-static void depot_init_pool(void **prealloc)
+/* Keeps the preallocated memory to be used for the next stack depot pool. */
+static void depot_keep_next_pool(void **prealloc)
 {
 	/*
-	 * If the next pool is already initialized or the maximum number of
+	 * If the next pool is already saved or the maximum number of
 	 * pools is reached, do not use the preallocated memory.
 	 * Access next_pool_required non-atomically, as there are no concurrent
 	 * write accesses to this variable.
@@ -237,44 +237,34 @@ static void depot_init_pool(void **prealloc)
 	if (!next_pool_required)
 		return;
 
-	/* Check if the current pool is not yet allocated. */
-	if (stack_pools[pool_index] == NULL) {
-		/* Use the preallocated memory for the current pool. */
-		stack_pools[pool_index] = *prealloc;
+	/*
+	 * Use the preallocated memory for the next pool
+	 * as long as we do not exceed the maximum number of pools.
+	 */
+	if (pool_index + 1 < DEPOT_MAX_POOLS) {
+		stack_pools[pool_index + 1] = *prealloc;
 		*prealloc = NULL;
-	} else {
-		/*
-		 * Otherwise, use the preallocated memory for the next pool
-		 * as long as we do not exceed the maximum number of pools.
-		 */
-		if (pool_index + 1 < DEPOT_MAX_POOLS) {
-			stack_pools[pool_index + 1] = *prealloc;
-			*prealloc = NULL;
-		}
 	}
-	/*
-	 * At this point, either the next pool is initialized or the
-	 * maximum number of pools is reached. In either case, take
-	 * note that initializing another pool is not required.
-	 * smp_store_release() pairs with smp_load_acquire() in
-	 * stack_depot_save().
-	 */
-	smp_store_release(&next_pool_required, 0);
-}
+
+	/*
+	 * At this point, either the next pool is kept or the maximum
+	 * number of pools is reached. In either case, take note that
+	 * keeping another pool is not required.
+	 * smp_store_release() pairs with smp_load_acquire() in
+	 * stack_depot_save().
+	 */
+	smp_store_release(&next_pool_required, 0);
+}
 
-/* Allocates a new stack in a stack depot pool. */
-static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+/* Updates references to the current and the next stack depot pools. */
+static bool depot_update_pools(size_t required_size, void **prealloc)
 {
-	struct stack_record *stack;
-	size_t required_size = DEPOT_STACK_RECORD_SIZE;
-
 	/* Check if there is not enough space in the current pool. */
 	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
 		/* Bail out if we reached the pool limit. */
 		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
 			WARN_ONCE(1, "Stack depot reached limit capacity");
-			return NULL;
+			return false;
 		}
 
 		/*
@@ -284,9 +274,10 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 		 */
 		WRITE_ONCE(pool_index, pool_index + 1);
 		pool_offset = 0;
+
 		/*
 		 * If the maximum number of pools is not reached, take note
-		 * that the next pool needs to initialized.
+		 * that the next pool needs to be initialized.
 		 * smp_store_release() pairs with smp_load_acquire() in
 		 * stack_depot_save().
 		 */
@@ -294,9 +285,30 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 			smp_store_release(&next_pool_required, 1);
 	}
 
-	/* Assign the preallocated memory to a pool if required. */
+	/* Check if the current pool is not yet allocated. */
+	if (*prealloc && stack_pools[pool_index] == NULL) {
+		/* Use the preallocated memory for the current pool. */
+		stack_pools[pool_index] = *prealloc;
+		*prealloc = NULL;
+		return true;
+	}
+
+	/* Otherwise, try using the preallocated memory for the next pool. */
 	if (*prealloc)
-		depot_init_pool(prealloc);
+		depot_keep_next_pool(prealloc);
+	return true;
+}
+
+/* Allocates a new stack in a stack depot pool. */
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+{
+	struct stack_record *stack;
+	size_t required_size = DEPOT_STACK_RECORD_SIZE;
+
+	/* Update current and next pools if required and possible. */
+	if (!depot_update_pools(required_size, prealloc))
+		return NULL;
 
 	/* Check if we have a pool to save the stack trace. */
 	if (stack_pools[pool_index] == NULL)
@@ -330,7 +342,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 	union handle_parts parts = { .handle = handle };
 	/*
 	 * READ_ONCE() pairs with potential concurrent write in
-	 * depot_alloc_stack().
+	 * depot_update_pools().
 	 */
 	int pool_index_cached = READ_ONCE(pool_index);
 	void *pool;
@@ -430,7 +442,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	 * the memory now - we won't be able to do that under the lock.
 	 *
 	 * smp_load_acquire() pairs with smp_store_release() in
-	 * depot_alloc_stack() and depot_init_pool().
+	 * depot_update_pools() and depot_keep_next_pool().
 	 */
 	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
 		/*
@@ -467,7 +479,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 		 * Stack depot already contains this stack trace, but let's
 		 * keep the preallocated memory for the future.
 		 */
-		depot_init_pool(&prealloc);
+		depot_keep_next_pool(&prealloc);
 	}
 
 	raw_spin_unlock_irqrestore(&pool_lock, flags);