mirror of
https://github.com/torvalds/linux.git
synced 2024-11-06 03:51:48 +00:00
64abdcb243
Currently we dedicate 1/32 of RAM for quarantine and then reduce it by 1/4 of total quarantine size. This can be a significant amount of memory. For example, with 4GB of RAM total quarantine size is 128MB and it is reduced by 32MB at a time. With 128GB of RAM total quarantine size is 4GB and it is reduced by 1GB. This leads to several problems: - freeing 1GB can take tens of seconds, causes rcu stall warnings and just introduces unexpected long delays at random places - if kmalloc() is called under a mutex, other threads stall on that mutex while a thread reduces quarantine - threads wait on quarantine_lock while one thread grabs a large batch of objects to evict - we walk the uncached list of object to free twice which makes all of the above worse - when a thread frees objects, they are already not accounted against global_quarantine.bytes; as the result we can have quarantine_size bytes in quarantine + unbounded amount of memory in large batches in threads that are in process of freeing Reduce size of quarantine in smaller batches to reduce the delays. The only reason to reduce it in batches is amortization of overheads, the new batch size of 1MB should be well enough to amortize spinlock lock/unlock and few function calls. Plus organize quarantine as a FIFO array of batches. This allows to not walk the list in quarantine_reduce() under quarantine_lock, which in turn reduces contention and is just faster. This improves performance of heavy load (syzkaller fuzzing) by ~20% with 4 CPUs and 32GB of RAM. Also this eliminates frequent (every 5 sec) drops of CPU consumption from ~400% to ~100% (one thread reduces quarantine while others are waiting on a mutex). Some reference numbers: 1. Machine with 4 CPUs and 4GB of memory. Quarantine size 128MB. Currently we free 32MB at at time. With new code we free 1MB at a time (1024 batches, ~128 are used). 2. Machine with 32 CPUs and 128GB of memory. Quarantine size 4GB. Currently we free 1GB at at time. 
With new code we free 8MB at a time (1024 batches, ~512 are used). 3. Machine with 4096 CPUs and 1TB of memory. Quarantine size 32GB. Currently we free 8GB at a time. With new code we free 4MB at a time (16K batches, ~8K are used). Link: http://lkml.kernel.org/r/1478756952-18695-1-git-send-email-dvyukov@google.com Signed-off-by: Dmitry Vyukov <dvyukov@google.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Greg Thelen <gthelen@google.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Andrey Konovalov <andreyknvl@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
291 lines
7.2 KiB
C
291 lines
7.2 KiB
C
/*
|
|
* KASAN quarantine.
|
|
*
|
|
* Author: Alexander Potapenko <glider@google.com>
|
|
* Copyright (C) 2016 Google, Inc.
|
|
*
|
|
* Based on code by Dmitry Chernenkov.
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* version 2 as published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful, but
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* General Public License for more details.
|
|
*
|
|
*/
|
|
|
|
#include <linux/gfp.h>
|
|
#include <linux/hash.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/percpu.h>
|
|
#include <linux/printk.h>
|
|
#include <linux/shrinker.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/string.h>
|
|
#include <linux/types.h>
|
|
|
|
#include "../slab.h"
|
|
#include "kasan.h"
|
|
|
|
/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a single-linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;	/* first object in the queue, NULL if empty */
	struct qlist_node *tail;	/* last object; only valid when head != NULL */
	size_t bytes;			/* total byte size of all queued objects */
};

#define QLIST_INIT { NULL, NULL, 0 }
|
|
|
|
static bool qlist_empty(struct qlist_head *q)
|
|
{
|
|
return !q->head;
|
|
}
|
|
|
|
static void qlist_init(struct qlist_head *q)
|
|
{
|
|
q->head = q->tail = NULL;
|
|
q->bytes = 0;
|
|
}
|
|
|
|
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
|
|
size_t size)
|
|
{
|
|
if (unlikely(qlist_empty(q)))
|
|
q->head = qlink;
|
|
else
|
|
q->tail->next = qlink;
|
|
q->tail = qlink;
|
|
qlink->next = NULL;
|
|
q->bytes += size;
|
|
}
|
|
|
|
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
|
|
{
|
|
if (unlikely(qlist_empty(from)))
|
|
return;
|
|
|
|
if (qlist_empty(to)) {
|
|
*to = *from;
|
|
qlist_init(from);
|
|
return;
|
|
}
|
|
|
|
to->tail->next = from->head;
|
|
to->tail = from->tail;
|
|
to->bytes += from->bytes;
|
|
|
|
qlist_init(from);
|
|
}
|
|
|
|
/* Per-cpu queue capacity (1 MB) before spilling into the global FIFO. */
#define QUARANTINE_PERCPU_SIZE (1 << 20)
/* Number of batch slots in the global FIFO: at least 4 per possible CPU. */
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)
|
|
|
|
/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
/* Index of the oldest batch, i.e. the next to be evicted by quarantine_reduce(). */
static int quarantine_head;
/* Index of the batch currently being filled by quarantine_put(). */
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
/* Protects global_quarantine and the head/tail/size variables above. */
static DEFINE_SPINLOCK(quarantine_lock);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
|
|
|
|
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
|
|
{
|
|
return virt_to_head_page(qlink)->slab_cache;
|
|
}
|
|
|
|
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
|
|
{
|
|
struct kasan_free_meta *free_info =
|
|
container_of(qlink, struct kasan_free_meta,
|
|
quarantine_link);
|
|
|
|
return ((void *)free_info) - cache->kasan_info.free_meta_offset;
|
|
}
|
|
|
|
/*
 * Actually return one quarantined object to its slab cache.
 *
 * The call to ___cache_free() is wrapped in local_irq_save/restore for
 * SLAB only — NOTE(review): presumably SLAB's free path requires irqs
 * disabled while SLUB does not; confirm against ___cache_free().
 */
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}
|
|
|
|
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
|
|
{
|
|
struct qlist_node *qlink;
|
|
|
|
if (unlikely(qlist_empty(q)))
|
|
return;
|
|
|
|
qlink = q->head;
|
|
while (qlink) {
|
|
struct kmem_cache *obj_cache =
|
|
cache ? cache : qlink_to_cache(qlink);
|
|
struct qlist_node *next = qlink->next;
|
|
|
|
qlink_free(qlink, obj_cache);
|
|
qlink = next;
|
|
}
|
|
qlist_init(q);
|
|
}
|
|
|
|
/*
 * Place a freed object into quarantine instead of returning it to its cache.
 *
 * The object is first appended to this CPU's queue; once that queue grows
 * past QUARANTINE_PERCPU_SIZE, its whole contents are spilled into the
 * tail batch of the global FIFO under quarantine_lock.
 */
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	/* Keep irq context from touching the per-cpu queue underneath us. */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
		/* Detach the whole per-cpu queue; spill it outside irq-off. */
		qlist_move_all(q, &temp);

	local_irq_restore(flags);

	if (unlikely(!qlist_empty(&temp))) {
		spin_lock_irqsave(&quarantine_lock, flags);
		/* Pairs with the lockless READ_ONCE() in quarantine_reduce(). */
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			/* Current batch is full: advance to the next FIFO slot. */
			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			/* Don't advance onto the head slot — FIFO would overflow. */
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		spin_unlock_irqrestore(&quarantine_lock, flags);
	}
}
|
|
|
|
/*
 * Evict and free the oldest batch of the global quarantine when the
 * quarantine has grown past its size limit.
 *
 * Only one batch (~quarantine_batch_size bytes) is freed per call, which
 * amortizes locking overhead while keeping individual delays short.
 */
void quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Lockless fast path: nothing to do while we are under the limit.
	 * Pairs with the WRITE_ONCE()s done under quarantine_lock.
	 */
	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					      2 * total_size / QUARANTINE_BATCHES));

	/* Re-check under the lock; another CPU may have reduced already. */
	if (likely(quarantine_size > quarantine_max_size)) {
		/* Detach only the oldest batch; it is freed after unlock. */
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	spin_unlock_irqrestore(&quarantine_lock, flags);

	/* Free outside quarantine_lock to reduce contention. */
	qlist_free_all(&to_free, NULL);
}
|
|
|
|
/*
 * Move every object belonging to @cache from @from to @to, preserving the
 * relative order of objects. Objects of other caches are re-appended to
 * @from in their original order.
 */
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	/* Detach the whole list, then redistribute it node by node. */
	curr = from->head;
	qlist_init(from);
	while (curr) {
		/* Save next before qlist_put() clears curr->next. */
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}
|
|
|
|
static void per_cpu_remove_cache(void *arg)
|
|
{
|
|
struct kmem_cache *cache = arg;
|
|
struct qlist_head to_free = QLIST_INIT;
|
|
struct qlist_head *q;
|
|
|
|
q = this_cpu_ptr(&cpu_quarantine);
|
|
qlist_move_cache(q, &to_free, cache);
|
|
qlist_free_all(&to_free, cache);
|
|
}
|
|
|
|
/*
 * Remove and free all quarantined objects belonging to @cache.
 * Used when the cache is being destroyed, so none of its objects may
 * remain in quarantine afterwards.
 */
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Flush the per-cpu queues first; on_each_cpu() waits for all CPUs
	 * to finish, so afterwards no per-cpu queue holds objects of @cache.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++)
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
	spin_unlock_irqrestore(&quarantine_lock, flags);

	/*
	 * NOTE(review): quarantine_reduce() detaches a batch and frees it
	 * after dropping quarantine_lock; confirm such an in-flight batch
	 * cannot still contain objects of @cache when we scan here.
	 */
	qlist_free_all(&to_free, cache);
}
|