// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

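/* Save the current stack trace in the stack depot and return a handle to it. */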
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

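/* Record the current task's pid and stack trace in an alloc/free track. */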
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

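/* Mark the given memory range as accessible (unpoison it). */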
void kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}

#if CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	unpoison_range(base, THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}
#endif /* CONFIG_KASAN_STACK */

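/* Page allocation hook: tag the pages (in tag-based modes) and unpoison them. */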
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}

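/* Page freeing hook: poison the pages so that later accesses are reported. */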
void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
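/*
 * For example: a 40-byte object gets a 16-byte redzone, a 100-byte object
 * a 64-byte one, and a 3000-byte object a 128-byte one.
 */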
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0;

	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

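/*
 * Lay out KASAN metadata for a cache: reserve room for the alloc/free meta
 * structs and the redzone in *size and set SLAB_KASAN if everything fits.
 */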
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	if (!kasan_stack_collection_enabled()) {
		*flags |= SLAB_KASAN;
		return;
	}

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

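/* Return the size of the KASAN metadata stored in each object of the cache. */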
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

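/* Accessors for the alloc/free metadata placed at fixed offsets within each object. */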
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}

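/* Poison a newly allocated slab page and reset the page tags it covers. */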
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}

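/* Mark an object's storage accessible, e.g. so that its constructor can run. */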
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}

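/* Poison an object's storage, rounded up to the KASAN granule size. */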
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

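/*
 * Initialize per-object KASAN state when a slab page is set up: clear the
 * alloc metadata (when collected) and preassign a tag in the tag-based modes.
 */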
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		if (!(cache->flags & SLAB_KASAN))
			return (void *)object;

		alloc_meta = kasan_get_alloc_meta(cache, object);
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
		object = set_tag(object, assign_tag(cache, object, true, false));

	return (void *)object;
}

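/*
 * Common slab free hook: check for invalid or double frees, poison the
 * object, record the free stack, and (for generic KASAN) put the object
 * into the quarantine. Returns true if the slab allocator should skip
 * actually freeing the object for now.
 */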
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (check_invalid_free(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
	poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!kasan_stack_collection_enabled())
		return false;

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(cache, object);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
}

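/*
 * Common allocation hook behind kasan_slab_alloc() and kasan_kmalloc():
 * unpoison the requested size, poison the redzone past it, assign a pointer
 * tag in the tag-based modes, and record the allocation stack.
 */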
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (kasan_stack_collection_enabled() && (cache->flags & SLAB_KASAN))
		set_alloc_info(cache, (void *)object, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

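/*
 * Hook for kmalloc() allocations that come straight from the page allocator:
 * unpoison the requested size and poison the rest of the page(s) as a redzone.
 */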
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

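/* krealloc() hook: handle both slab-backed and page_alloc-backed objects. */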
void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

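/* Poison a freed object, for both slab objects and page_alloc-backed allocations. */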
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

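/* Free hook for page_alloc-backed kmalloc allocations: only sanity-check the pointer. */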
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}