mirror of https://github.com/torvalds/linux.git
synced 2024-11-26 06:02:05 +00:00
5b301409e8
Make KASAN run on User Mode Linux on x86_64.

The UML-specific KASAN initializer uses mmap to map the ~16TB of shadow memory to the location defined by KASAN_SHADOW_OFFSET. kasan_init() utilizes constructors to initialize KASAN before main().

The location of the KASAN shadow memory, starting at KASAN_SHADOW_OFFSET, can be configured using the KASAN_SHADOW_OFFSET option. The default location of this offset is 0x100000000000, which keeps it out of the way even on UML setups with more "physical" memory. For low-memory setups, 0x7fff8000 can be used instead, which fits in an immediate and is therefore faster, as suggested by Dmitry Vyukov. There is usually enough free space at this location; however, it is a config option so that it can be easily changed if needed.

Note that, unlike KASAN on other architectures, vmalloc allocations still use the shadow memory allocated upfront, rather than allocating and freeing it per vmalloc allocation. If another architecture chooses to go down the same path, we should replace the checks for CONFIG_UML with something more generic, such as:

- a CONFIG_KASAN_NO_SHADOW_ALLOC option, which architectures could set
- or, a way of having architecture-specific versions of these vmalloc and module shadow memory allocation options

Also note that, while UML supports both KASAN in inline mode (CONFIG_KASAN_INLINE) and static linking (CONFIG_STATIC_LINK), it does not support both at the same time.

Signed-off-by: Patricia Alfonso <trishalfonso@google.com>
Co-developed-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
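For illustration, here is a minimal userspace sketch of the shadow-mapping approach described in the message above, not the actual kernel patch: a GCC/Clang constructor reserves a large anonymous MAP_NORESERVE region at a fixed address before main() runs, so the host only backs pages that are actually touched. SHADOW_OFFSET, SHADOW_SIZE and shadow_init() are illustrative stand-ins for KASAN_SHADOW_OFFSET, the ~16TB shadow size and kasan_init().

/*
 * Hedged sketch only: map a ~16TB shadow region at a fixed offset before
 * main(), in the spirit of the UML KASAN initializer described above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define SHADOW_OFFSET	((void *)0x100000000000UL)	/* default KASAN_SHADOW_OFFSET */
#define SHADOW_SIZE	(1UL << 44)			/* ~16 TB of shadow memory */

static void __attribute__((constructor)) shadow_init(void)
{
	/*
	 * MAP_NORESERVE avoids committing memory for the whole region up
	 * front; physical pages are only allocated when shadow is touched.
	 */
	void *shadow = mmap(SHADOW_OFFSET, SHADOW_SIZE,
			    PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
			    -1, 0);

	if (shadow == MAP_FAILED) {
		perror("mmap of shadow memory failed");
		exit(1);
	}
}

int main(void)
{
	printf("shadow mapped at %p before main() ran\n", SHADOW_OFFSET);
	return 0;
}

As in the commit message, the default offset keeps the mapping clear of the usual process layout; a lower offset such as 0x7fff8000 trades that headroom for an address that fits in an immediate operand.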
76 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2013 Richard Weinberger <richard@nod.at>
 * Copyright (C) 2014 Google Inc., Author: Daniel Walter <dwalter@google.com>
 */

#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

void dump_trace(struct task_struct *tsk,
		const struct stacktrace_ops *ops,
		void *data)
{
	int reliable = 0;
	unsigned long *sp, bp, addr;
	struct pt_regs *segv_regs = tsk->thread.segv_regs;
	struct stack_frame *frame;

	bp = get_frame_pointer(tsk, segv_regs);
	sp = get_stack_pointer(tsk, segv_regs);

	frame = (struct stack_frame *)bp;
	while (((long) sp & (THREAD_SIZE-1)) != 0) {
		addr = READ_ONCE_NOCHECK(*sp);
		if (__kernel_text_address(addr)) {
			reliable = 0;
			if ((unsigned long) sp == bp + sizeof(long)) {
				frame = frame ? frame->next_frame : NULL;
				bp = (unsigned long)frame;
				reliable = 1;
			}
			ops->address(data, addr, reliable);
		}
		sp++;
	}
}

static void save_addr(void *data, unsigned long address, int reliable)
{
	struct stack_trace *trace = data;

	if (!reliable)
		return;
	if (trace->nr_entries >= trace->max_entries)
		return;

	trace->entries[trace->nr_entries++] = address;
}

static const struct stacktrace_ops dump_ops = {
	.address = save_addr
};

static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, &dump_ops, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
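For context, a hedged sketch of how a caller might use the save_stack_trace() interface implemented in this file, assuming the legacy struct stack_trace API shown above; it is not part of this file, and demo_entries/demo_dump_stack() are made-up names.

/* Illustrative caller sketch: capture and print the current task's stack. */
static unsigned long demo_entries[32];

static void demo_dump_stack(void)
{
	unsigned int i;
	struct stack_trace trace = {
		.entries	= demo_entries,
		.max_entries	= ARRAY_SIZE(demo_entries),
		.nr_entries	= 0,
	};

	save_stack_trace(&trace);

	/* %pS prints each saved return address as a kernel symbol. */
	for (i = 0; i < trace.nr_entries; i++)
		pr_info("  %pS\n", (void *)trace.entries[i]);
}

Only entries reported as reliable by dump_trace() (those confirmed by the frame-pointer chain) end up in the trace, since save_addr() above drops everything else.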