25d4054cc9
Patch series "mm: Care about shadow stack guard gap when getting an unmapped area", v2. As covered in the commit log forc44357c2e7
("x86/mm: care about shadow stack guard gap during placement") our current mmap() implementation does not take care to ensure that a new mapping isn't placed with existing mappings inside it's own guard gaps. This is particularly important for shadow stacks since if two shadow stacks end up getting placed adjacent to each other then they can overflow into each other which weakens the protection offered by the feature. On x86 there is a custom arch_get_unmapped_area() which was updated by the above commit to cover this case by specifying a start_gap for allocations with VM_SHADOW_STACK. Both arm64 and RISC-V have equivalent features and use the generic implementation of arch_get_unmapped_area() so let's make the equivalent change there so they also don't get shadow stack pages placed without guard pages. The arm64 and RISC-V shadow stack implementations are currently on the list: https://lore.kernel.org/r/20240829-arm64-gcs-v12-0-42fec94743 https://lore.kernel.org/lkml/20240403234054.2020347-1-debug@rivosinc.com/ Given the addition of the use of vm_flags in the generic implementation we also simplify the set of possibilities that have to be dealt with in the core code by making arch_get_unmapped_area() take vm_flags as standard. This is a bit invasive since the prototype change touches quite a few architectures but since the parameter is ignored the change is straightforward, the simplification for the generic code seems worth it. This patch (of 3): When we introduced arch_get_unmapped_area_vmflags() in961148704a
("mm: introduce arch_get_unmapped_area_vmflags()") we did so as part of properly supporting guard pages for shadow stacks on x86_64, which uses a custom arch_get_unmapped_area(). Equivalent features are also present on both arm64 and RISC-V, both of which use the generic implementation of arch_get_unmapped_area() and will require equivalent modification there. Rather than continue to deal with having two versions of the functions let's bite the bullet and have all implementations of arch_get_unmapped_area() take vm_flags as a parameter. The new parameter is currently ignored by all implementations other than x86. The only caller that doesn't have a vm_flags available is mm_get_unmapped_area(), as for the x86 implementation and the wrapper used on other architectures this is modified to supply no flags. No functional changes. Link: https://lkml.kernel.org/r/20240904-mm-generic-shadow-stack-guard-v2-0-a46b8b6dc0ed@kernel.org Link: https://lkml.kernel.org/r/20240904-mm-generic-shadow-stack-guard-v2-1-a46b8b6dc0ed@kernel.org Signed-off-by: Mark Brown <broonie@kernel.org> Acked-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com> Acked-by: Helge Deller <deller@gmx.de> [parisc] Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andreas Larsson <andreas@gaisler.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Chris Zankel <chris@zankel.net> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: "Edgecombe, Rick P" <rick.p.edgecombe@intel.com> Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Richard Henderson <richard.henderson@linaro.org> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
233 lines
5.9 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly to the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

	return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/* This is usually used needed to map code in small
		   model, so it needs to be in the first 31bit. Limit
		   it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin = get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}

static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{
	if (vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len,
		       unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_offset = pgoff << PAGE_SHIFT;
	info.start_gap = stack_guard_placement(vm_flags);
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	if (!in_32bit_syscall() && (flags & MAP_ABOVE4G))
		info.low_limit = SZ_4G;
	else
		info.low_limit = PAGE_SIZE;

	info.high_limit = get_mmap_base(0);
	info.start_gap = stack_guard_placement(vm_flags);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 *
	 * !in_32bit_syscall() check to avoid high addresses for x32
	 * (and make it no op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags, 0);
}