1b028f784e
mmap() uses a base address, from which it starts to look for free space for an allocation. The base address is stored in mm->mmap_base, which is calculated during exec(). The address depends on the task size, the stack rlimit and ASLR randomization; the number of random bits differs between 64-bit and 32-bit applications.

Because the base address is fixed, an mmap() from a compat (32-bit) syscall issued by a 64-bit task returns an address which is based on the 64-bit base address and does not fit into the 32-bit address space (4GB). The returned pointer is truncated to 32 bits, which results in an invalid address.

To solve this, store a separate compat address base plus a compat legacy address base in mm_struct. These bases are calculated at exec() time and can be used later to handle 32-bit compat mmap() calls issued by 64-bit applications.

As a consequence of this change, 32-bit applications issuing a 64-bit syscall (after doing a long jump) will get a 64-bit mapping now. Before this change, 32-bit applications always got a 32-bit mapping.

[ tglx: Massaged changelog and added a comment ]

Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Cc: 0x7f454c46@gmail.com
Cc: linux-mm@kvack.org
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20170306141721.9188-4-dsafonov@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
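
For reference, a minimal sketch of the mm_struct change the changelog describes; the field and config names match the code below, while their exact placement inside the struct is illustrative:

struct mm_struct {
	...
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base for bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/* Compat mmap bases, computed at exec() time */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
#endif
	...
};
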
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();

	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}

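/*
 * Parse the "align_va_addr=" early parameter: "32" and "64" enable
 * the I$ anti-aliasing alignment only for 32-bit or 64-bit mmap()s
 * respectively, "on" enables it for both and "off" disables it.
 */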
static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);

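/*
 * The 64-bit mmap() entry point takes the file offset in bytes:
 * reject offsets that are not page-aligned, then hand the page
 * offset to the generic sys_mmap_pgoff() implementation.
 */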
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}

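/*
 * Select the mmap base for the current request. In a compat (32-bit)
 * syscall the compat bases computed at exec() time are used, so the
 * resulting address fits into 4GB even when a 64-bit task issues the
 * compat syscall.
 */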
static unsigned long get_mmap_base(int is_legacy)
{
	struct mm_struct *mm = current->mm;

#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	if (in_compat_syscall())
		return is_legacy ? mm->mmap_compat_legacy_base
				 : mm->mmap_compat_base;
#endif
	return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
}

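/*
 * Compute the [begin, end) range for a bottom-up search. MAP_32BIT
 * mappings of 64-bit tasks are confined to a 1GB window starting at
 * 1GB; everything else ranges from the legacy mmap base up to the
 * 32-bit or 64-bit task size, depending on the syscall flavor.
 */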
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
		/* This is usually used to map code in the small
		   model, so it needs to be in the first 31 bits.
		   Limit it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin = get_mmap_base(1);
	*end = in_compat_syscall() ? tasksize_32bit() : tasksize_64bit();
}

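/*
 * Bottom-up search for an unmapped area. MAP_FIXED requests are
 * returned unchanged, a page-aligned hint is used when it fits, and
 * otherwise vm_unmapped_area() searches upwards within the range
 * computed by find_start_end().
 */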
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}

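/*
 * Top-down search for an unmapped area, used for the default mmap
 * layout: search downwards from the mmap base towards PAGE_SIZE and
 * fall back to the bottom-up path when the search fails or when
 * MAP_32BIT forces the legacy window.
 */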
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}