mirror of
https://github.com/torvalds/linux.git
synced 2024-10-31 17:21:49 +00:00
6252d702c5
Add support for a different number of page table levels depending on the highest address used by a process. This causes a 31-bit process to use a two-level page table instead of the four-level page table that is the default after the pud has been introduced. Likewise, a normal 64-bit process will use three levels instead of four. Only if a process runs out of the 4 terabytes which can be addressed with a three-level page table is the fourth level added dynamically. The process can then use up to 8 petabytes. Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
152 lines
4.0 KiB
C
/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
/*
 * Top of mmap area (just below the process stack).
 *
 * Leave an at least ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)
static inline unsigned long mmap_base(void)
|
|
{
|
|
unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
|
|
|
|
if (gap < MIN_GAP)
|
|
gap = MIN_GAP;
|
|
else if (gap > MAX_GAP)
|
|
gap = MAX_GAP;
|
|
|
|
return TASK_SIZE - (gap & PAGE_MASK);
|
|
}
static inline int mmap_is_legacy(void)
|
|
{
|
|
#ifdef CONFIG_64BIT
|
|
/*
|
|
* Force standard allocation for 64 bit programs.
|
|
*/
|
|
if (!test_thread_flag(TIF_31BIT))
|
|
return 1;
|
|
#endif
|
|
return sysctl_legacy_va_layout ||
|
|
(current->personality & ADDR_COMPAT_LAYOUT) ||
|
|
current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
|
|
}

#ifndef CONFIG_64BIT

/*
|
|
* This function, called very early during the creation of a new
|
|
* process VM image, sets up which VM layout function to use:
|
|
*/
|
|
void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
{
|
|
/*
|
|
* Fall back to the standard layout if the personality
|
|
* bit is set, or if the expected stack growth is unlimited:
|
|
*/
|
|
if (mmap_is_legacy()) {
|
|
mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
} else {
|
|
mm->mmap_base = mmap_base();
|
|
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
}
|
|
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else

static unsigned long
|
|
s390_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
unsigned long len, unsigned long pgoff, unsigned long flags)
|
|
{
|
|
struct mm_struct *mm = current->mm;
|
|
int rc;
|
|
|
|
addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
|
|
if (addr & ~PAGE_MASK)
|
|
return addr;
|
|
if (unlikely(mm->context.asce_limit < addr + len)) {
|
|
rc = crst_table_upgrade(mm, addr + len);
|
|
if (rc)
|
|
return (unsigned long) rc;
|
|
}
|
|
return addr;
|
|
}
static unsigned long
|
|
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
const unsigned long len, const unsigned long pgoff,
|
|
const unsigned long flags)
|
|
{
|
|
struct mm_struct *mm = current->mm;
|
|
unsigned long addr = addr0;
|
|
int rc;
|
|
|
|
addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
|
|
if (addr & ~PAGE_MASK)
|
|
return addr;
|
|
if (unlikely(mm->context.asce_limit < addr + len)) {
|
|
rc = crst_table_upgrade(mm, addr + len);
|
|
if (rc)
|
|
return (unsigned long) rc;
|
|
}
|
|
return addr;
|
|
}
/*
|
|
* This function, called very early during the creation of a new
|
|
* process VM image, sets up which VM layout function to use:
|
|
*/
|
|
void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
{
|
|
/*
|
|
* Fall back to the standard layout if the personality
|
|
* bit is set, or if the expected stack growth is unlimited:
|
|
*/
|
|
if (mmap_is_legacy()) {
|
|
mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
mm->get_unmapped_area = s390_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
} else {
|
|
mm->mmap_base = mmap_base();
|
|
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
}
|
|
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif