mirror of https://github.com/torvalds/linux.git
49c40fb4b8
Use the VMA iterator instead. Since the VMA can no longer be NULL inside the loop, the out-of-memory case is handled after the loop. This means a slightly longer run time in the failure case (-ENOMEM): the search now runs to the end of the VMAs before erroring out instead of returning from the middle of the loop.

Link: https://lkml.kernel.org/r/20220906194824.2110408-37-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
100 lines · 2.4 KiB · C
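For context, the conversion described in the commit message changes the shape of the address-search loop in arch_get_unmapped_area() below: for_each_vma() only ever yields real VMAs, so the loop itself just looks for a large-enough gap, and address-space exhaustion is detected once, after the loop. A condensed sketch of that shape, with the MAP_FIXED handling and MAP_SHARED colouring trimmed (everything shown is taken from the full function that follows):

        /* vmm is never NULL inside this loop; it walks the VMAs at or above addr. */
        vma_iter_init(&vmi, current->mm, addr);
        for_each_vma(vmi, vmm) {
                if (addr + len <= vm_start_gap(vmm))
                        break;          /* the gap below this VMA is big enough */
                addr = vmm->vm_end;     /* otherwise continue searching above it */
        }
        /* Exhaustion is now checked once, here, rather than inside the loop. */
        if (TASK_SIZE - len < addr)
                return -ENOMEM;
        return addr;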
/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
#include <linux/uaccess.h>
#include <asm/syscall.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>

syscall_t sys_call_table[] /* FIXME __cacheline_aligned */= {
#define __SYSCALL(nr, entry)	(syscall_t)entry,
#include <asm/syscall_table.h>
};

#define COLOUR_ALIGN(addr, pgoff) \
        ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
         (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        return (long)ret;
}

asmlinkage long xtensa_fadvise64_64(int fd, int advice,
                unsigned long long offset, unsigned long long len)
{
        return ksys_fadvise64_64(fd, offset, len, advice);
}

#ifdef CONFIG_MMU
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;
        struct vma_iterator vmi;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;

        if (flags & MAP_SHARED)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        vma_iter_init(&vmi, current->mm, addr);
        for_each_vma(vmi, vmm) {
                /* At this point: (addr < vmm->vm_end). */
                if (addr + len <= vm_start_gap(vmm))
                        break;

                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }

        if (TASK_SIZE - len < addr)
                return -ENOMEM;

        return addr;
}
#endif