mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 13:41:51 +00:00
497258dfaf
All relevant architectures had already been converted to the new interface (which just has an underscore in front of the name - not very imaginative naming); this just force-converts the stragglers.

The modern interface is almost identical to the old one, except instead of the page pointer it takes a "struct vm_special_mapping" that describes the mapping (and contains the page pointer as one member), and it returns the resulting 'vma' instead of just the error code.

Getting rid of the old interface also gets rid of some special casing, which had caused problems with the mremap extensions to "struct vm_special_mapping".

[akpm@linux-foundation.org: coding-style cleanups]
Link: https://lkml.kernel.org/r/CAHk-=whvR+z=0=0gzgdfUiK70JTa-=+9vxD-4T=3BagXR6dciA@mail.gmail.com
Tested-by: Rob Landley <rob@landley.net> # arch/sh/
Link: https://lore.kernel.org/all/20240819195120.GA1113263@thelio-3990X/
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Jeff Xu <jeffxu@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Pedro Falcato <pedro.falcato@gmail.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Landley <rob@landley.net>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
112 lines
2.6 KiB
C
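The commit message above contrasts the legacy install_special_mapping() with the underscore-prefixed _install_special_mapping() used in this file: the old call took a bare page array and returned an errno, while the new call takes a "struct vm_special_mapping" descriptor and returns the new VMA (or an ERR_PTR value). A minimal, hedged sketch of the difference; the helper name example_map_vdso() and its arguments are made up for illustration, only the two interface calls are the real kernel API:

/* Sketch only - not part of this file; example_map_vdso() is hypothetical. */
static struct vm_special_mapping example_mapping = {
        .name = "[vdso]",
};

static int example_map_vdso(struct mm_struct *mm, unsigned long addr,
                            unsigned long len, struct page **pages)
{
        struct vm_area_struct *vma;

        /*
         * Legacy interface (now removed): page array in, errno out.
         *
         *      ret = install_special_mapping(mm, addr, len,
         *                                    VM_READ | VM_EXEC, pages);
         */

        /* Modern interface: descriptor in, VMA (or ERR_PTR value) out. */
        example_mapping.pages = pages;
        vma = _install_special_mapping(mm, addr, len,
                                       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
                                       &example_mapping);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        return 0;
}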
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <vdso/datapage.h>

extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;
static struct page **vdso_pagelist;

static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

static int __init vdso_init(void)
{
        unsigned int i;

        vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
        vdso_pagelist =
                kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
        if (unlikely(vdso_pagelist == NULL)) {
                pr_err("vdso: pagelist allocation failed\n");
                return -ENOMEM;
        }

        for (i = 0; i < vdso_pages; i++) {
                struct page *pg;

                pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
                vdso_pagelist[i] = pg;
        }
        vdso_pagelist[i] = virt_to_page(vdso_data);

        return 0;
}
arch_initcall(vdso_init);

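vdso_init() above fills a page array with vdso_pages entries for the vdso image plus one trailing entry for the shared vdso_data page; arch_setup_additional_pages() below maps that array as two adjacent user regions. A rough layout sketch, illustrative only, with N standing for vdso_pages:

/*
 * Sketch only - not part of this file.
 *
 *   vdso_pagelist:  [ vdso page 0 | ... | vdso page N-1 | vdso_data page ]
 *
 *   user addresses: vdso_base .. vdso_base + N*PAGE_SIZE - 1   "[vdso]" (r-x)
 *                   vdso_base + N*PAGE_SIZE, one page          "[vvar]" (r--)
 */
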
int arch_setup_additional_pages(struct linux_binprm *bprm,
        int uses_interp)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long vdso_base, vdso_len;
        int ret;
        static struct vm_special_mapping vdso_mapping = {
                .name = "[vdso]",
        };
        static struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
        };

        vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

        mmap_write_lock(mm);
        vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = vdso_base;
                goto end;
        }

        /*
         * Put vDSO base into mm struct. We need to do this before calling
         * install_special_mapping or the perf counter mmap tracking code
         * will fail to recognise it as a vDSO (since arch_vma_name fails).
         */
        mm->context.vdso = (void *)vdso_base;

        vdso_mapping.pages = vdso_pagelist;
        vma =
           _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
                &vdso_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                mm->context.vdso = NULL;
                goto end;
        }

        vdso_base += (vdso_pages << PAGE_SHIFT);
        vvar_mapping.pages = &vdso_pagelist[vdso_pages];
        vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                (VM_READ | VM_MAYREAD), &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                mm->context.vdso = NULL;
                goto end;
        }
        ret = 0;
end:
        mmap_write_unlock(mm);
        return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
                return "[vdso]";
        if (vma->vm_mm && (vma->vm_start ==
                (long)vma->vm_mm->context.vdso + PAGE_SIZE))
                return "[vdso_data]";
        return NULL;
}
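The commit message also mentions the mremap extensions to "struct vm_special_mapping". This file does not use them, but the descriptor-based interface is what makes them possible: a mapping can supply a .mremap hook so the architecture is told when userspace moves the area. A hedged sketch of how that could look here, following the pattern used by other architectures; vdso_mremap() below is illustrative, is not part of this file, and would require vdso_mapping to live at file scope rather than inside arch_setup_additional_pages():

/* Sketch only - not part of this file. */
static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        /* Track the relocated [vdso] so mm->context.vdso stays correct. */
        current->mm->context.vdso = (void *)new_vma->vm_start;
        return 0;
}

static struct vm_special_mapping vdso_mapping = {
        .name   = "[vdso]",
        .mremap = vdso_mremap,
};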