[PATCH] error path in setup_arg_pages() misses vm_unacct_memory()
Pavel Emelianov and Kirill Korotaev observe that fs and arch users of security_vm_enough_memory tend to forget to vm_unacct_memory when a failure occurs further down (typically in setup_arg_pages variants). These are all users of insert_vm_struct, and that reservation will only be unaccounted on exit if the vma is marked VM_ACCOUNT: which in some cases it is (hidden inside VM_STACK_FLAGS) and in some cases it isn't.

So x86_64 32-bit and ppc64 vDSO ELFs have been leaking memory into Committed_AS each time they're run. But don't add VM_ACCOUNT to them, it's inappropriate to reserve against the very unlikely case that gdb be used to COW a vDSO page - we ought to do something about that in do_wp_page, but there are yet other inconsistencies to be resolved.

The safe and economical way to fix this is to let insert_vm_struct do the security_vm_enough_memory check when it finds VM_ACCOUNT is set.

And the MIPS irix_brk has been calling security_vm_enough_memory before calling do_brk which repeats it, doubly accounting and so also leaking. Remove that, and all the fs and arch calls to security_vm_enough_memory: give it a less misleading name later on.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-Off-By: Kirill Korotaev <dev@sw.ru>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fb085cf1d4
commit 2fd4ef85e0
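For illustration only, a minimal userspace C model of the leak the message describes; this is not kernel code. The names committed_as, mock_enough_memory, mock_unacct_memory, setup_stack_leaky and setup_stack_fixed are made up here and merely stand in for Committed_AS, security_vm_enough_memory, vm_unacct_memory and a setup_arg_pages-style caller. The point is that reserving up front and then taking an early error return leaks the reservation, whereas taking the reservation only at the point where the vma is actually inserted (what the mm/mmap.c hunk below does, keyed on VM_ACCOUNT) cannot leak on earlier failures.

/* Userspace model of the accounting leak - NOT kernel code. */
#include <stdio.h>
#include <stdbool.h>

static long committed_as;                       /* stands in for Committed_AS (pages) */

static int mock_enough_memory(long pages)       /* stands in for security_vm_enough_memory */
{
        committed_as += pages;                  /* reservation always succeeds in this model */
        return 0;
}

static void mock_unacct_memory(long pages)      /* stands in for vm_unacct_memory */
{
        committed_as -= pages;
}

/* Old-style caller: reserves first, then forgets to unaccount when a later
 * step fails - the pattern the commit message complains about. */
static int setup_stack_leaky(long pages, bool insert_fails)
{
        if (mock_enough_memory(pages))
                return -1;
        if (insert_fails)
                return -1;                      /* early return: reservation never given back */
        return 0;
}

/* Fixed-style caller: the reservation is only taken at the point where the
 * vma is really inserted, so a failure before that leaves nothing accounted. */
static int setup_stack_fixed(long pages, bool insert_fails)
{
        if (insert_fails)
                return -1;                      /* nothing reserved yet, nothing leaks */
        return mock_enough_memory(pages);
}

int main(void)
{
        setup_stack_leaky(4, true);
        printf("after leaky failure: committed_as = %ld (leaked)\n", committed_as);

        committed_as = 0;
        setup_stack_fixed(4, true);
        printf("after fixed failure: committed_as = %ld\n", committed_as);

        (void)mock_unacct_memory;               /* the forgotten call in the leaky path */
        return 0;
}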
@@ -216,12 +216,6 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
         if (!mpnt)
                 return -ENOMEM;
 
-        if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
-                                      >> PAGE_SHIFT)) {
-                kmem_cache_free(vm_area_cachep, mpnt);
-                return -ENOMEM;
-        }
-
         memset(mpnt, 0, sizeof(*mpnt));
 
         down_write(&current->mm->mmap_sem);
@@ -581,18 +581,13 @@ asmlinkage int irix_brk(unsigned long brk)
         }
 
         /*
-         * Check if we have enough memory..
+         * Ok, looks good - let it rip.
          */
-        if (security_vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) {
+        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) {
                 ret = -ENOMEM;
                 goto out;
         }
-
-        /*
-         * Ok, looks good - let it rip.
-         */
         mm->brk = brk;
-        do_brk(oldbrk, newbrk-oldbrk);
         ret = 0;
 
 out:
@@ -224,10 +224,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
         if (vma == NULL)
                 return -ENOMEM;
-        if (security_vm_enough_memory(vdso_pages)) {
-                kmem_cache_free(vm_area_cachep, vma);
-                return -ENOMEM;
-        }
+
         memset(vma, 0, sizeof(*vma));
 
         /*
@@ -237,8 +234,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
          */
         vdso_base = get_unmapped_area(NULL, vdso_base,
                                       vdso_pages << PAGE_SHIFT, 0, 0);
-        if (vdso_base & ~PAGE_MASK)
+        if (vdso_base & ~PAGE_MASK) {
+                kmem_cache_free(vm_area_cachep, vma);
                 return (int)vdso_base;
+        }
 
         current->thread.vdso_base = vdso_base;
 
@@ -266,7 +265,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
         vma->vm_ops = &vdso_vmops;
 
         down_write(&mm->mmap_sem);
-        insert_vm_struct(mm, vma);
+        if (insert_vm_struct(mm, vma)) {
+                up_write(&mm->mmap_sem);
+                kmem_cache_free(vm_area_cachep, vma);
+                return -ENOMEM;
+        }
         mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
         up_write(&mm->mmap_sem);
 
@@ -354,11 +354,6 @@ int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec
         if (!mpnt)
                 return -ENOMEM;
 
-        if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) {
-                kmem_cache_free(vm_area_cachep, mpnt);
-                return -ENOMEM;
-        }
-
         memset(mpnt, 0, sizeof(*mpnt));
 
         down_write(&mm->mmap_sem);
@@ -52,17 +52,13 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
         if (!vma)
                 return -ENOMEM;
-        if (security_vm_enough_memory(npages)) {
-                kmem_cache_free(vm_area_cachep, vma);
-                return -ENOMEM;
-        }
 
         memset(vma, 0, sizeof(struct vm_area_struct));
         /* Could randomize here */
         vma->vm_start = VSYSCALL32_BASE;
         vma->vm_end = VSYSCALL32_END;
         /* MAYWRITE to allow gdb to COW and set breakpoints */
-        vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYEXEC|VM_MAYWRITE;
+        vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
         vma->vm_flags |= mm->def_flags;
         vma->vm_page_prot = protection_map[vma->vm_flags & 7];
         vma->vm_ops = &syscall32_vm_ops;
@@ -421,11 +421,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
         if (!mpnt)
                 return -ENOMEM;
 
-        if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
-                kmem_cache_free(vm_area_cachep, mpnt);
-                return -ENOMEM;
-        }
-
         memset(mpnt, 0, sizeof(*mpnt));
 
         down_write(&mm->mmap_sem);
@@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
         __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
         if (__vma && __vma->vm_start < vma->vm_end)
                 return -ENOMEM;
+        if ((vma->vm_flags & VM_ACCOUNT) &&
+             security_vm_enough_memory(vma_pages(vma)))
+                return -ENOMEM;
         vma_link(mm, vma, prev, rb_link, rb_parent);
         return 0;
 }