mm: avoid setting up anonymous pages into file mapping

Reading the page fault handler code, I've noticed that under the right
circumstances the kernel would map anonymous pages into file mappings: if
the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated
on ->mmap(), the kernel would handle a page fault to a not-populated pte
with do_anonymous_page().

Let's change page fault handler to use do_anonymous_page() only on
anonymous VMA (->vm_ops == NULL) and make sure that the VMA is not
shared.

For file mappings without vm_ops->fault() or a shared VMA without vm_ops,
a page fault on a pte_none() entry would lead to SIGBUS.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Willy Tarreau <w@1wt.eu>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Kirill A. Shutemov 2015-07-06 23:18:37 +03:00 committed by Linus Torvalds
parent 883a2dfd6f
commit 6b7339f4c3

View File

@ -2670,6 +2670,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap(page_table); pte_unmap(page_table);
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
/* Check if we need to add a guard page to the stack */ /* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0) if (check_stack_guard_page(vma, address) < 0)
return VM_FAULT_SIGSEGV; return VM_FAULT_SIGSEGV;
@ -3099,6 +3103,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table); pte_unmap(page_table);
/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
if (!vma->vm_ops->fault)
return VM_FAULT_SIGBUS;
if (!(flags & FAULT_FLAG_WRITE)) if (!(flags & FAULT_FLAG_WRITE))
return do_read_fault(mm, vma, address, pmd, pgoff, flags, return do_read_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte); orig_pte);
@ -3244,13 +3251,12 @@ static int handle_pte_fault(struct mm_struct *mm,
barrier(); barrier();
if (!pte_present(entry)) { if (!pte_present(entry)) {
if (pte_none(entry)) { if (pte_none(entry)) {
if (vma->vm_ops) { if (vma->vm_ops)
if (likely(vma->vm_ops->fault)) return do_fault(mm, vma, address, pte, pmd,
return do_fault(mm, vma, address, pte, flags, entry);
pmd, flags, entry);
} return do_anonymous_page(mm, vma, address, pte, pmd,
return do_anonymous_page(mm, vma, address, flags);
pte, pmd, flags);
} }
return do_swap_page(mm, vma, address, return do_swap_page(mm, vma, address,
pte, pmd, flags, entry); pte, pmd, flags, entry);