mm/munmap: replace can_modify_mm with can_modify_vma

We were doing an extra mmap tree traversal just to check if the entire
range is modifiable.  This can be done when we iterate through the VMAs
instead.

Link: https://lkml.kernel.org/r/20240817-mseal-depessimize-v3-2-d8d2e037df30@gmail.com
Signed-off-by: Pedro Falcato <pedro.falcato@gmail.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Kees Cook <kees@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Pedro Falcato 2024-08-17 01:18:29 +01:00 committed by Andrew Morton
parent 4d1b341665
commit df2a7df9a9
2 changed files with 13 additions and 17 deletions

View File

@@ -1740,16 +1740,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, struct list_head *uf, unsigned long start, unsigned long end, struct list_head *uf,
bool unlock) bool unlock)
{ {
struct mm_struct *mm = vma->vm_mm; return do_vmi_align_munmap(vmi, vma, vma->vm_mm, start, end, uf, unlock);
/*
* Check if memory is sealed, prevent unmapping a sealed VMA.
* can_modify_mm assumes we have acquired the lock on MM.
*/
if (unlikely(!can_modify_mm(mm, start, end)))
return -EPERM;
return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
} }
/* /*

View File

@@ -712,6 +712,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
goto map_count_exceeded; goto map_count_exceeded;
/* Don't bother splitting the VMA if we can't unmap it anyway */
if (!can_modify_vma(vma)) {
error = -EPERM;
goto start_split_failed;
}
error = __split_vma(vmi, vma, start, 1); error = __split_vma(vmi, vma, start, 1);
if (error) if (error)
goto start_split_failed; goto start_split_failed;
@@ -723,6 +729,11 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
*/ */
next = vma; next = vma;
do { do {
if (!can_modify_vma(next)) {
error = -EPERM;
goto modify_vma_failed;
}
/* Does it split the end? */ /* Does it split the end? */
if (next->vm_end > end) { if (next->vm_end > end) {
error = __split_vma(vmi, next, end, 0); error = __split_vma(vmi, next, end, 0);
@@ -815,6 +826,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
__mt_destroy(&mt_detach); __mt_destroy(&mt_detach);
return 0; return 0;
modify_vma_failed:
clear_tree_failed: clear_tree_failed:
userfaultfd_error: userfaultfd_error:
munmap_gather_failed: munmap_gather_failed:
@@ -860,13 +872,6 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
if (end == start) if (end == start)
return -EINVAL; return -EINVAL;
/*
* Check if memory is sealed, prevent unmapping a sealed VMA.
* can_modify_mm assumes we have acquired the lock on MM.
*/
if (unlikely(!can_modify_mm(mm, start, end)))
return -EPERM;
/* Find the first overlapping VMA */ /* Find the first overlapping VMA */
vma = vma_find(vmi, end); vma = vma_find(vmi, end);
if (!vma) { if (!vma) {