mm/vma: return the exact errno in vms_gather_munmap_vmas()

__split_vma() and mas_store_gfp() return several types of errno on
failure, so don't ignore them in vms_gather_munmap_vmas().  For example,
__split_vma() returns -EINVAL when an unaligned huge page is unmapped.
This issue is reproduced by the LTP memfd_create03 test.

Don't pre-initialise the error variable; instead, assign it only at the
point where a failure actually occurs, so the exact errno is propagated.

[akpm@linux-foundation.org: fix whitespace, per Liam]
Link: https://lkml.kernel.org/r/20240909125621.1994-1-ice_yangxiao@163.com
Fixes: 6898c9039b ("mm/vma: extract the gathering of vmas from do_vmi_align_munmap()")
Signed-off-by: Xiao Yang <ice_yangxiao@163.com>
Suggested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202409081536.d283a0fb-oliver.sang@intel.com
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Xiao Yang 2024-09-09 21:56:21 +09:00 committed by Andrew Morton
parent f2c5101be4
commit 659c55ef98

View File

@ -1171,13 +1171,13 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
* @vms: The vma munmap struct
* @mas_detach: The maple state tracking the detached tree
*
* Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
* Return: 0 on success, error otherwise
*/
int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
struct ma_state *mas_detach)
{
struct vm_area_struct *next = NULL;
int error = -ENOMEM;
int error;
/*
* If we need to split any vma, do it now to save pain later.
@ -1191,8 +1191,10 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
* its limit temporarily, to help free resources as expected.
*/
if (vms->end < vms->vma->vm_end &&
vms->vma->vm_mm->map_count >= sysctl_max_map_count)
vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
error = -ENOMEM;
goto map_count_exceeded;
}
/* Don't bother splitting the VMA if we can't unmap it anyway */
if (!can_modify_vma(vms->vma)) {
@ -1200,7 +1202,8 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
goto start_split_failed;
}
if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
if (error)
goto start_split_failed;
}
vms->prev = vma_prev(vms->vmi);
@ -1220,12 +1223,14 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
}
/* Does it split the end? */
if (next->vm_end > vms->end) {
if (__split_vma(vms->vmi, next, vms->end, 0))
error = __split_vma(vms->vmi, next, vms->end, 0);
if (error)
goto end_split_failed;
}
vma_start_write(next);
mas_set(mas_detach, vms->vma_count++);
if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
if (error)
goto munmap_gather_failed;
vma_mark_detached(next, true);
@ -1255,8 +1260,9 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
* split, despite we could. This is unlikely enough
* failure that it's not worth optimizing it for.
*/
if (userfaultfd_unmap_prep(next, vms->start, vms->end,
vms->uf))
error = userfaultfd_unmap_prep(next, vms->start,
vms->end, vms->uf);
if (error)
goto userfaultfd_error;
}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE