diff --git a/mm/mremap.c b/mm/mremap.c
index fbb4861964f6..e2b65a17148e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -489,6 +489,53 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
 	return moved;
 }
 
+/*
+ * A helper to check if a previous mapping exists. Required for
+ * move_page_tables() and try_realign_addr() to determine if a previous mapping
+ * exists before we can do realignment optimizations.
+ */
+static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
+			   unsigned long mask)
+{
+	unsigned long addr_masked = addr_to_align & mask;
+
+	/*
+	 * If @addr_to_align of either source or destination is not the beginning
+	 * of the corresponding VMA, we can't align down or we will destroy part
+	 * of the current mapping.
+	 */
+	if (vma->vm_start != addr_to_align)
+		return false;
+
+	/*
+	 * Make sure the realignment doesn't cause the address to fall on an
+	 * existing mapping.
+	 */
+	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
+}
+
+/* Opportunistically realign to specified boundary for faster copy. */
+static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
+			     unsigned long *new_addr, struct vm_area_struct *new_vma,
+			     unsigned long mask)
+{
+	/* Skip if the addresses are already aligned. */
+	if ((*old_addr & ~mask) == 0)
+		return;
+
+	/* Only realign if the new and old addresses are mutually aligned. */
+	if ((*old_addr & ~mask) != (*new_addr & ~mask))
+		return;
+
+	/* Ensure realignment doesn't cause overlap with existing mappings. */
+	if (!can_align_down(old_vma, *old_addr, mask) ||
+	    !can_align_down(new_vma, *new_addr, mask))
+		return;
+
+	*old_addr = *old_addr & mask;
+	*new_addr = *new_addr & mask;
+}
+
 unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
@@ -508,6 +555,14 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		return move_hugetlb_page_tables(vma, new_vma, old_addr,
 						new_addr, len);
 
+	/*
+	 * If possible, realign addresses to PMD boundary for faster copy.
+	 * Only realign if the mremap copying hits a PMD boundary.
+	 */
+	if ((vma != new_vma)
+		&& (len >= PMD_SIZE - (old_addr & ~PMD_MASK)))
+		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK);
+
 	flush_cache_range(vma, old_addr, old_end);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
 				old_addr, old_end);
@@ -577,6 +632,13 @@ again:
 
 	mmu_notifier_invalidate_range_end(&range);
 
+	/*
+	 * Prevent negative return values when {old,new}_addr was realigned
+	 * but we broke out of the above loop for the first PMD itself.
+	 */
+	if (len + old_addr < old_end)
+		return 0;
+
 	return len + old_addr - old_end;	/* how much done */
 }
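
As a quick illustration of when the new path can trigger, below is a hypothetical userspace sketch (not part of the patch). It assumes x86_64 with a 2 MiB PMD: the source VMA is made to start 4 KiB past a PMD boundary with nothing mapped below it, and mremap() moves it to a destination sharing the same offset within a PMD, which is the mutual-alignment condition try_realign_addr() checks before aligning both addresses down. All names and constants here are chosen for illustration only.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define MB	(1024UL * 1024UL)
#define PMD_SZ	(2 * MB)	/* assumed PMD size on x86_64 */

int main(void)
{
	/* Carve out large anonymous windows we fully control. */
	char *src = mmap(NULL, 16 * PMD_SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *dst = mmap(NULL, 16 * PMD_SZ, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED || dst == MAP_FAILED)
		return 1;

	/*
	 * Start the source VMA 4 KiB past a PMD boundary with nothing mapped
	 * below it in that PMD, and free the destination window so that
	 * MREMAP_FIXED lands on unmapped space with the same 4 KiB offset.
	 * Matching offsets satisfy the mutual-alignment check in
	 * try_realign_addr(), and the empty space below each address
	 * satisfies can_align_down().
	 */
	unsigned long off = 4096;
	char *old_addr = (char *)(((unsigned long)src + PMD_SZ - 1) & ~(PMD_SZ - 1)) + off;
	char *new_addr = (char *)(((unsigned long)dst + PMD_SZ - 1) & ~(PMD_SZ - 1)) + off;
	size_t len = 8 * PMD_SZ;

	munmap(src, old_addr - src);	/* source VMA now starts at old_addr */
	munmap(dst, 16 * PMD_SZ);	/* destination range is unmapped     */

	memset(old_addr, 0xaa, len);	/* fault in pages so there are page tables to move */

	void *moved = mremap(old_addr, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
	if (moved == MAP_FAILED)
		return 1;

	printf("moved %p -> %p (%zu bytes)\n", (void *)old_addr, moved, len);
	return 0;
}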