mm/migrate: remove useless mask of start address

Addresses passed to walk_page_range() callback functions are already
page aligned and don't need to be masked with PAGE_MASK.
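
As a sanity check, here is a minimal user-space sketch (an
illustration only, not kernel code: PAGE_SHIFT, PAGE_SIZE and
PAGE_MASK are redefined locally as stand-ins for the kernel macros,
assuming 4 KiB pages) of why masking an already page-aligned address
with PAGE_MASK is a no-op:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* assume 4 KiB pages */
#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* clears the offset bits */

int main(void)
{
	/* a page-aligned address, as walk_page_range() callbacks receive */
	unsigned long start = 0x7f0000402000UL;

	assert((start & (PAGE_SIZE - 1)) == 0);	/* aligned ...            */
	assert((start & PAGE_MASK) == start);	/* ... so masking is a no-op */

	printf("0x%lx & PAGE_MASK == 0x%lx\n", start, start & PAGE_MASK);
	return 0;
}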

Link: http://lkml.kernel.org/r/20200107211208.24595-2-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Bharata B Rao <bharata@linux.ibm.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Chris Down <chris@chrisdown.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2156,7 +2156,7 @@ static int migrate_vma_collect_hole(unsigned long start,
 	struct migrate_vma *migrate = walk->private;
 	unsigned long addr;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
 		migrate->dst[migrate->npages] = 0;
 		migrate->npages++;
@@ -2173,7 +2173,7 @@ static int migrate_vma_collect_skip(unsigned long start,
 	struct migrate_vma *migrate = walk->private;
 	unsigned long addr;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		migrate->dst[migrate->npages] = 0;
 		migrate->src[migrate->npages++] = 0;
 	}