mm/gup: migrate pinned pages out of movable zone
We should not pin pages in ZONE_MOVABLE. Currently, only movable CMA pages are excluded from pinning. Generalize the function that migrates CMA pages so that it migrates all movable pages, and use is_pinnable_page() to check which pages need to be migrated.

Link: https://lkml.kernel.org/r/20210215161349.246722-10-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Commit d1e153fea2, parent 9afaf30f7a
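For orientation, is_pinnable_page() is the helper introduced earlier in this series. A minimal sketch of the idea, assuming the usual zone and pageblock helpers and not the verbatim upstream definition, would be:

    /* Sketch: long-term pinning is refused for ZONE_MOVABLE and CMA pages. */
    static inline bool is_pinnable_page(struct page *page)
    {
            return !(zone_idx(page_zone(page)) == ZONE_MOVABLE ||
                     is_migrate_cma_page(page));
    }

Pages that fail this check are exactly what check_and_migrate_movable_pages() below tries to move out before the pin is allowed to proceed.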
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -27,6 +27,7 @@ enum migrate_reason {
 	MR_MEMPOLICY_MBIND,
 	MR_NUMA_MISPLACED,
 	MR_CONTIG_RANGE,
+	MR_LONGTERM_PIN,
 	MR_TYPES
 };
 
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -407,8 +407,13 @@ enum zone_type {
 	 * to increase the number of THP/huge pages. Notable special cases are:
 	 *
 	 * 1. Pinned pages: (long-term) pinning of movable pages might
-	 *    essentially turn such pages unmovable. Memory offlining might
-	 *    retry a long time.
+	 *    essentially turn such pages unmovable. Therefore, we do not allow
+	 *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
+	 *    faulted, they come from the right zone right away. However, it is
+	 *    still possible that address space already has pages in
+	 *    ZONE_MOVABLE at the time when pages are pinned (i.e. user has
+	 *    touches that memory before pinning). In such case we migrate them
+	 *    to a different zone. When migration fails - pinning fails.
 	 * 2. memblock allocations: kernelcore/movablecore setups might create
 	 *    situations where ZONE_MOVABLE contains unmovable allocations
 	 *    after boot. Memory offlining and allocations fail early.
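The sequencing this comment describes is visible in __gup_longterm_locked() in mm/gup.c (see the hunks below). A simplified sketch, assuming FOLL_LONGTERM is set and using the hypothetical name gup_longterm_sketch():

    static long gup_longterm_sketch(struct mm_struct *mm, unsigned long start,
                                    unsigned long nr_pages, struct page **pages,
                                    struct vm_area_struct **vmas,
                                    unsigned int gup_flags)
    {
            unsigned int flags;
            long rc;

            /* Pages faulted in from here on are allocated outside ZONE_MOVABLE. */
            flags = memalloc_pin_save();
            rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
                                         NULL, gup_flags);
            /* Pages already resident in ZONE_MOVABLE are migrated out. */
            if (rc > 0)
                    rc = check_and_migrate_movable_pages(mm, start, rc, pages,
                                                         vmas, gup_flags);
            /* If migration failed, rc carries the error and the pin fails. */
            memalloc_pin_restore(flags);
            return rc;
    }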
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,8 @@
 	EM( MR_SYSCALL,		"syscall_or_cpuset")		\
 	EM( MR_MEMPOLICY_MBIND,	"mempolicy_mbind")		\
 	EM( MR_NUMA_MISPLACED,	"numa_misplaced")		\
-	EMe(MR_CONTIG_RANGE,	"contig_range")
+	EM( MR_CONTIG_RANGE,	"contig_range")			\
+	EMe(MR_LONGTERM_PIN,	"longterm_pin")
 
 /*
  * First define the enums in the above macros to be exported to userspace
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,11 +87,12 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
 		int orig_refs = refs;
 
 		/*
-		 * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
-		 * path, so fail and let the caller fall back to the slow path.
+		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
+		 * right zone, so fail and let the caller fall back to the slow
+		 * path.
 		 */
-		if (unlikely(flags & FOLL_LONGTERM) &&
-		    is_migrate_cma_page(page))
+		if (unlikely((flags & FOLL_LONGTERM) &&
+			     !is_pinnable_page(page)))
 			return NULL;
 
 		/*
@@ -1600,17 +1601,17 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */
 
-#ifdef CONFIG_CMA
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
-					unsigned long start,
-					unsigned long nr_pages,
-					struct page **pages,
-					struct vm_area_struct **vmas,
-					unsigned int gup_flags)
+#ifdef CONFIG_MIGRATION
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
+					    unsigned long start,
+					    unsigned long nr_pages,
+					    struct page **pages,
+					    struct vm_area_struct **vmas,
+					    unsigned int gup_flags)
 {
 	unsigned long i, isolation_error_count;
 	bool drain_allow;
-	LIST_HEAD(cma_page_list);
+	LIST_HEAD(movable_page_list);
 	long ret = nr_pages;
 	struct page *prev_head, *head;
 	struct migration_target_control mtc = {
@@ -1628,13 +1629,12 @@ check_again:
 			continue;
 		prev_head = head;
 		/*
-		 * If we get a page from the CMA zone, since we are going to
-		 * be pinning these entries, we might as well move them out
-		 * of the CMA zone if possible.
+		 * If we get a movable page, since we are going to be pinning
+		 * these entries, try to move them out if possible.
 		 */
-		if (is_migrate_cma_page(head)) {
+		if (!is_pinnable_page(head)) {
 			if (PageHuge(head)) {
-				if (!isolate_huge_page(head, &cma_page_list))
+				if (!isolate_huge_page(head, &movable_page_list))
 					isolation_error_count++;
 			} else {
 				if (!PageLRU(head) && drain_allow) {
@@ -1646,7 +1646,7 @@ check_again:
 					isolation_error_count++;
 					continue;
 				}
-				list_add_tail(&head->lru, &cma_page_list);
+				list_add_tail(&head->lru, &movable_page_list);
 				mod_node_page_state(page_pgdat(head),
 						    NR_ISOLATED_ANON +
 						    page_is_file_lru(head),
@@ -1659,10 +1659,10 @@ check_again:
 	 * If list is empty, and no isolation errors, means that all pages are
 	 * in the correct zone.
 	 */
-	if (list_empty(&cma_page_list) && !isolation_error_count)
+	if (list_empty(&movable_page_list) && !isolation_error_count)
 		return ret;
 
-	if (!list_empty(&cma_page_list)) {
+	if (!list_empty(&movable_page_list)) {
 		/*
 		 * drop the above get_user_pages reference.
 		 */
@@ -1672,12 +1672,12 @@ check_again:
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 
-		ret = migrate_pages(&cma_page_list, alloc_migration_target,
+		ret = migrate_pages(&movable_page_list, alloc_migration_target,
 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
-				    MR_CONTIG_RANGE);
+				    MR_LONGTERM_PIN);
 		if (ret) {
-			if (!list_empty(&cma_page_list))
-				putback_movable_pages(&cma_page_list);
+			if (!list_empty(&movable_page_list))
+				putback_movable_pages(&movable_page_list);
 			return ret > 0 ? -ENOMEM : ret;
 		}
 
@@ -1696,16 +1696,16 @@ check_again:
 		goto check_again;
 	}
 #else
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
-					unsigned long start,
-					unsigned long nr_pages,
-					struct page **pages,
-					struct vm_area_struct **vmas,
-					unsigned int gup_flags)
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
+					    unsigned long start,
+					    unsigned long nr_pages,
+					    struct page **pages,
+					    struct vm_area_struct **vmas,
+					    unsigned int gup_flags)
 {
 	return nr_pages;
 }
-#endif /* CONFIG_CMA */
+#endif /* CONFIG_MIGRATION */
 
 /*
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -1729,8 +1729,9 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 
 	if (gup_flags & FOLL_LONGTERM) {
 		if (rc > 0)
-			rc = check_and_migrate_cma_pages(mm, start, rc, pages,
-							 vmas, gup_flags);
+			rc = check_and_migrate_movable_pages(mm, start, rc,
+							     pages, vmas,
+							     gup_flags);
 		memalloc_pin_restore(flags);
 	}
 	return rc;
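As a usage note, any FOLL_LONGTERM pin now goes through this migration step. A minimal illustrative caller, not part of this patch, where addr is assumed to be a valid, already-mapped user virtual address:

    struct page *pages[16];
    long pinned;

    mmap_read_lock(current->mm);
    pinned = pin_user_pages(addr, ARRAY_SIZE(pages),
                            FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
    mmap_read_unlock(current->mm);
    if (pinned > 0)
            unpin_user_pages(pages, pinned);

If any of the requested pages sit in ZONE_MOVABLE (or on CMA pageblocks), they are migrated before the pin succeeds; when migration fails, pin_user_pages() returns an error instead of pinning an unmovable-in-practice page.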