mm/migration: return errno when isolate_huge_page failed
We might fail to isolate a huge page, e.g. because the page is under
migration, which cleared HPageMigratable. We should return an errno in
this case rather than always returning 1, which could confuse the user,
i.e. the caller might think all of the memory was migrated while the
hugetlb page was left behind. We make the prototype of isolate_huge_page
consistent with isolate_lru_page, as suggested by Huang Ying, and rename
isolate_huge_page to isolate_hugetlb, as suggested by Muchun, to improve
readability.
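
To make the convention change concrete, a minimal before/after sketch
(illustrative only; the surrounding caller and its error handling are
hypothetical, not part of this patch):

	LIST_HEAD(pagelist);
	int ret;

	/* Before: bool, true on success. */
	if (!isolate_huge_page(page, &pagelist))
		return -EBUSY;		/* caller has to invent an errno */

	/* After: int, 0 on success, negative errno on failure. */
	ret = isolate_hugetlb(page, &pagelist);
	if (ret)
		return ret;		/* the real errno propagates */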
Link: https://lkml.kernel.org/r/20220530113016.16663-4-linmiaohe@huawei.com
Fixes: e8db67eb0d ("mm: migrate: move_pages() supports thp migration")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Suggested-by: Huang Ying <ying.huang@intel.com>
Reported-by: kernel test robot <lkp@intel.com> (build error)
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 7ce82f4c3f (parent 160088b3b6)
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -170,7 +170,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 						long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct page *page, struct list_head *list);
 int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
 void putback_active_hugepage(struct page *page);
@@ -376,9 +376,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 	return NULL;
 }
 
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline int isolate_hugetlb(struct page *page, struct list_head *list)
 {
-	return false;
+	return -EBUSY;
 }
 
 static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1930,7 +1930,7 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
 		 * Try to move out any movable page before pinning the range.
 		 */
 		if (folio_test_hugetlb(folio)) {
-			if (!isolate_huge_page(&folio->page,
+			if (isolate_hugetlb(&folio->page,
 						&movable_page_list))
 				isolation_error_count++;
 			continue;
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2766,8 +2766,7 @@ retry:
 		 * Fail with -EBUSY if not possible.
 		 */
 		spin_unlock_irq(&hugetlb_lock);
-		if (!isolate_huge_page(old_page, list))
-			ret = -EBUSY;
+		ret = isolate_hugetlb(old_page, list);
 		spin_lock_irq(&hugetlb_lock);
 		goto free_new;
 	} else if (!HPageFreed(old_page)) {
@@ -2843,7 +2842,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 	if (hstate_is_gigantic(h))
 		return -ENOMEM;
 
-	if (page_count(head) && isolate_huge_page(head, list))
+	if (page_count(head) && !isolate_hugetlb(head, list))
 		ret = 0;
 	else if (!page_count(head))
 		ret = alloc_and_dissolve_huge_page(h, head, list);
@@ -6960,15 +6959,15 @@ follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
 }
 
-bool isolate_huge_page(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct page *page, struct list_head *list)
 {
-	bool ret = true;
+	int ret = 0;
 
 	spin_lock_irq(&hugetlb_lock);
 	if (!PageHeadHuge(page) ||
 	    !HPageMigratable(page) ||
 	    !get_page_unless_zero(page)) {
-		ret = false;
+		ret = -EBUSY;
 		goto unlock;
 	}
 	ClearHPageMigratable(page);
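
For readability, here is how isolate_hugetlb() reads as a whole after
this patch. The lines past ClearHPageMigratable() fall outside the hunk
above and are reconstructed, so treat the tail as an assumption:

	int isolate_hugetlb(struct page *page, struct list_head *list)
	{
		int ret = 0;

		spin_lock_irq(&hugetlb_lock);
		if (!PageHeadHuge(page) ||
		    !HPageMigratable(page) ||
		    !get_page_unless_zero(page)) {
			ret = -EBUSY;
			goto unlock;
		}
		ClearHPageMigratable(page);
		/* Reconstructed tail (not in the hunk): queue the page, drop the lock. */
		list_move_tail(&page->lru, list);
	unlock:
		spin_unlock_irq(&hugetlb_lock);
		return ret;
	}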
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2178,7 +2178,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
 	bool lru = PageLRU(page);
 
 	if (PageHuge(page)) {
-		isolated = isolate_huge_page(page, pagelist);
+		isolated = !isolate_hugetlb(page, pagelist);
 	} else {
 		if (lru)
 			isolated = !isolate_lru_page(page);
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1641,7 +1641,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
 		if (PageHuge(page)) {
 			pfn = page_to_pfn(head) + compound_nr(head) - 1;
-			isolate_huge_page(head, &source);
+			isolate_hugetlb(head, &source);
 			continue;
 		} else if (PageTransHuge(page))
 			pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -602,7 +602,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 	if (flags & (MPOL_MF_MOVE_ALL) ||
 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
-		if (!isolate_huge_page(page, qp->pagelist) &&
+		if (isolate_hugetlb(page, qp->pagelist) &&
 		    (flags & MPOL_MF_STRICT))
 			/*
 			 * Failed to isolate page but allow migrating pages
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -133,7 +133,7 @@ static void putback_movable_page(struct page *page)
  *
  * This function shall be used whenever the isolated pageset has been
  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
- * and isolate_huge_page().
+ * and isolate_hugetlb().
  */
 void putback_movable_pages(struct list_head *l)
 {
@@ -1628,8 +1628,9 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 
 	if (PageHuge(page)) {
 		if (PageHead(page)) {
-			isolate_huge_page(page, pagelist);
-			err = 1;
+			err = isolate_hugetlb(page, pagelist);
+			if (!err)
+				err = 1;
 		}
 	} else {
 		struct page *head;
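
The add_page_for_migration() change is what fixes the user-visible bug:
a positive return (1) still means the page was queued for migration, but
an isolation failure now surfaces as a negative errno in the page's
status instead of being silently reported as success. A simplified
sketch of the do_pages_move() caller side (shape only, not the literal
kernel code):

	err = add_page_for_migration(mm, addr, node, &pagelist,
				     flags & MPOL_MF_MOVE_ALL);
	if (err > 0)
		continue;	/* queued; status is written after migration */
	/* err is 0 or a negative errno: report it as this page's status */
	err = store_status(status, i, err, 1);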