mm/migrate: Convert remove_migration_ptes() to folios
Convert the implementation and all callers.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
commit 4eecb8b916
parent 0d2514859c
include/linux/rmap.h
@@ -261,7 +261,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  */
 int folio_mkclean(struct folio *);
 
-void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
 
 /*
  * Called by memory-failure.c to kill processes.
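The declaration change pushes the folio conversion out to the callers. A caller that still holds struct page pointers converts at the call site; a minimal sketch, assuming hypothetical old_page/new_page variables (page_folio() works on head and tail pages alike):

        /* Hypothetical caller: wrap struct page in folios first. */
        struct folio *src = page_folio(old_page);       /* old_page: hypothetical */
        struct folio *dst = page_folio(new_page);       /* new_page: hypothetical */

        remove_migration_ptes(src, dst, false);         /* locked == false: rmap_walk() takes the locks */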
mm/huge_memory.c
@@ -2270,18 +2270,19 @@ static void unmap_page(struct page *page)
 	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }
 
-static void remap_page(struct page *page, unsigned int nr)
+static void remap_page(struct folio *folio, unsigned long nr)
 {
-	int i;
+	int i = 0;
 
 	/* If unmap_page() uses try_to_migrate() on file, remove this check */
-	if (!PageAnon(page))
+	if (!folio_test_anon(folio))
 		return;
-	if (PageTransHuge(page)) {
-		remove_migration_ptes(page, page, true);
-	} else {
-		for (i = 0; i < nr; i++)
-			remove_migration_ptes(page + i, page + i, true);
+	for (;;) {
+		remove_migration_ptes(folio, folio, true);
+		i += folio_nr_pages(folio);
+		if (i >= nr)
+			break;
+		folio = folio_next(folio);
 	}
 }
 
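After a split, what used to be one compound page is a run of separate folios, possibly of mixed sizes, so remap_page() can no longer branch on PageTransHuge() versus a per-page loop; it advances folio by folio until nr pages are covered. For reference, folio_next() is roughly the following (a sketch of the existing helper, not code added by this patch):

        /* Sketch: step to the folio that starts immediately after this one. */
        static inline struct folio *folio_next_sketch(struct folio *folio)
        {
                return (struct folio *)folio_page(folio, folio_nr_pages(folio));
        }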
@@ -2441,7 +2442,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	}
 	local_irq_enable();
 
-	remap_page(head, nr);
+	remap_page(folio, nr);
 
 	if (PageSwapCache(head)) {
 		swp_entry_t entry = { .val = page_private(head) };
@@ -2550,7 +2551,8 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
  */
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
-	struct page *head = compound_head(page);
+	struct folio *folio = page_folio(page);
+	struct page *head = &folio->page;
 	struct deferred_split *ds_queue = get_deferred_split_queue(head);
 	XA_STATE(xas, &head->mapping->i_pages, head->index);
 	struct anon_vma *anon_vma = NULL;
@@ -2667,7 +2669,7 @@ fail:
 		if (mapping)
 			xas_unlock(&xas);
 		local_irq_enable();
-		remap_page(head, thp_nr_pages(head));
+		remap_page(folio, folio_nr_pages(folio));
 		ret = -EBUSY;
 	}
 
mm/migrate.c
@@ -174,30 +174,32 @@ void putback_movable_pages(struct list_head *l)
 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				 unsigned long addr, void *old)
 {
-	DEFINE_PAGE_VMA_WALK(pvmw, (struct page *)old, vma, addr,
-						PVMW_SYNC | PVMW_MIGRATION);
-	struct page *new;
-	pte_t pte;
-	swp_entry_t entry;
+	struct folio *folio = page_folio(page);
+	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	while (page_vma_mapped_walk(&pvmw)) {
-		if (PageKsm(page))
-			new = page;
-		else
-			new = page - pvmw.pgoff +
-				linear_page_index(vma, pvmw.address);
+		pte_t pte;
+		swp_entry_t entry;
+		struct page *new;
+		unsigned long idx = 0;
+
+		/* pgoff is invalid for ksm pages, but they are never large */
+		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
+		new = folio_page(folio, idx);
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 		/* PMD-mapped THP migration entry */
 		if (!pvmw.pte) {
-			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
+			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
+					!folio_test_pmd_mappable(folio), folio);
 			remove_migration_pmd(&pvmw, new);
 			continue;
 		}
 #endif
 
-		get_page(new);
+		folio_get(folio);
 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
 		if (pte_swp_soft_dirty(*pvmw.pte))
 			pte = pte_mksoft_dirty(pte);
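Within a large folio, consecutive PTEs map consecutive sub-pages, so the walk callback must compute which sub-page the current PTE refers to. A worked example with hypothetical numbers:

        /*
         * Hypothetical: a 16-page anon folio whose head page has pgoff 512
         * (so pvmw.pgoff == 512), walked at an address that corresponds to
         * index 514 in this VMA:
         *
         *      idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff
         *          = 514 - 512 = 2;
         *      new = folio_page(folio, 2);     // third page of the folio
         *
         * KSM pages have no meaningful pgoff, but they are never large,
         * so idx stays 0 and new is the folio's sole page.
         */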
@@ -226,12 +228,12 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		}
 
 #ifdef CONFIG_HUGETLB_PAGE
-		if (PageHuge(new)) {
+		if (folio_test_hugetlb(folio)) {
 			unsigned int shift = huge_page_shift(hstate_vma(vma));
 
 			pte = pte_mkhuge(pte);
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
-			if (PageAnon(new))
+			if (folio_test_anon(folio))
 				hugepage_add_anon_rmap(new, vma, pvmw.address);
 			else
 				page_dup_rmap(new, true);
@@ -239,7 +241,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		} else
 #endif
 		{
-			if (PageAnon(new))
+			if (folio_test_anon(folio))
 				page_add_anon_rmap(new, vma, pvmw.address, false);
 			else
 				page_add_file_rmap(new, vma, false);
@@ -259,17 +261,17 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct page *old, struct page *new, bool locked)
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = remove_migration_pte,
-		.arg = old,
+		.arg = src,
 	};
 
 	if (locked)
-		rmap_walk_locked(new, &rwc);
+		rmap_walk_locked(&dst->page, &rwc);
 	else
-		rmap_walk(new, &rwc);
+		rmap_walk(&dst->page, &rwc);
 }
 
 /*
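rmap_walk() and rmap_walk_locked() are still page-based at this point, so the destination folio crosses that boundary as &dst->page, while the source folio rides in rwc.arg and reappears as the void *old argument of remove_migration_pte(). A sketch of why the bridge loses nothing:

        /* Bridge sketch: page_folio(&dst->page) == dst for any folio,
         * so handing rmap_walk() a page here discards no information. */
        rmap_walk(&dst->page, &rwc);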
@@ -756,6 +758,7 @@ int buffer_migrate_page_norefs(struct address_space *mapping,
  */
 static int writeout(struct address_space *mapping, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 		.nr_to_write = 1,
@@ -781,7 +784,7 @@ static int writeout(struct address_space *mapping, struct page *page)
 	 * At this point we know that the migration attempt cannot
 	 * be successful.
 	 */
-	remove_migration_ptes(page, page, false);
+	remove_migration_ptes(folio, folio, false);
 
 	rc = mapping->a_ops->writepage(page, &wbc);
 
@@ -913,6 +916,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 				int force, enum migrate_mode mode)
 {
 	struct folio *folio = page_folio(page);
+	struct folio *dst = page_folio(newpage);
 	int rc = -EAGAIN;
 	bool page_was_mapped = false;
 	struct anon_vma *anon_vma = NULL;
@@ -1039,8 +1043,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	if (page_was_mapped)
-		remove_migration_ptes(page,
-			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
+		remove_migration_ptes(folio,
+			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);
 
 out_unlock_both:
 	unlock_page(newpage);
@@ -1166,7 +1170,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 				enum migrate_mode mode, int reason,
 				struct list_head *ret)
 {
-	struct folio *src = page_folio(hpage);
+	struct folio *dst, *src = page_folio(hpage);
 	int rc = -EAGAIN;
 	int page_was_mapped = 0;
 	struct page *new_hpage;
@@ -1194,6 +1198,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	new_hpage = get_new_page(hpage, private);
 	if (!new_hpage)
 		return -ENOMEM;
+	dst = page_folio(new_hpage);
 
 	if (!trylock_page(hpage)) {
 		if (!force)
@@ -1254,8 +1259,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		rc = move_to_new_page(new_hpage, hpage, mode);
 
 	if (page_was_mapped)
-		remove_migration_ptes(hpage,
-			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
+		remove_migration_ptes(src,
+			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
 
 unlock_put_anon:
 	unlock_page(new_hpage);
mm/migrate_device.c
@@ -376,15 +376,17 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 
 	for (i = 0; i < npages && restore; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+		struct folio *folio;
 
 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
 			continue;
 
-		remove_migration_ptes(page, page, false);
+		folio = page_folio(page);
+		remove_migration_ptes(folio, folio, false);
 
 		migrate->src[i] = 0;
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 		restore--;
 	}
 }
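When migration of an entry fails, the restore path points the migration entries back at the source itself: passing the same folio as both src and dst turns each migration entry back into a normal PTE for the original memory. A minimal sketch of the idiom, assuming a hypothetical src_page:

        struct folio *folio = page_folio(src_page);     /* src_page: hypothetical */

        remove_migration_ptes(folio, folio, false);     /* src == dst: restore in place */
        folio_unlock(folio);                            /* pairs with the lock taken for migration */
        folio_put(folio);                               /* drop the migration reference */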
@@ -729,6 +731,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 	unsigned long i;
 
 	for (i = 0; i < npages; i++) {
+		struct folio *dst, *src;
 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
 
@@ -748,8 +751,10 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 			newpage = page;
 		}
 
-		remove_migration_ptes(page, newpage, false);
-		unlock_page(page);
+		src = page_folio(page);
+		dst = page_folio(newpage);
+		remove_migration_ptes(src, dst, false);
+		folio_unlock(src);
 
 		if (is_zone_device_page(page))
 			put_page(page);