mm/khugepaged: recover from poisoned file-backed memory

Make collapse_file() roll back when copying pages fails. More concretely:
- extract the copying operations into a separate loop
- postpone the updates to nr_none until both scanning and copying have
  succeeded
- postpone joining small xarray entries until both scanning and copying
  have succeeded
- postpone the update operations to NR_XXX_THPS until both scanning and
  copying have succeeded
- for non-SHMEM files, roll back filemap_nr_thps_inc if scanning
  succeeded but copying failed (see the sketch below)
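
Viewed at a high level, the reordering looks like the following
standalone sketch. This is not kernel code: copy_subpage_mc(),
commit_collapse() and rollback_collapse() are made-up placeholders for
copy_mc_highpage() and for the postponed/rolled-back state updates.

  #include <stdbool.h>
  #include <stdio.h>

  #define NR_SUBPAGES 512			/* stand-in for HPAGE_PMD_NR */

  /*
   * Hypothetical stand-in for copy_mc_highpage(): returns false when the
   * source subpage is poisoned and the copy hits a machine check.
   */
  static bool copy_subpage_mc(int i)
  {
  	return i != 13;			/* pretend subpage 13 is poisoned */
  }

  /* Hypothetical stand-ins for the state updates the patch postpones. */
  static void commit_collapse(void)
  {
  	puts("update nr_none / NR_*_THPS and store the multi-index entry");
  }

  static void rollback_collapse(void)
  {
  	puts("roll back page cache changes (incl. filemap_nr_thps_inc)");
  }

  int main(void)
  {
  	bool copy_ok = true;
  	int i;

  	/* Scanning is assumed to have succeeded before this point. */

  	/* Copy loop, now separate from the loop that frees old pages. */
  	for (i = 0; i < NR_SUBPAGES; i++) {
  		if (!copy_subpage_mc(i)) {
  			copy_ok = false;	/* result = SCAN_COPY_MC */
  			break;
  		}
  	}

  	/* Commit only if both scanning and copying succeeded. */
  	if (copy_ok)
  		commit_collapse();
  	else
  		rollback_collapse();

  	return 0;
  }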

Tested manually:
0. Enable khugepaged on the system under test. Mount a tmpfs at
   /mnt/ramdisk.
1. Start a two-thread application. Each thread allocates a non-huge
   memory buffer from /mnt/ramdisk.
2. Pick 4 random buffer addresses (2 in each thread) and inject
   uncorrectable memory errors at the corresponding physical addresses.
3. Signal both threads to make their memory buffers collapsible, i.e.
   by calling madvise(MADV_HUGEPAGE).
4. Wait and then check the kernel log: khugepaged is able to recover
   from the poisoned pages by skipping them.
5. Signal both threads to inspect their buffer contents and verify that
   there is no data corruption (a simplified userspace sketch of steps
   1, 3, and 5 follows).
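
For reference, the userspace side of steps 1, 3, and 5 can look roughly
like the snippet below. It uses a single buffer and a single "thread"
for brevity, reads stdin in place of the signalling, and the file path,
size, and fill pattern are assumptions rather than the original test
harness; the error injection of step 2 is done separately (e.g. via a
platform error-injection facility).

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  #define BUF_SZ (4UL << 20)	/* a few PMD-sized ranges (assumption) */

  int main(void)
  {
  	/* Step 1: back the buffer with a file on the tmpfs mount. */
  	int fd = open("/mnt/ramdisk/buf", O_RDWR | O_CREAT, 0600);
  	if (fd < 0 || ftruncate(fd, BUF_SZ))
  		return 1;

  	char *buf = mmap(NULL, BUF_SZ, PROT_READ | PROT_WRITE,
  			 MAP_SHARED, fd, 0);
  	if (buf == MAP_FAILED)
  		return 1;
  	memset(buf, 0xab, BUF_SZ);	/* known pattern for the later check */

  	/* (Step 2, not shown: inject uncorrectable errors at the physical
  	 * addresses backing a few of these pages.) */
  	puts("buffer populated; inject errors now, then press Enter");
  	getchar();

  	/* Step 3: make the range collapsible and let khugepaged run. */
  	if (madvise(buf, BUF_SZ, MADV_HUGEPAGE))
  		perror("madvise(MADV_HUGEPAGE)");
  	puts("waiting for khugepaged; press Enter to verify contents");
  	getchar();

  	/* Step 5: verify the buffer contents. The real test skips the few
  	 * offsets where errors were injected; this simplified sketch walks
  	 * the whole buffer. */
  	for (size_t i = 0; i < BUF_SZ; i++) {
  		if (buf[i] != (char)0xab) {
  			fprintf(stderr, "corruption at offset %zu\n", i);
  			return 1;
  		}
  	}
  	puts("no corruption detected");
  	return 0;
  }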

Link: https://lkml.kernel.org/r/20230329151121.949896-4-jiaqiyan@google.com
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: David Stevens <stevensd@chromium.org>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Tong Tiangen <tongtiangen@huawei.com>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1877,6 +1877,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *hpage;
+	struct page *page;
+	struct page *tmp;
+	struct folio *folio;
 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1921,8 +1924,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 
 	xas_set(&xas, start);
 	for (index = start; index < end; index++) {
-		struct page *page = xas_next(&xas);
-		struct folio *folio;
+		page = xas_next(&xas);
 
 		VM_BUG_ON(index != xas.xa_index);
 		if (is_shmem) {
@@ -2116,12 +2118,8 @@ out_unlock:
 			put_page(page);
 			goto xa_unlocked;
 		}
-	nr = thp_nr_pages(hpage);
 
-	if (is_shmem)
-		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
-	else {
-		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+	if (!is_shmem) {
 		filemap_nr_thps_inc(mapping);
 		/*
 		 * Paired with smp_mb() in do_dentry_open() to ensure
@@ -2132,21 +2130,10 @@ out_unlock:
 		smp_mb();
 		if (inode_is_open_for_write(mapping->host)) {
 			result = SCAN_FAIL;
-			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
 			filemap_nr_thps_dec(mapping);
 			goto xa_locked;
 		}
 	}
-
-	if (nr_none) {
-		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
-		/* nr_none is always 0 for non-shmem. */
-		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
-	}
-
-	/* Join all the small entries into a single multi-index entry */
-	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-	xas_store(&xas, hpage);
 	/* Here we can't get an ENOMEM (because entries were
 	 * previously allocated) But let's check for errors
 	 * (XArray implementation can be changed in the future)
@@ -2164,21 +2151,36 @@ xa_unlocked:
 	try_to_unmap_flush();
 
 	if (result == SCAN_SUCCEED) {
-		struct page *page, *tmp;
-		struct folio *folio;
-
 		/*
 		 * Replacing old pages with new one has succeeded, now we
-		 * need to copy the content and free the old pages.
+		 * attempt to copy the contents.
 		 */
 		index = start;
-		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+		list_for_each_entry(page, &pagelist, lru) {
 			while (index < page->index) {
 				clear_highpage(hpage + (index % HPAGE_PMD_NR));
 				index++;
 			}
-			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
-				      page);
+			if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
+					     page) > 0) {
+				result = SCAN_COPY_MC;
+				break;
+			}
+			index++;
+		}
+		while (result == SCAN_SUCCEED && index < end) {
+			clear_highpage(hpage + (index % HPAGE_PMD_NR));
+			index++;
+		}
+	}
+
+	nr = thp_nr_pages(hpage);
+	if (result == SCAN_SUCCEED) {
+		/*
+		 * Copying old pages to huge one has succeeded, now we
+		 * need to free the old pages.
+		 */
+		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
 			list_del(&page->lru);
 			page->mapping = NULL;
 			page_ref_unfreeze(page, 1);
@@ -2186,12 +2188,23 @@ xa_unlocked:
 			ClearPageUnevictable(page);
 			unlock_page(page);
 			put_page(page);
-			index++;
 		}
-		while (index < end) {
-			clear_highpage(hpage + (index % HPAGE_PMD_NR));
-			index++;
-		}
+
+		xas_lock_irq(&xas);
+		if (is_shmem)
+			__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+		else
+			__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+
+		if (nr_none) {
+			__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+			/* nr_none is always 0 for non-shmem. */
+			__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
+		}
+		/* Join all the small entries into a single multi-index entry. */
+		xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+		xas_store(&xas, hpage);
+		xas_unlock_irq(&xas);
 
 		folio = page_folio(hpage);
 		folio_mark_uptodate(folio);
@@ -2209,8 +2222,6 @@ xa_unlocked:
 		unlock_page(hpage);
 		hpage = NULL;
 	} else {
-		struct page *page;
-
 		/* Something went wrong: roll back page cache changes */
 		xas_lock_irq(&xas);
 		if (nr_none) {
@@ -2244,6 +2255,20 @@ xa_unlocked:
 			xas_lock_irq(&xas);
 		}
 		VM_BUG_ON(nr_none);
+		/*
+		 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
+		 * file only. This undo is not needed unless failure is
+		 * due to SCAN_COPY_MC.
+		 */
+		if (!is_shmem && result == SCAN_COPY_MC) {
+			filemap_nr_thps_dec(mapping);
+			/*
+			 * Paired with smp_mb() in do_dentry_open() to
+			 * ensure the update to nr_thps is visible.
+			 */
+			smp_mb();
+		}
+
 		xas_unlock_irq(&xas);
 
 		hpage->mapping = NULL;