diff --git a/mm/truncate.c b/mm/truncate.c
index 5ce62a939e55..dfb3d1f4d456 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -39,12 +39,25 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 	xas_store(&xas, NULL);
 }
 
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
-			       void *entry)
+static void clear_shadow_entries(struct address_space *mapping,
+				 struct folio_batch *fbatch, pgoff_t *indices)
 {
+	int i;
+
+	/* Handled by shmem itself, or for DAX we do nothing. */
+	if (shmem_mapping(mapping) || dax_mapping(mapping))
+		return;
+
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
-	__clear_shadow_entry(mapping, index, entry);
+
+	for (i = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
+
+		if (xa_is_value(folio))
+			__clear_shadow_entry(mapping, indices[i], folio);
+	}
+
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
@@ -105,36 +118,6 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 	fbatch->nr = j;
 }
 
-/*
- * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages().
- */
-static int invalidate_exceptional_entry(struct address_space *mapping,
-					pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself, or for DAX we do nothing. */
-	if (shmem_mapping(mapping) || dax_mapping(mapping))
-		return 1;
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
-}
-
-/*
- * Invalidate exceptional entry if clean. This handles exceptional entries for
- * invalidate_inode_pages2() so for DAX it evicts only clean entries.
- */
-static int invalidate_exceptional_entry2(struct address_space *mapping,
-					 pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself */
-	if (shmem_mapping(mapping))
-		return 1;
-	if (dax_mapping(mapping))
-		return dax_invalidate_mapping_entry_sync(mapping, index);
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
-}
-
 /**
  * folio_invalidate - Invalidate part or all of a folio.
  * @folio: The folio which is affected.
@@ -494,6 +477,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 	unsigned long ret;
 	unsigned long count = 0;
 	int i;
+	bool xa_has_values = false;
 
 	folio_batch_init(&fbatch);
 	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
@@ -503,8 +487,8 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 
 			/* We rely upon deletion not changing folio->index */
 			if (xa_is_value(folio)) {
-				count += invalidate_exceptional_entry(mapping,
-								      indices[i], folio);
+				xa_has_values = true;
+				count++;
 				continue;
 			}
 
@@ -522,6 +506,10 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 			}
 			count += ret;
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, &fbatch, indices);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
@@ -616,6 +604,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int ret = 0;
 	int ret2 = 0;
 	int did_range_unmap = 0;
+	bool xa_has_values = false;
 
 	if (mapping_empty(mapping))
 		return 0;
@@ -629,8 +618,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
 			/* We rely upon deletion not changing folio->index */
 			if (xa_is_value(folio)) {
-				if (!invalidate_exceptional_entry2(mapping,
-								   indices[i], folio))
+				xa_has_values = true;
+				if (dax_mapping(mapping) &&
+				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
 					ret = -EBUSY;
 				continue;
 			}
@@ -666,6 +656,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			ret = ret2;
 			folio_unlock(folio);
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, &fbatch, indices);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();