mm/truncate: batch-clear shadow entries
Make clear_shadow_entry() clear shadow entries in `struct folio_batch` so
that it can reduce contention on i_lock and i_pages locks, e.g.,

  watchdog: BUG: soft lockup - CPU#29 stuck for 11s! [fio:2701649]
    clear_shadow_entry+0x3d/0x100
    mapping_try_invalidate+0x117/0x1d0
    invalidate_mapping_pages+0x10/0x20
    invalidate_bdev+0x3c/0x50
    blkdev_common_ioctl+0x5f7/0xa90
    blkdev_ioctl+0x109/0x270

Also, rename clear_shadow_entry() to clear_shadow_entries() accordingly.

[yuzhao@google.com: v2]
  Link: https://lkml.kernel.org/r/20240710060933.3979380-1-yuzhao@google.com
Link: https://lkml.kernel.org/r/20240708212753.3120511-1-yuzhao@google.com
Reported-by: Bharata B Rao <bharata@amd.com>
Closes: https://lore.kernel.org/d2841226-e27b-4d3d-a578-63587a3aa4f3@amd.com/
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8a78882dac
commit 61c663e020
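The change is essentially lock batching: instead of taking i_lock and the i_pages lock once per shadow entry, the invalidation paths now collect the value entries found in a folio_batch and clear them under a single lock acquisition. The following stand-alone user-space sketch illustrates that pattern only; it is not the kernel code. The pthread mutex stands in for the i_pages xa_lock (and i_lock), the slots[] array for the xarray, and clear_one()/clear_batch() are made-up names for illustration.

    /*
     * User-space analogue of the batching idea only; NOT the kernel code.
     * The mutex stands in for the i_pages lock, slots[] for the xarray.
     */
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t pages_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Old pattern: one lock/unlock round trip per shadow entry. */
    void clear_one(void **slots, size_t index)
    {
            pthread_mutex_lock(&pages_lock);
            slots[index] = NULL;
            pthread_mutex_unlock(&pages_lock);
    }

    /* New pattern: one lock/unlock round trip per batch of entries. */
    void clear_batch(void **slots, const size_t *indices, size_t nr)
    {
            pthread_mutex_lock(&pages_lock);
            for (size_t i = 0; i < nr; i++)
                    slots[indices[i]] = NULL;
            pthread_mutex_unlock(&pages_lock);
    }

With up to a folio_batch worth of entries per find_lock_entries()/find_get_entries() iteration, the locks are taken at most once per batch rather than once per entry, which is what removes the contention behind the soft lockup shown above.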
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -39,12 +39,25 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 	xas_store(&xas, NULL);
 }
 
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
-			       void *entry)
+static void clear_shadow_entries(struct address_space *mapping,
+				 struct folio_batch *fbatch, pgoff_t *indices)
 {
+	int i;
+
+	/* Handled by shmem itself, or for DAX we do nothing. */
+	if (shmem_mapping(mapping) || dax_mapping(mapping))
+		return;
+
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
-	__clear_shadow_entry(mapping, index, entry);
+
+	for (i = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
+
+		if (xa_is_value(folio))
+			__clear_shadow_entry(mapping, indices[i], folio);
+	}
+
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
@@ -105,36 +118,6 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 	fbatch->nr = j;
 }
 
-/*
- * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages().
- */
-static int invalidate_exceptional_entry(struct address_space *mapping,
-					pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself, or for DAX we do nothing. */
-	if (shmem_mapping(mapping) || dax_mapping(mapping))
-		return 1;
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
-}
-
-/*
- * Invalidate exceptional entry if clean. This handles exceptional entries for
- * invalidate_inode_pages2() so for DAX it evicts only clean entries.
- */
-static int invalidate_exceptional_entry2(struct address_space *mapping,
-					 pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself */
-	if (shmem_mapping(mapping))
-		return 1;
-	if (dax_mapping(mapping))
-		return dax_invalidate_mapping_entry_sync(mapping, index);
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
-}
-
 /**
  * folio_invalidate - Invalidate part or all of a folio.
  * @folio: The folio which is affected.
@@ -494,6 +477,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 	unsigned long ret;
 	unsigned long count = 0;
 	int i;
+	bool xa_has_values = false;
 
 	folio_batch_init(&fbatch);
 	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
@@ -503,8 +487,8 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 			/* We rely upon deletion not changing folio->index */
 
 			if (xa_is_value(folio)) {
-				count += invalidate_exceptional_entry(mapping,
-								      indices[i], folio);
+				xa_has_values = true;
+				count++;
 				continue;
 			}
 
@@ -522,6 +506,10 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 			}
 			count += ret;
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, &fbatch, indices);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
@@ -616,6 +604,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int ret = 0;
 	int ret2 = 0;
 	int did_range_unmap = 0;
+	bool xa_has_values = false;
 
 	if (mapping_empty(mapping))
 		return 0;
@@ -629,8 +618,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			/* We rely upon deletion not changing folio->index */
 
 			if (xa_is_value(folio)) {
-				if (!invalidate_exceptional_entry2(mapping,
-								   indices[i], folio))
+				xa_has_values = true;
+				if (dax_mapping(mapping) &&
+				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
 					ret = -EBUSY;
 				continue;
 			}
@@ -666,6 +656,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				ret = ret2;
 			folio_unlock(folio);
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, &fbatch, indices);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();