huge_memory: convert split_huge_page_to_list() to use a folio

Saves many calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-53-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2022-09-02 20:46:48 +01:00 committed by Andrew Morton
parent c33db29231
commit 3e9a13daa6

View File

@@ -2622,27 +2622,26 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
struct folio *folio = page_folio(page);
struct page *head = &folio->page;
struct deferred_split *ds_queue = get_deferred_split_queue(head);
XA_STATE(xas, &head->mapping->i_pages, head->index);
struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
XA_STATE(xas, &folio->mapping->i_pages, folio->index);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
int extra_pins, ret;
pgoff_t end;
bool is_hzp;
VM_BUG_ON_PAGE(!PageLocked(head), head);
VM_BUG_ON_PAGE(!PageCompound(head), head);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
is_hzp = is_huge_zero_page(head);
VM_WARN_ON_ONCE_PAGE(is_hzp, head);
is_hzp = is_huge_zero_page(&folio->page);
VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
if (is_hzp)
return -EBUSY;
if (PageWriteback(head))
if (folio_test_writeback(folio))
return -EBUSY;
if (PageAnon(head)) {
if (folio_test_anon(folio)) {
/*
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first we take a
@@ -2651,7 +2650,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* is taken to serialise against parallel split or collapse
* operations.
*/
anon_vma = page_get_anon_vma(head);
anon_vma = page_get_anon_vma(&folio->page);
if (!anon_vma) {
ret = -EBUSY;
goto out;
@@ -2662,7 +2661,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
} else {
gfp_t gfp;
mapping = head->mapping;
mapping = folio->mapping;
/* Truncated ? */
if (!mapping) {
@@ -2679,7 +2678,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
goto out;
}
xas_split_alloc(&xas, head, compound_order(head), gfp);
xas_split_alloc(&xas, folio, folio_order(folio), gfp);
if (xas_error(&xas)) {
ret = xas_error(&xas);
goto out;
@@ -2693,7 +2692,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
* which cannot be nested inside the page tree lock. So note
* end now: i_size itself may be changed at any moment, but
* head page lock is good enough to serialize the trimming.
* folio lock is good enough to serialize the trimming.
*/
end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
if (shmem_mapping(mapping))
@@ -2709,38 +2708,38 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
goto out_unlock;
}
unmap_page(head);
unmap_page(&folio->page);
/* block interrupt reentry in xa_lock and spinlock */
local_irq_disable();
if (mapping) {
/*
* Check if the head page is present in page cache.
* We assume all tail are present too, if head is there.
* Check if the folio is present in page cache.
* We assume all tail are present too, if folio is there.
*/
xas_lock(&xas);
xas_reset(&xas);
if (xas_load(&xas) != head)
if (xas_load(&xas) != folio)
goto fail;
}
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
if (page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
if (folio_ref_freeze(folio, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(&folio->page))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
list_del(page_deferred_list(&folio->page));
}
spin_unlock(&ds_queue->split_queue_lock);
if (mapping) {
int nr = thp_nr_pages(head);
int nr = folio_nr_pages(folio);
xas_split(&xas, head, thp_order(head));
if (PageSwapBacked(head)) {
__mod_lruvec_page_state(head, NR_SHMEM_THPS,
xas_split(&xas, folio, folio_order(folio));
if (folio_test_swapbacked(folio)) {
__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
-nr);
} else {
__mod_lruvec_page_state(head, NR_FILE_THPS,
__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
-nr);
filemap_nr_thps_dec(mapping);
}