mm: filemap: batch mm counter updating in filemap_map_pages()

Like copy_pte_range()/zap_pte_range(), batch the mm counter updates in
filemap_map_pages().  Since every folio mapped there has the same counter
type (MM_SHMEMPAGES or MM_FILEPAGES), checking the type of the first
folio is enough.  The 'lat_pagefault -P 1 file' test from lmbench shows a
12% improvement, and percpu_counter_add_batch() disappears from the perf
flame graph.

Link: https://lkml.kernel.org/r/20240412064751.119015-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
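
The win comes from the usual batching pattern: accumulate the per-page
counts in a local variable while the loop runs, then fold the total into
the shared counter once.  Below is a minimal, self-contained userspace
sketch of that idea, not the kernel code; counter_add(),
map_pages_unbatched() and map_pages_batched() are made-up names standing
in for percpu_counter_add_batch() and the mapping loop.

#include <stdio.h>

/* Stand-in for the shared per-mm counter; in the kernel each
 * add_mm_counter() call ends up in percpu_counter_add_batch(). */
static long mm_filepages;
static long counter_updates;

static void counter_add(long *counter, long delta)
{
	*counter += delta;
	counter_updates++;	/* models the per-call overhead */
}

/* Before the patch: one counter update per mapped page. */
static void map_pages_unbatched(int nr)
{
	for (int i = 0; i < nr; i++)
		counter_add(&mm_filepages, 1);
}

/* After the patch: accumulate into a local rss, update once. */
static void map_pages_batched(int nr)
{
	long rss = 0;

	for (int i = 0; i < nr; i++)
		rss++;		/* like *rss += count in the diff below */
	counter_add(&mm_filepages, rss);
}

int main(void)
{
	map_pages_unbatched(16);
	printf("unbatched: %ld counter updates\n", counter_updates);

	counter_updates = 0;
	map_pages_batched(16);
	printf("batched:   %ld counter update\n", counter_updates);
	return 0;
}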
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3505,7 +3505,7 @@ skip:
 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
-			unsigned int *mmap_miss)
+			unsigned long *rss, unsigned int *mmap_miss)
 {
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
@@ -3539,8 +3539,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 skip:
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
-		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
-			count);
+		*rss += count;
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3555,7 +3554,7 @@ skip:
 
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
-		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
+		*rss += count;
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3568,7 +3567,7 @@ skip:
 
 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		struct folio *folio, unsigned long addr,
-		unsigned int *mmap_miss)
+		unsigned long *rss, unsigned int *mmap_miss)
 {
 	vm_fault_t ret = 0;
 	struct page *page = &folio->page;
@@ -3592,7 +3591,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		ret = VM_FAULT_NOPAGE;
 
 	set_pte_range(vmf, folio, page, 1, addr);
-	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
+	(*rss)++;
 	folio_ref_inc(folio);
 
 	return ret;
@@ -3609,7 +3608,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
 	vm_fault_t ret = 0;
-	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
+	unsigned long rss = 0;
+	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type;
 
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3628,6 +3628,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		folio_put(folio);
 		goto out;
 	}
+
+	folio_type = mm_counter_file(folio);
 	do {
 		unsigned long end;
 
@@ -3639,15 +3641,16 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 
 		if (!folio_test_large(folio))
 			ret |= filemap_map_order0_folio(vmf,
-					folio, addr, &mmap_miss);
+					folio, addr, &rss, &mmap_miss);
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
-					nr_pages, &mmap_miss);
+					nr_pages, &rss, &mmap_miss);
 
 		folio_unlock(folio);
 		folio_put(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
+	add_mm_counter(vma->vm_mm, folio_type, rss);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
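
For comparison, copy_pte_range()/zap_pte_range() cannot assume a single
counter type, so they batch into a small per-type array and flush it once
(see add_mm_rss_vec() in mm/memory.c).  A rough sketch of that shape, with
illustrative names (NR_TYPES, flush_rss() and unmap_range() are not kernel
APIs):

/* Per-type batching for mixed batches; filemap_map_pages() can skip
 * the array because all of its folios share one type, decided by
 * mm_counter_file() on the first folio. */
enum { TYPE_FILE, TYPE_SHMEM, TYPE_ANON, NR_TYPES };

static long shared_counters[NR_TYPES];

static void flush_rss(const long rss[NR_TYPES])
{
	for (int t = 0; t < NR_TYPES; t++)
		if (rss[t])
			shared_counters[t] += rss[t];	/* one update per type */
}

static void unmap_range(const int *folio_type, int nr)
{
	long rss[NR_TYPES] = { 0 };

	for (int i = 0; i < nr; i++)
		rss[folio_type[i]]++;	/* cheap local accumulation */
	flush_rss(rss);
}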