mm/rmap: pass rmap flags to hugepage_add_anon_rmap()
Let's prepare for passing RMAP_EXCLUSIVE, similarly as we do for
page_add_anon_rmap() now. RMAP_COMPOUND is implicit for hugetlb pages and
ignored.

Link: https://lkml.kernel.org/r/20220428083441.37290-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Liang Zhang <zhangliang5@huawei.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Oded Gabbay <oded.gabbay@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent f1e2db12e4
commit 28c5209dfd
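For context, the rmap_t flag values that the new parameter carries (RMAP_NONE, RMAP_EXCLUSIVE, RMAP_COMPOUND) are defined in include/linux/rmap.h by earlier patches in this series as __bitwise values. The snippet below is a simplified, userspace-compilable sketch of those semantics, not the kernel definitions verbatim; the stand-in values are assumptions for illustration, and the point is how a flag word collapses into the 0/1 "exclusive" argument that __page_set_anon_rmap() takes.

#include <stdio.h>

/* Simplified stand-ins for the kernel's rmap_t flags (the real definitions in
 * include/linux/rmap.h carry __bitwise annotations for sparse); the bit
 * positions here are assumptions for illustration only. */
typedef int rmap_t;
#define RMAP_NONE	((rmap_t)0)        /* no special request */
#define RMAP_EXCLUSIVE	((rmap_t)(1 << 0)) /* page is exclusive to a single process */
#define RMAP_COMPOUND	((rmap_t)(1 << 1)) /* compound/PMD mapping; implicit and ignored for hugetlb */

int main(void)
{
        rmap_t flags = RMAP_EXCLUSIVE;

        /* Mirrors the "!!(flags & RMAP_EXCLUSIVE)" conversion in this patch:
         * reduce the flag word to the boolean "exclusive" argument. */
        printf("exclusive = %d\n", !!(flags & RMAP_EXCLUSIVE));
        return 0;
}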
include/linux/rmap.h
@@ -191,7 +191,7 @@ void page_add_file_rmap(struct page *, struct vm_area_struct *,
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long address);
+		unsigned long address, rmap_t flags);
 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address);
mm/migrate.c
@@ -232,7 +232,8 @@ static bool remove_migration_pte(struct folio *folio,
 			pte = pte_mkhuge(pte);
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 			if (folio_test_anon(folio))
-				hugepage_add_anon_rmap(new, vma, pvmw.address);
+				hugepage_add_anon_rmap(new, vma, pvmw.address,
+						       RMAP_NONE);
 			else
 				page_dup_file_rmap(new, true);
 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
mm/rmap.c
@@ -2391,9 +2391,11 @@ void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc)
  * The following two functions are for anonymous (private mapped) hugepages.
  * Unlike common anonymous pages, anonymous hugepages have no accounting code
  * and no lru code, because we handle hugepages differently from common pages.
+ *
+ * RMAP_COMPOUND is ignored.
  */
-void hugepage_add_anon_rmap(struct page *page,
-			    struct vm_area_struct *vma, unsigned long address)
+void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+			    unsigned long address, rmap_t flags)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	int first;

@@ -2403,7 +2405,8 @@ void hugepage_add_anon_rmap(struct page *page,
 	/* address might be in next vma when migration races vma_adjust */
 	first = atomic_inc_and_test(compound_mapcount_ptr(page));
 	if (first)
-		__page_set_anon_rmap(page, vma, address, 0);
+		__page_set_anon_rmap(page, vma, address,
+				     !!(flags & RMAP_EXCLUSIVE));
 }

 void hugepage_add_new_anon_rmap(struct page *page,
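Putting the two mm/rmap.c hunks together, the function reads roughly as follows after this patch. This is assembled only from the hunk context shown above; the lines that fall between the two hunks (the function's sanity checks) are not visible in the diff and are elided here.

void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, rmap_t flags)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	/* ... checks between the two hunks are not shown in this diff ... */

	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		/* RMAP_EXCLUSIVE is forwarded as the "exclusive" bit;
		 * RMAP_COMPOUND is implicit for hugetlb pages and ignored. */
		__page_set_anon_rmap(page, vma, address,
				     !!(flags & RMAP_EXCLUSIVE));
}

The caller updated here, remove_migration_pte(), keeps today's behaviour by passing RMAP_NONE; actually passing RMAP_EXCLUSIVE is left to later patches in the series, as the commit message notes.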