hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()
hugetlb_no_page() and hugetlb_wp() call anon_vma_prepare(). In
preparation for hugetlb to safely handle faults under the VMA lock, use
vmf_anon_prepare() here instead.

Additionally, passing hugetlb_wp() the vm_fault struct from
hugetlb_fault() works toward cleaning up the hugetlb code and function
stack.

Link: https://lkml.kernel.org/r/20240221234732.187629-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
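For context, vmf_anon_prepare() is the helper made available to hugetlb earlier in this series. A minimal sketch of its behavior, based on the mm/memory.c helper of this era (simplified, not a verbatim copy): if the fault is being handled under the per-VMA lock and no anon_vma exists yet, it releases the VMA lock and returns VM_FAULT_RETRY so the fault can be retried under the mmap_lock, where anon_vma setup is safe; otherwise it behaves like anon_vma_prepare().

/*
 * Minimal sketch of vmf_anon_prepare(), per the mm/memory.c helper of
 * this era (simplified). Setting up an anon_vma is unsafe under the
 * per-VMA lock, so drop that lock and let the caller retry the fault
 * with the mmap_lock held instead.
 */
vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	if (likely(vma->anon_vma))
		return 0;
	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}
	if (__anon_vma_prepare(vma))
		return VM_FAULT_OOM;
	return 0;
}

This is why the callers below switch from checking anon_vma_prepare() and hardcoding VM_FAULT_OOM to propagating whatever vmf_anon_prepare() returns.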
parent 7dac0ec8fa
commit 9acad7ba3e
 mm/hugetlb.c | 18 +++++++++---------
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5851,7 +5851,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		       unsigned long address, pte_t *ptep, unsigned int flags,
-		       struct folio *pagecache_folio, spinlock_t *ptl)
+		       struct folio *pagecache_folio, spinlock_t *ptl,
+		       struct vm_fault *vmf)
 {
 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
 	pte_t pte = huge_ptep_get(ptep);
@@ -5985,10 +5986,9 @@ retry_avoidcopy:
 	 * When the original hugepage is shared one, it does not have
 	 * anon_vma prepared.
 	 */
-	if (unlikely(anon_vma_prepare(vma))) {
-		ret = VM_FAULT_OOM;
+	ret = vmf_anon_prepare(vmf);
+	if (unlikely(ret))
 		goto out_release_all;
-	}
 
 	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
@@ -6228,10 +6228,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			new_pagecache_folio = true;
 		} else {
 			folio_lock(folio);
-			if (unlikely(anon_vma_prepare(vma))) {
-				ret = VM_FAULT_OOM;
+
+			ret = vmf_anon_prepare(vmf);
+			if (unlikely(ret))
 				goto backout_unlocked;
-			}
 			anon_rmap = 1;
 		}
 	} else {
@@ -6298,7 +6298,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
+		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
 	}
 
 	spin_unlock(ptl);
@@ -6521,7 +6521,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(entry)) {
 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
-					 pagecache_folio, ptl);
+					 pagecache_folio, ptl, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			entry = huge_pte_mkdirty(entry);
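The VM_FAULT_RETRY that vmf_anon_prepare() can return is only useful because callers that attempted the fault under the per-VMA lock fall back to the mmap_lock path. A rough caller-side sketch, modeled on the common arch fault-handler pattern of this era (e.g. x86's do_user_addr_fault()); simplified and not verbatim:

	/* Try the fault under the per-VMA lock first. */
	vma = lock_vma_under_rcu(mm, address);
	if (vma) {
		fault = handle_mm_fault(vma, address,
					flags | FAULT_FLAG_VMA_LOCK, regs);
		/* On RETRY, vmf_anon_prepare() already dropped the VMA lock. */
		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
			vma_end_read(vma);
		if (!(fault & VM_FAULT_RETRY))
			goto done;
	}
	/* Fall back: retry the fault with the mmap_lock held. */
	mmap_read_lock(mm);

With this patch, a hugetlb fault that needs an anon_vma while holding only the VMA lock follows exactly this retry path rather than failing with a spurious VM_FAULT_OOM.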