mm: remove page_mkclean()
There are no more users of page_mkclean(); remove it and update the
documentation and comments.

Link: https://lkml.kernel.org/r/20240604114822.2089819-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Helge Deller <deller@gmx.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:

parent 645b1399fa
commit a929e0d10f
Documentation/core-api/pin_user_pages.rst

@@ -132,7 +132,7 @@ CASE 1: Direct IO (DIO)
 -----------------------
 There are GUP references to pages that are serving
 as DIO buffers. These buffers are needed for a relatively short time (so they
-are not "long term"). No special synchronization with page_mkclean() or
+are not "long term"). No special synchronization with folio_mkclean() or
 munmap() is provided. Therefore, flags to set at the call site are: ::
 
     FOLL_PIN
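As a concrete illustration of the CASE 1 pattern above, here is a minimal
kernel-C sketch. do_dio_to() is a hypothetical placeholder, and note that
FOLL_PIN itself is set internally by the pin_user_pages*() entry points
rather than passed by the caller:

    #include <linux/mm.h>

    /*
     * Sketch of a short-term (CASE 1) pin: pin, perform the I/O, then
     * unpin and mark dirty. Partial-pin handling and the I/O itself
     * are elided.
     */
    static int dio_write_into_user_buffer(unsigned long uaddr, int nr_pages,
                                          struct page **pages)
    {
            int pinned;

            /* FOLL_WRITE: the device will write into these pages. */
            pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (pinned < 0)
                    return pinned;

            do_dio_to(pages, pinned);       /* hypothetical I/O step */

            /* Dirty the pages (DMA modified them) and drop the pins. */
            unpin_user_pages_dirty_lock(pages, pinned, true);
            return 0;
    }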
@@ -144,7 +144,7 @@ CASE 2: RDMA
 ------------
 There are GUP references to pages that are serving as DMA
 buffers. These buffers are needed for a long time ("long term"). No special
-synchronization with page_mkclean() or munmap() is provided. Therefore, flags
+synchronization with folio_mkclean() or munmap() is provided. Therefore, flags
 to set at the call site are: ::
 
     FOLL_PIN | FOLL_LONGTERM
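The long-term (CASE 2) variant differs only in the extra flag; a hedged
sketch, with all of the surrounding RDMA registration logic omitted:

    #include <linux/mm.h>

    /*
     * Long-term pins add FOLL_LONGTERM, which makes GUP migrate the
     * pages out of regions that must not carry indefinite pins
     * (CMA, ZONE_MOVABLE) before pinning them.
     */
    static int rdma_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                    struct page **pages)
    {
            return pin_user_pages_fast(uaddr, nr_pages,
                                       FOLL_WRITE | FOLL_LONGTERM, pages);
    }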
@@ -170,7 +170,7 @@ callback, simply remove the range from the device's page tables.
 
 Either way, as long as the driver unpins the pages upon mmu notifier callback,
 then there is proper synchronization with both filesystem and mm
-(page_mkclean(), munmap(), etc). Therefore, neither flag needs to be set.
+(folio_mkclean(), munmap(), etc). Therefore, neither flag needs to be set.
 
 CASE 4: Pinning for struct page manipulation only
 -------------------------------------------------
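A rough sketch of the CASE 3 shape, assuming a hypothetical driver
structure (my_dev, my_dev_stop_dma()); only the mmu_interval_notifier
callback signature and the unpin call are taken from the real API, and
details such as blockable-range handling are omitted:

    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>

    /* Hypothetical per-device state holding the pinned pages. */
    struct my_dev {
            struct mmu_interval_notifier notifier;
            struct page **pages;
            unsigned long npages;
    };

    static bool my_dev_invalidate(struct mmu_interval_notifier *mni,
                                  const struct mmu_notifier_range *range,
                                  unsigned long cur_seq)
    {
            struct my_dev *dev = container_of(mni, struct my_dev, notifier);

            mmu_interval_set_seq(mni, cur_seq);
            my_dev_stop_dma(dev);           /* hypothetical: quiesce DMA */
            unpin_user_pages(dev->pages, dev->npages); /* drop the pins */
            return true;
    }

    static const struct mmu_interval_notifier_ops my_dev_mni_ops = {
            .invalidate = my_dev_invalidate,
    };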
@@ -200,7 +200,7 @@ folio_maybe_dma_pinned(): the whole point of pinning
 ====================================================
 
 The whole point of marking folios as "DMA-pinned" or "gup-pinned" is to be able
-to query, "is this folio DMA-pinned?" That allows code such as page_mkclean()
+to query, "is this folio DMA-pinned?" That allows code such as folio_mkclean()
 (and file system writeback code in general) to make informed decisions about
 what to do when a folio cannot be unmapped due to such pins.
 
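To make the "informed decisions" concrete, a small sketch of how
writeback-side code can consult the pin state; the policy shown is
illustrative, not taken from any particular filesystem:

    #include <linux/mm.h>

    /*
     * folio_maybe_dma_pinned() may report false positives on very
     * heavily mapped folios, so treat the answer as a hint: a pinned
     * folio can be redirtied by DMA at any moment.
     */
    static bool expect_folio_to_stay_clean(struct folio *folio)
    {
            return !folio_maybe_dma_pinned(folio);
    }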
drivers/video/fbdev/core/fb_defio.c

@@ -113,7 +113,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
                printk(KERN_ERR "no mapping available\n");
 
        BUG_ON(!page->mapping);
-       page->index = vmf->pgoff; /* for page_mkclean() */
+       page->index = vmf->pgoff; /* for folio_mkclean() */
 
        vmf->page = page;
        return 0;
@@ -161,7 +161,7 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
 
        /*
         * We want the page to remain locked from ->page_mkwrite until
-        * the PTE is marked dirty to avoid page_mkclean() being called
+        * the PTE is marked dirty to avoid folio_mkclean() being called
         * before the PTE is updated, which would leave the page ignored
         * by defio.
         * Do this by locking the page here and informing the caller
include/linux/mm.h

@@ -1577,7 +1577,7 @@ static inline void put_page(struct page *page)
  * issue.
  *
  * Locking: the lockless algorithm described in folio_try_get_rcu()
- * provides safe operation for get_user_pages(), page_mkclean() and
+ * provides safe operation for get_user_pages(), folio_mkclean() and
  * other calls that race to set up page table entries.
  */
 #define GUP_PIN_COUNTING_BIAS (1U << 10)
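For orientation, the bias turns "is this small folio pinned?" into a
simple refcount comparison; the helper below is an illustrative
restatement of what folio_maybe_dma_pinned() does for order-0 folios
(large folios use a dedicated pincount field instead):

    #include <linux/mm.h>

    /*
     * Each FOLL_PIN acquisition adds GUP_PIN_COUNTING_BIAS (1024) to
     * the refcount of a small folio, so "probably pinned" is simply a
     * large refcount. Many ordinary gets can cause false positives.
     */
    static inline bool small_folio_probably_pinned(struct folio *folio)
    {
            return folio_ref_count(folio) >= GUP_PIN_COUNTING_BIAS;
    }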
include/linux/rmap.h

@@ -802,8 +802,4 @@ static inline int folio_mkclean(struct folio *folio)
 }
 #endif  /* CONFIG_MMU */
 
-static inline int page_mkclean(struct page *page)
-{
-       return folio_mkclean(page_folio(page));
-}
 #endif  /* _LINUX_RMAP_H */
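With the wrapper gone, any remaining caller converts by doing the folio
lookup explicitly; a hypothetical call site for illustration:

    #include <linux/rmap.h>

    /* Before: */
    int cleaned = page_mkclean(page);

    /* After: the caller performs the page-to-folio conversion itself. */
    int cleaned = folio_mkclean(page_folio(page));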
mm/gup.c
@@ -378,7 +378,7 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
         * 1) This code sees the page as already dirty, so it
         * skips the call to set_page_dirty(). That could happen
         * because clear_page_dirty_for_io() called
-        * page_mkclean(), followed by set_page_dirty().
+        * folio_mkclean(), followed by set_page_dirty().
         * However, now the page is going to get written back,
         * which meets the original intention of setting it
         * dirty, so all is well: clear_page_dirty_for_io() goes
mm/mremap.c

@@ -198,7 +198,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         * PTE.
         *
         * NOTE! Both old and new PTL matter: the old one
-        * for racing with page_mkclean(), the new one to
+        * for racing with folio_mkclean(), the new one to
         * make sure the physical page stays valid until
         * the TLB entry for the old mapping has been
         * flushed.