mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 12:11:40 +00:00
mm: add utility functions for ptdesc
Introduce utility functions setting the foundation for ptdescs. These will also assist in the splitting out of ptdesc from struct page. Functions that focus on the descriptor are prefixed with ptdesc_* while functions that focus on the pagetable are prefixed with pagetable_*. pagetable_alloc() is defined to allocate new ptdesc pages as compound pages. This is to standardize ptdescs by allowing for one allocation and one free function, in contrast to 2 allocation and 2 free functions. Link: https://lkml.kernel.org/r/20230807230513.102486-4-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Claudio Imbrenda <imbrenda@linux.ibm.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Hildenbrand <david@redhat.com> Cc: "David S. Miller" <davem@davemloft.net> Cc: Dinh Nguyen <dinguyen@kernel.org> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Geert Uytterhoeven <geert+renesas@glider.be> Cc: Guo Ren <guoren@kernel.org> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Hugh Dickins <hughd@google.com> Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> Cc: Jonas Bonn <jonas@southpole.se> Cc: Matthew Wilcox <willy@infradead.org> Cc: Mike Rapoport (IBM) <rppt@kernel.org> Cc: Palmer Dabbelt <palmer@rivosinc.com> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Richard Weinberger <richard@nod.at> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
9a35de4ffc
commit
bf2d4334f7
@ -480,6 +480,17 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
|
||||
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
|
||||
}
|
||||
|
||||
/*
 * Queue a page table descriptor for deferred freeing via the table-free
 * path of the mmu_gather machinery.
 */
static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	tlb_remove_table(tlb, pt);
}
|
||||
|
||||
/* Like tlb_remove_ptdesc, but for page-like page directories. */
static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	/* Convert the descriptor back to its struct page for the page path. */
	struct page *page = ptdesc_page(pt);

	tlb_remove_page(tlb, page);
}
|
||||
|
||||
static inline void tlb_change_page_size(struct mmu_gather *tlb,
|
||||
unsigned int page_size)
|
||||
{
|
||||
|
@ -2772,6 +2772,57 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
|
||||
}
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
/* Look up the page table descriptor backing a kernel virtual address. */
static inline struct ptdesc *virt_to_ptdesc(const void *x)
{
	struct page *page = virt_to_page(x);

	return page_ptdesc(page);
}
|
||||
|
||||
/* Map a page table descriptor back to the kernel virtual address it covers. */
static inline void *ptdesc_to_virt(const struct ptdesc *pt)
{
	const struct page *page = ptdesc_page(pt);

	return page_to_virt(page);
}
|
||||
|
||||
/* Return the virtual address of the page table described by @pt. */
static inline void *ptdesc_address(const struct ptdesc *pt)
{
	const struct folio *folio = ptdesc_folio(pt);

	return folio_address(folio);
}
|
||||
|
||||
/* Test whether the memory backing this page table carries PG_reserved. */
static inline bool pagetable_is_reserved(struct ptdesc *pt)
{
	struct folio *folio = ptdesc_folio(pt);

	return folio_test_reserved(folio);
}
|
||||
|
||||
/**
|
||||
* pagetable_alloc - Allocate pagetables
|
||||
* @gfp: GFP flags
|
||||
* @order: desired pagetable order
|
||||
*
|
||||
* pagetable_alloc allocates memory for page tables as well as a page table
|
||||
* descriptor to describe that memory.
|
||||
*
|
||||
* Return: The ptdesc describing the allocated page tables.
|
||||
*/
|
||||
static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order)
|
||||
{
|
||||
struct page *page = alloc_pages(gfp | __GFP_COMP, order);
|
||||
|
||||
return page_ptdesc(page);
|
||||
}
|
||||
|
||||
/**
 * pagetable_free - Free pagetables
 * @pt: The page table descriptor
 *
 * pagetable_free frees the memory of all page tables described by a page
 * table descriptor and the memory for the descriptor itself.
 */
static inline void pagetable_free(struct ptdesc *pt)
{
	struct page *p = ptdesc_page(pt);

	/* The compound order recorded at allocation time sizes the free. */
	__free_pages(p, compound_order(p));
}
|
||||
|
||||
#if USE_SPLIT_PTE_PTLOCKS
|
||||
#if ALLOC_SPLIT_PTLOCKS
|
||||
void __init ptlock_cache_init(void);
|
||||
@ -2898,6 +2949,11 @@ static inline struct page *pmd_pgtable_page(pmd_t *pmd)
|
||||
return virt_to_page((void *)((unsigned long) pmd & mask));
|
||||
}
|
||||
|
||||
/* Descriptor of the page table page that holds this pmd entry. */
static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
{
	struct page *page = pmd_pgtable_page(pmd);

	return page_ptdesc(page);
}
|
||||
|
||||
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
|
||||
{
|
||||
return ptlock_ptr(pmd_pgtable_page(pmd));
|
||||
@ -3010,6 +3066,11 @@ static inline void mark_page_reserved(struct page *page)
|
||||
adjust_managed_page_count(page, -1);
|
||||
}
|
||||
|
||||
/* Release a reserved page table page back to the buddy allocator. */
static inline void free_reserved_ptdesc(struct ptdesc *pt)
{
	struct page *page = ptdesc_page(pt);

	free_reserved_page(page);
}
|
||||
|
||||
/*
|
||||
* Default method to free all the __init memory into the buddy system.
|
||||
* The freed pages will be poisoned with pattern "poison" if it's within
|
||||
|
@ -467,6 +467,18 @@ TABLE_MATCH(memcg_data, pt_memcg_data);
|
||||
#undef TABLE_MATCH
|
||||
/* ptdesc must overlay struct page exactly for the casts below to be valid. */
static_assert(sizeof(struct ptdesc) <= sizeof(struct page));

/*
 * Const-preserving converters between the three views of a page table
 * page: struct page, struct folio, and struct ptdesc.  _Generic keeps a
 * const pointer const through the conversion.
 */
#define ptdesc_page(pt)			(_Generic((pt),			\
	const struct ptdesc *:		(const struct page *)(pt),	\
	struct ptdesc *:		(struct page *)(pt)))

#define ptdesc_folio(pt)		(_Generic((pt),			\
	const struct ptdesc *:		(const struct folio *)(pt),	\
	struct ptdesc *:		(struct folio *)(pt)))

#define page_ptdesc(p)			(_Generic((p),			\
	const struct page *:		(const struct ptdesc *)(p),	\
	struct page *:			(struct ptdesc *)(p)))
|
||||
|
||||
/*
|
||||
* Used for sizing the vmemmap region on some architectures
|
||||
*/
|
||||
|
Loading…
Reference in New Issue
Block a user