mirror of
https://github.com/torvalds/linux.git
synced 2024-12-30 06:41:43 +00:00
b2b29d6d01
We account the PTE level of the page tables to the process in order to make smarter OOM decisions and help diagnose why memory is fragmented. For these same reasons, we should account pages allocated for PMDs. With larger process address spaces and ASLR, the number of PMDs in use is higher than it used to be so the inaccuracy is starting to matter. [rppt@linux.ibm.com: arm: __pmd_free_tlb(): call page table destructor] Link: https://lkml.kernel.org/r/20200825111303.GB69694@linux.ibm.com Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Reviewed-by: Mike Rapoport <rppt@linux.ibm.com> Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Joerg Roedel <joro@8bytes.org> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com> Cc: Stafford Horne <shorne@gmail.com> Cc: Naresh Kamboju <naresh.kamboju@linaro.org> Cc: Anders Roxell <anders.roxell@linaro.org> Link: http://lkml.kernel.org/r/20200627184642.GF25039@casper.infradead.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
69 lines
1.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* arch/arm/include/asm/tlb.h
|
|
*
|
|
* Copyright (C) 2002 Russell King
|
|
*
|
|
* Experimentation shows that on a StrongARM, it appears to be faster
|
|
* to use the "invalidate whole tlb" rather than "invalidate single
|
|
* tlb" for this.
|
|
*
|
|
* This appears true for both the process fork+exit case, as well as
|
|
* the munmap-large-area case.
|
|
*/
|
|
#ifndef __ASMARM_TLB_H
|
|
#define __ASMARM_TLB_H
|
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
#ifndef CONFIG_MMU
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#define tlb_flush(tlb) ((void) tlb)
|
|
|
|
#include <asm-generic/tlb.h>
|
|
|
|
#else /* !CONFIG_MMU */
|
|
|
|
#include <linux/swap.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
/*
 * Release a page-table page handed back by the mmu_gather table-free
 * path: free the page and any swap-cache entry attached to it.
 */
static inline void __tlb_remove_table(void *_table)
{
	struct page *table_page = (struct page *)_table;

	free_page_and_swap_cache(table_page);
}
|
|
|
|
#include <asm-generic/tlb.h>
|
|
|
|
static inline void
|
|
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
|
|
{
|
|
pgtable_pte_page_dtor(pte);
|
|
|
|
#ifndef CONFIG_ARM_LPAE
|
|
/*
|
|
* With the classic ARM MMU, a pte page has two corresponding pmd
|
|
* entries, each covering 1MB.
|
|
*/
|
|
addr = (addr & PMD_MASK) + SZ_1M;
|
|
__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
|
|
#endif
|
|
|
|
tlb_remove_table(tlb, pte);
|
|
}
|
|
|
|
static inline void
|
|
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
|
|
{
|
|
#ifdef CONFIG_ARM_LPAE
|
|
struct page *page = virt_to_page(pmdp);
|
|
|
|
pgtable_pmd_page_dtor(page);
|
|
tlb_remove_table(tlb, page);
|
|
#endif
|
|
}
|
|
|
|
#endif /* CONFIG_MMU */
|
|
#endif
|