forked from Minki/linux
0d6e24d430
As described in the comment, the correct order for freeing pages is: 1) unhook page 2) TLB invalidate page 3) free page This order equally applies to page directories. Currently there are two correct options: - use tlb_remove_page(), when all page directories are full pages and there are no further constraints placed by things like software walkers (HAVE_FAST_GUP). - use MMU_GATHER_RCU_TABLE_FREE and tlb_remove_table() when the architecture does not do IPI based TLB invalidate and has HAVE_FAST_GUP (or software TLB fill). This however leaves architectures that don't have page based directories but don't need RCU in a bind. For those, provide MMU_GATHER_TABLE_FREE, which provides the independent batching for directories without the additional RCU freeing. Link: http://lkml.kernel.org/r/20200116064531.483522-10-aneesh.kumar@linux.ibm.com Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
69 lines
1.4 KiB
C
69 lines
1.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* arch/arm/include/asm/tlb.h
|
|
*
|
|
* Copyright (C) 2002 Russell King
|
|
*
|
|
* Experimentation shows that on a StrongARM, it appears to be faster
|
|
* to use the "invalidate whole tlb" rather than "invalidate single
|
|
* tlb" for this.
|
|
*
|
|
* This appears true for both the process fork+exit case, as well as
|
|
* the munmap-large-area case.
|
|
*/
|
|
#ifndef __ASMARM_TLB_H
|
|
#define __ASMARM_TLB_H
|
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
#ifndef CONFIG_MMU
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#define tlb_flush(tlb) ((void) tlb)
|
|
|
|
#include <asm-generic/tlb.h>
|
|
|
|
#else /* !CONFIG_MMU */
|
|
|
|
#include <linux/swap.h>
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
/*
 * Callback used by the generic mmu_gather code to actually release a
 * page-table page once the TLB entries referencing it have been
 * invalidated.
 */
static inline void __tlb_remove_table(void *table)
{
	struct page *page = (struct page *)table;

	free_page_and_swap_cache(page);
}
|
|
|
|
#include <asm-generic/tlb.h>
|
|
|
|
/*
 * Queue a PTE page-table page for freeing via the mmu_gather batch.
 * The pgtable destructor runs first, then (classic MMU only) the TLB
 * flush range is widened, and finally the page is handed to
 * tlb_remove_table() so it is freed only after TLB invalidation.
 */
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
	pgtable_pte_page_dtor(pte);

#ifndef CONFIG_ARM_LPAE
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr = (addr & PMD_MASK) + SZ_1M;
	/* Extend the gather range to span both 1MB sections' mappings. */
	__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif

	tlb_remove_table(tlb, pte);
}
|
|
|
|
static inline void
|
|
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
|
|
{
|
|
#ifdef CONFIG_ARM_LPAE
|
|
struct page *page = virt_to_page(pmdp);
|
|
|
|
tlb_remove_table(tlb, page);
|
|
#endif
|
|
}
|
|
|
|
#endif /* CONFIG_MMU */
|
|
#endif
|