mirror of https://github.com/torvalds/linux.git (synced 2024-11-23 12:42:02 +00:00)
arm64: tlb: Avoid synchronous TLBIs when freeing page tables
By selecting HAVE_RCU_TABLE_INVALIDATE, we can rely on tlb_flush() being called if we fail to batch table pages for freeing. This in turn allows us to postpone walk-cache invalidation until tlb_finish_mmu(), which avoids lots of unnecessary DSBs and means we can shoot down the ASID if the range is large enough.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
parent f270ab88fd
commit ace8cb7545
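For context on why the per-table-page flushes below can simply be deleted: with HAVE_RCU_TABLE_INVALIDATE selected, the generic mmu_gather code flushes the TLB before any batched table pages are actually freed. The following is a simplified sketch, assuming the mm/memory.c table-batching code of this era (abridged, and not the arm64 code changed by this commit):

/*
 * Simplified sketch of the generic table-freeing path that
 * CONFIG_HAVE_RCU_TABLE_INVALIDATE hooks into: the TLB and walk
 * caches are invalidated once per batch, before the gathered
 * page-table pages are handed to RCU for freeing.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
	/* Invalidate page-table caches used by hardware walkers. */
	tlb_flush_mmu_tlbonly(tlb);
#endif
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);	/* one flush covers the whole batch */
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

Because this flush is guaranteed even when a table page cannot be batched, arm64 no longer needs to invalidate the walk cache synchronously every time a page-table page is freed.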
arch/arm64/Kconfig

@@ -143,6 +143,7 @@ config ARM64
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RCU_TABLE_FREE
+	select HAVE_RCU_TABLE_INVALIDATE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
arch/arm64/include/asm/tlb.h

@@ -54,7 +54,6 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
-	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_table(tlb, pte);
 }
@@ -63,7 +62,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
-	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_table(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -72,7 +70,6 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
-	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_table(tlb, virt_to_page(pudp));
 }
 #endif
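With the three per-page __flush_tlb_pgtable() calls above removed, walk-cache invalidation now happens once, when the gather is torn down. The sketch below is a hypothetical illustration of that deferred step; example_deferred_table_flush() and example_flush_range() are made-up names for illustration, and the real work is done by arm64's tlb_flush() when tlb_finish_mmu() (or the table-batching code) calls it:

/*
 * Hypothetical illustration only: after this patch nothing is flushed
 * while table pages are being gathered. A single ranged (or, for
 * large ranges, ASID-wide) invalidation at teardown covers every
 * page-table page freed in this mmu_gather.
 */
static inline void example_deferred_table_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm)
		return;		/* address space is going away; the ASID is simply retired */

	/* one DSB-bracketed ranged TLBI instead of one per freed table page */
	example_flush_range(tlb->mm, tlb->start, tlb->end);
}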
arch/arm64/include/asm/tlbflush.h

@@ -215,17 +215,6 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
  */
-static inline void __flush_tlb_pgtable(struct mm_struct *mm,
-				       unsigned long uaddr)
-{
-	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
-
-	dsb(ishst);
-	__tlbi(vae1is, addr);
-	__tlbi_user(vae1is, addr);
-	dsb(ish);
-}
-
 static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 {
 	unsigned long addr = __TLBI_VADDR(kaddr, 0);
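The "shoot down the ASID if the range is large enough" part of the commit message comes from arm64's ranged invalidation, which gives up on per-page TLBIs beyond a cut-off and invalidates everything tagged with the ASID instead. An illustrative sketch of that decision, where EXAMPLE_TLBI_RANGE_LIMIT and example_flush_user_range() are hypothetical stand-ins for the real limit and helper used by the ranged-flush code in this file:

/*
 * Illustrative sketch only: past a certain size it is cheaper to
 * invalidate everything tagged with the ASID than to issue one TLBI
 * per page. The names below are hypothetical stand-ins.
 */
#define EXAMPLE_TLBI_RANGE_LIMIT	(1024UL << PAGE_SHIFT)

static inline void example_flush_user_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	unsigned long addr;

	if ((end - start) > EXAMPLE_TLBI_RANGE_LIMIT) {
		flush_tlb_mm(mm);		/* ASID-wide shootdown */
		return;
	}

	dsb(ishst);
	for (addr = __TLBI_VADDR(start, ASID(mm));
	     addr < __TLBI_VADDR(end, ASID(mm));
	     addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vae1is, addr);		/* per-page invalidation for small ranges */
	dsb(ish);
}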