9e1b32caa5
mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture will need the virtual address corresponding to a PTE page when freeing it, due to the way the HW table walker works. Basically, the TLB can be loaded with "large" pages that cover the whole virtual space (well, sort-of, half of it actually) represented by a PTE page, and which contain an "indirect" bit indicating that this TLB entry RPN points to an array of PTEs from which the TLB can then create direct entries. Thus, in order to invalidate those when PTE pages are deleted, we need the virtual address to pass to tlbilx or tlbivax instructions.

The old trick of sticking it somewhere in the PTE page struct page sucks too much; the address is almost readily available in all call sites and almost everybody implements these as macros, so we may as well add the argument everywhere. I added it to the pmd and pud variants for consistency.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: David Howells <dhowells@redhat.com> [MN10300 & FRV]
Acked-by: Nick Piggin <npiggin@suse.de>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> [s390]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
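For context, a sketch of how a generic caller changes with this commit: free_pte_range() in mm/memory.c (reproduced loosely here, not part of this header; the exact body may differ) forwards the address of the range being torn down into pte_free_tlb(), so architectures that need it can invalidate the indirect TLB entry covering the PTE page. s390 simply ignores the extra argument.

static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);	/* the PTE page being freed */

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);		/* new: pass the virtual address */
	tlb->mm->nr_ptes--;
}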
160 lines
4.5 KiB
C
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */

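/*
 * Illustrative sketch of the two-step update described above (not part
 * of the original header; __ptep_ipte() is assumed to be the s390
 * helper wrapping INVALIDATE PAGE TABLE ENTRY):
 *
 *	__ptep_ipte(address, ptep);	- step i): invalidate the old pte
 *	*ptep = new_pte;		- step ii): store the new value
 */
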
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_SMP
#define TLB_NR_PTRS	1
#else
#define TLB_NR_PTRS	508
#endif

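/*
 * Sizing note (assumption, not from the original source): with 8-byte
 * pointers, 508 slots plus the leading members keep struct mmu_gather
 * at roughly one 4K page.  On UP, tlb_gather_mmu() below always sets
 * fullmm (only one CPU is online), so batching is never used and a
 * single slot is enough.
 */
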
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int fullmm;
	unsigned int nr_ptes;
	unsigned int nr_pxds;
	void *array[TLB_NR_PTRS];
};

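/*
 * array[] is filled from both ends: pte tables are queued from index 0
 * upwards (nr_ptes counts up), while pmd/pud tables ("pxds") are queued
 * from TLB_NR_PTRS downwards (nr_pxds counts down).  When the two
 * indices meet, tlb_flush_mmu() flushes the TLB and drains the batch.
 */
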
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
						unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
		(atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
	tlb->nr_ptes = 0;
	tlb->nr_pxds = TLB_NR_PTRS;
	if (tlb->fullmm)
		__tlb_flush_mm(mm);
	return tlb;
}

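/*
 * When fullmm is set (explicit full-mm flush, UP, or a single-threaded
 * mm that is currently active), the TLB has already been flushed once
 * up front, so the *_free_tlb() helpers below release page-table pages
 * immediately instead of batching them in array[].
 */
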
static inline void tlb_flush_mmu(struct mmu_gather *tlb,
				 unsigned long start, unsigned long end)
{
	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
		__tlb_flush_mm(tlb->mm);
	while (tlb->nr_ptes > 0)
		pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
	while (tlb->nr_pxds < TLB_NR_PTRS)
		/* pgd_free frees the pointer as region or segment table */
		pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	if (!tlb->fullmm) {
		tlb->array[tlb->nr_ptes++] = pte;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pte_free(tlb->mm, pte);
}

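/*
 * Note that the address argument introduced by this commit is not used
 * on s390; it exists so that architectures whose hardware table walkers
 * need the virtual address (such as 64-bit BookE powerpc) can invalidate
 * the indirect TLB entries covering a page-table page when it is freed.
 */
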
/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	if (!tlb->fullmm) {
		tlb->array[--tlb->nr_pxds] = pmd;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pmd_free(tlb->mm, pmd);
#endif
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	if (!tlb->fullmm) {
		tlb->array[--tlb->nr_pxds] = pud;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pud_free(tlb->mm, pud);
#endif
}

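/*
 * The generic per-vma and per-entry hooks below are no-ops here; the
 * apparent rationale (assumption, not stated in the original source) is
 * that on s390 the required flushing is already done by the ptep_*
 * primitives and by tlb_flush_mmu() above.
 */
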
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)

#endif /* _S390_TLB_H */