74bf4312ff
We now use the TSB hardware assist features of the UltraSPARC MMUs.

SMP is currently knowingly broken; we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-)

Another known broken case is non-8KB base page size.

Also, flush_tlb_all() is not referenced anywhere; only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all().

The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8KB TSB. Later we can add code to dynamically increase the size of the per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses.

We even accumulate OBP translations into the kernel TSB.

Another area for refinement is large page size support. We could use a secondary address space TSB to handle those.

Signed-off-by: David S. Miller <davem@davemloft.net>
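A quick sanity check of the 4MB figure cited above (not part of the commit itself), assuming the 16-byte TSB entry format of these chips (an 8-byte tag plus an 8-byte TTE):

/* Back-of-the-envelope check of the "8KB TSB covers ~4MB RSS" claim,
 * assuming 16-byte (8-byte tag + 8-byte TTE) TSB entries:
 *
 *	8KB TSB / 16 bytes per entry  = 512 entries
 *	512 entries * 8KB base page   = 4MB of coverage
 *
 * Past roughly 512 resident pages, the direct-mapped TSB necessarily
 * starts taking capacity and conflict misses, which is the threshold
 * the commit message cites.
 */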
94 lines
1.8 KiB
C
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };

/* Flush this cpu's pending batch: clean the queued entries out of the
 * TSB first, then demap them from the TLB (by cross-call on SMP,
 * locally on UP).  An mm whose context is no longer valid has no live
 * TLB entries, so in that case only the TSB needs cleaning.
 */
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		flush_tsb_user(mp);

		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;

	/* Page-aligned addresses leave bit 0 free, so use it to tag
	 * executable mappings; the flush code then knows to demap such
	 * entries from the I-TLB as well as the D-TLB.
	 */
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		/* The D-cache is virtually indexed and larger than the
		 * 8KB base page, so a user mapping whose index bit 13
		 * differs from the kernel's linear mapping creates an
		 * illegal alias and must be flushed.
		 */
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* If the whole address space is being torn down, one flush at
	 * the end covers everything; don't queue individual entries.
	 */
	if (mp->fullmm)
		return;

	nr = mp->tlb_nr;

	/* The batch holds entries for a single mm only, so flush it
	 * out before switching to a different one.
	 */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
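For context, nothing in this file invokes tlb_batch_add() directly; the hook lives in the PTE update path. The sketch below is a simplified approximation of how a set_pte_at() implementation feeds the batch, with the guard condition abbreviated; consult include/asm-sparc64/pgtable.h in the tree for the real definition.

/* Simplified sketch (not the tree's exact code): set_pte_at() installs
 * the new PTE, then hands the old translation to the batching code so
 * the stale TLB/TSB entry gets demapped lazily.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	pte_t orig = *ptep;

	*ptep = pte;
	if (pte_present(orig))	/* old mapping was live? */
		tlb_batch_add(mm, addr, ptep, orig);
}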