linux/arch/mips/mm/tlb-r4k.c
Mike Rapoport e31cf2f4ca mm: don't include asm/pgtable.h if linux/mm.h is already included
Patch series "mm: consolidate definitions of page table accessors", v2.

The low level page table accessors (pXY_index(), pXY_offset()) are
duplicated across all architectures and sometimes more than once.  For
instance, we have 31 definitions of pgd_offset() for 25 supported
architectures.

Most of these definitions are actually identical and typically boil
down to, e.g.

static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

These definitions can be shared among 90% of the arches provided
XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined.

For architectures that really need a custom version there is always the
possibility to override the generic version with the usual ifdef magic.

These patches introduce include/linux/pgtable.h that replaces
include/asm-generic/pgtable.h and add the definitions of the page table
accessors to the new header.
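
As an illustration of the approach (a sketch only; the actual series may
differ in detail), the generic definition can be guarded so that an
architecture which really needs its own version simply overrides it:

#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif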

This patch (of 12):

The linux/mm.h header includes <asm/pgtable.h> to allow inlining of the
functions involving page table manipulations, e.g.  pte_alloc() and
pmd_alloc().  So there is no point in explicitly including <asm/pgtable.h>
in files that already include <linux/mm.h>.

The include statements in such cases are removed with a simple loop:

	for f in $(git grep -l "include <linux/mm.h>") ; do
		sed -i -e '/include <asm\/pgtable.h>/ d' $f
	done

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org
Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-06-09 09:39:13 -07:00

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
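		/*
		 * Each R4k-style TLB entry maps an even/odd pair of pages
		 * via EntryLo0/EntryLo1, so work in double-page steps.
		 */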
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);

		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, uninitialized_var(old_mmid);
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
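	/*
	 * Round the range up to whole pages, then convert to TLB entries;
	 * each entry covers a pair of pages, hence the extra halving.
	 */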
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long uninitialized_var(old_mmid);
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed to
 * work around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
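	/*
	 * Interleave the page table walk with the TLB probe so useful
	 * work gets done while the probe hazard resolves.
	 */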
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;

		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
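		/*
		 * EntryLo1 maps the second half of the huge page: the
		 * EntryLo PFN field starts at bit 6, so the offset for
		 * HPAGE_SIZE/2 bytes is HPAGE_SIZE >> 7.
		 */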
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int uninitialized_var(old_mmid);
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
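		/*
		 * Probe for huge page support: pagemask bits the hardware
		 * does not implement read back as zero, so the huge mask
		 * sticks only if the TLB supports it.
		 */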
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */
int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

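/*
 * "ntlb=" kernel command-line parameter: restrict the TLB to ntlb
 * entries by marking the remainder wired in tlb_init().
 */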
static int ntlb;

static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
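	/*
	 * R10000-family CPUs have a c0_framemask register that can mask
	 * physical address bits out of TLB entries; clear it so the whole
	 * frame number is significant.
	 */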
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);