// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static struct cpuinfo_arc_mmu {
	unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;
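
/*
 * Quick reference (inferred from how create_tlb() below assembles the
 * descriptors): a J-TLB entry is programmed via two aux registers:
 *  TLBPD0: virtual page number, ASID and Global/size flags
 *  TLBPD1: physical page number plus Kr/Kw/Kx, Ur/Uw/Ux and cache bits
 */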

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
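
/*
 * Flush the micro-TLBs: the small fetch-side caches of the joint TLB
 * (see the uDTLB/uITLB sizing decoded in arc_mmu_mumbojumbo() below).
 * Needed whenever the J-TLB is rewritten with commands that don't flush
 * them implicitly.
 */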
static void utlb_invalidate(void)
{
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}

#ifdef CONFIG_ARC_MMU_V3
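
/*
 * MMUv3 lookup: load PD0 with vaddr | ASID and issue TLBProbe; the Index
 * reg then holds the matching slot, or an error flag (TLB_LKUP_ERR).
 */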
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
			vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* MMUv4 */
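
/*
 * MMUv4 provides dedicated TLBInsertEntry/TLBDeleteEntry commands: the
 * hardware does the probe and slot selection itself, so the software
 * lookup dance of the MMUv3 variants above is not needed.
 */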
static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);

	if (!is_pae40_enabled()) {
		write_aux_reg(ARC_REG_TLBPD1, pd1);
	} else {
		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
	}

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);
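
	/*
	 * TLBWriteNI is used below because it skips the uTLB flush; a
	 * single utlb_invalidate() at the end covers everything written.
	 */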

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork,exit,munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also, the new get_new_mmu_context() implementation allocates
	 *   an ASID only if one is not already allocated - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
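	/* (with the typical 8K page, that cutoff is 32 * 8K = 256K of vaddr) */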
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (user virtual address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
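
/*
 * The cross-CPU flush wrappers below bundle their arguments in tlb_args
 * and use on_each_cpu_mask() to run the local_* flavour on every CPU in
 * mm_cpumask(), i.e. every CPU that has run the mm, including this one.
 */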

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	phys_addr_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg
	 *  (valid for curr)
	 * -it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->vm_mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;
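
	/*
	 * e.g. a user rw- page (rwx = 0b110) becomes Kr Kw 0 Ur Uw 0
	 * (0b110110); a global kernel rw- page becomes Kr Kw 0 0 0 0
	 * (0b110000).
	 */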
	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0))
		return;

	/*
	 * For executable pages, since icache doesn't snoop dcache, any
	 * dirty K-mapping of a code page needs to be wback+inv so that
	 * icache fetch by userspace sees code correctly.
	 */
	if (vma->vm_flags & VM_EXEC) {
		struct folio *folio = page_folio(page);
		int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
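
		/*
		 * test_and_set_bit() returns the old PG_dc_clean value, so
		 * "dirty" is true only the first time here since the folio's
		 * kernel mapping was dirtied, making the flush one-shot.
		 */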
		if (dirty) {
			unsigned long offset = offset_in_folio(folio, paddr);

			nr = folio_nr_pages(folio);
			paddr -= offset;
			vaddr -= offset;

			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_pages(paddr, paddr, nr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_pages(paddr, vaddr, nr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typically 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typically
 *    11:8:13, but can be changed with 1 line)
 * So for above default, THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));

	update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCR
 */
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned int bcr, u_dtlb, u_itlb, sasid;
	struct bcr_mmu_3 *mmu3;
	struct bcr_mmu_4 *mmu4;
	char super_pg[64] = "";
	int n = 0;

	bcr = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (bcr >> 24);

	if (is_isa_arcompact() && mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		u_dtlb = mmu3->u_dtlb;
		u_itlb = mmu3->u_itlb;
		sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		u_dtlb = mmu4->u_dtlb * 4;
		u_itlb = mmu4->u_itlb * 4;
		sasid = mmu4->sasid;
		mmu->pae = mmu4->pae;
	}

	if (mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "/%dM%s",
			  mmu->s_pg_sz_m,
			  IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");

	n += scnprintf(buf + n, len - n,
		       "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
		       mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
		       mmu->sets, mmu->ways,
		       u_dtlb, u_itlb,
		       IS_AVAIL1(sasid, ", SASID"),
		       IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return n;
}
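
/*
 * A typical boot line from the above (values illustrative only):
 * MMU [v4]	: 8k/2M (THP enabled), swalk 2 lvl, JTLB 128x4, uDTLB 8, uITLB 4
 */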

int pae40_exist_but_not_enab(void)
{
	return mmuinfo.pae && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	int compat = 0;

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by kernel exist in hardware.
	 *  - For older ARC700 cpus, only v3 supported
	 *  - For HS cpus, v4 was baseline and v5 is backwards compatible
	 *    (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == 3)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= 4)
		compat = 1;

	if (!compat)
		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU with ASID 0 */
	mmu_setup_asid(NULL, 0);

	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
	mmu_setup_pgd(NULL, swapper_pg_dir);

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 * ~		    ~			    ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't(must not) care how above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
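
/* e.g. for the 128x4 J-TLB above: set 1, way 2 -> index 1 * 4 + 2 = 6 */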

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *  time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *  the duplicate one.
 * -Knob to be verbose abt it.(TODO: hook them up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent abt it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	int set, n_ways = mmu->ways;

	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);
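
	/* (pd0[] below holds one PD0 per way, hence the clamp to 4 above) */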

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}
|