commit 1f62ed00a5
C-SKY MMU would pre-fetch invalid pte entries. That could be handled with
flush_tlb_fix_spurious_fault, but the additional page fault exceptions
would reduce performance. So flush the entry out of the TLB eagerly in
update_mmu_cache to prevent the subsequent spurious page faults. Here is
the test code:

	#include <string.h>

	#define DATA_LEN  4096
	#define COPY_NUM  (504*100)

	unsigned char src[DATA_LEN*COPY_NUM] = {0};
	unsigned char dst[DATA_LEN*COPY_NUM] = {0};

	unsigned char func_src[DATA_LEN*COPY_NUM] = {0};
	unsigned char func_dst[DATA_LEN*COPY_NUM] = {0};

	void main(void)
	{
		int j;

		for (j = 0; j < COPY_NUM; j++)
			memcpy(&dst[j*DATA_LEN], &src[j*DATA_LEN], 4);
	}

	$ perf stat -e page-faults ./main.elf

The number of page-fault traps is reduced by half with this patch.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
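For reference, when an architecture does not override it, the generic
flush_tlb_fix_spurious_fault() is believed to fall back to flushing the
single offending TLB entry only after a spurious fault has already been
taken. A sketch of that assumed asm-generic default (check
include/linux/pgtable.h in your tree):

	/*
	 * Assumed generic fallback: the stale entry is flushed only once
	 * a spurious fault has already fired. This patch instead flushes
	 * in update_mmu_cache() below, so the fault never occurs.
	 */
	#ifndef flush_tlb_fix_spurious_fault
	#define flush_tlb_fix_spurious_fault(vma, address) \
		flush_tlb_page(vma, address)
	#endif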
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;

	/*
	 * Flush the stale, prefetched entry for this address so the hart
	 * does not keep taking spurious page faults on it.
	 */
	flush_tlb_page(vma, address);

	if (!pfn_valid(pte_pfn(*pte)))
		return;

	page = pfn_to_page(pte_pfn(*pte));
	if (page == ZERO_PAGE(0))
		return;

	/*
	 * Lazy cache maintenance: skip pages already marked clean; for
	 * the rest, write back the D-cache (and invalidate the I-cache
	 * for executable mappings), then mark the page clean.
	 */
	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

	addr = (unsigned long) kmap_atomic(page);

	dcache_wb_range(addr, addr + PAGE_SIZE);

	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}
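Why the VM_EXEC case above matters is easiest to see from userspace.
Below is a hedged, self-contained sketch (not part of this file): it
writes machine code into an anonymous mapping and can only jump to it
safely because the written bytes are pushed out of the D-cache and the
stale I-cache lines are invalidated, the same pairing update_mmu_cache()
performs. The instruction encodings are an AArch64 assumption purely for
illustration; on C-SKY the idea is identical.

	/*
	 * Hypothetical userspace demo, not kernel code. Builds a function
	 * that returns 42 and executes it. __builtin___clear_cache() is
	 * the portable GCC/Clang request for D-cache write-back plus
	 * I-cache invalidation over a range. Note that some hardened
	 * systems refuse PROT_WRITE|PROT_EXEC mappings.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* AArch64 "mov w0, #42; ret" -- encoding assumed for
		 * illustration only. */
		uint32_t code[] = { 0x52800540, 0xd65f03c0 };
		void *buf = mmap(NULL, 4096,
				 PROT_READ | PROT_WRITE | PROT_EXEC,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;

		memcpy(buf, code, sizeof(code));

		/* Without this, the CPU may execute stale I-cache bytes. */
		__builtin___clear_cache((char *)buf,
					(char *)buf + sizeof(code));

		printf("%d\n", ((int (*)(void))buf)());	/* expect 42 */
		return 0;
	}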
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}
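The smp_mb() above is one half of the usual publish/observe pattern: the
flushing side must make its code writes visible before setting the stale
bits, and an observer of a stale bit must order that read before
re-reading the code. A userspace analogue using C11 atomics, with purely
illustrative names that are not kernel API:

	/*
	 * Userspace analogue of the barrier pairing; every name here is
	 * an illustrative stand-in.
	 */
	#include <stdatomic.h>

	static int new_insns;		/* stands in for rewritten code */
	static _Atomic int stale;	/* stands in for a stale-mask bit */

	void flusher(void)		/* cf. flush_icache_mm_range() */
	{
		new_insns = 42;				/* publish new code */
		atomic_thread_fence(memory_order_seq_cst); /* cf. smp_mb() */
		atomic_store(&stale, 1);		/* mark hart stale */
	}

	void deferred(void)		/* cf. flush_icache_deferred() */
	{
		if (atomic_exchange(&stale, 0)) {
			atomic_thread_fence(memory_order_seq_cst);
			/* new_insns is now guaranteed visible; safe to
			 * invalidate the local I$ and refetch. */
		}
	}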
void flush_icache_mm_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark
	 * them as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}
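One design note on the final branch: the stale mask is cleared wholesale
only when the other harts were flushed synchronously (the IPI issued by
on_each_cpu_mask() waits for completion, wait=1). In the remaining case,
the bits set by cpumask_setall() stay in place, and each hart invalidates
its own I$ lazily in flush_icache_deferred() the next time it switches
into this mm.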