commit 6bd33e1ece

The kernel runs in M-mode without using page tables, and thus can't run
bare metal without help from additional firmware. Most of the patch is
just stubbing out code not needed without page tables, but there is an
interesting detail in the signals implementation:

 - The normal RISC-V syscall ABI only implements rt_sigreturn as a VDSO
   entry point, but the ELF VDSO is not supported for nommu Linux. We
   instead copy the code to call the syscall onto the stack.

In addition to enabling the nommu code, a new defconfig for a small
kernel image that can run in nommu mode on qemu is also provided. To run
a kernel in qemu you can use the following command line:

	qemu-system-riscv64 -smp 2 -m 64 -machine virt -nographic \
		-kernel arch/riscv/boot/loader \
		-drive file=rootfs.ext2,format=raw,id=hd0 \
		-device virtio-blk-device,drive=hd0

Contains contributions from Damien Le Moal <Damien.LeMoal@wdc.com>.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Anup Patel <anup@brainfault.org>
[paul.walmsley@sifive.com: updated to apply; add CONFIG_MMU guards
 around PCI_IOBASE definition to fix build issues; fixed checkpatch
 issues; move the PCI_IO_* and VMEMMAP address space macros along with
 the others; resolve sparse warning]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
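For illustration, a minimal sketch of that stack trampoline setup,
assuming a signal frame carrying a sigreturn_code buffer and a prebuilt
__user_rt_sigreturn snippet ("li a7, __NR_rt_sigreturn; ecall"); the
names here are illustrative, not necessarily those in the patch:

	/*
	 * nommu has no VDSO to return through, so copy the two-instruction
	 * trampoline into the signal frame on the user stack and point the
	 * return address register at it.
	 */
	if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
			 sizeof(frame->sigreturn_code)))
		return -EFAULT;
	regs->ra = (unsigned long)&frame->sigreturn_code;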
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU. RISC-V has no direct mechanism for instruction cache
 * shoot downs, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches. To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
 * executing a MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart. This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}
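
/*
 * For reference, a minimal sketch of the sender side that the barrier
 * above pairs with (modeled on flush_icache_mm(); the helper name
 * ipi_remote_fence_i is an assumption here, and the error and
 * local-only paths are omitted): mark every hart's icache stale, flush
 * the local one immediately, and IPI only the harts currently running
 * this mm. All other harts are flushed lazily by flush_icache_deferred()
 * above when they next switch to this mm.
 */
#if 0	/* illustrative sketch, not built */
static void example_flush_icache_mm(struct mm_struct *mm)
{
	unsigned int cpu = get_cpu();
	cpumask_t others;

	/*
	 * Order the just-written instructions before marking harts stale;
	 * pairs with the smp_mb() in flush_icache_deferred().
	 */
	smp_mb();

	/* Mark all harts stale, then flush and un-mark ourselves. */
	cpumask_setall(&mm->context.icache_stale_mask);
	cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
	local_flush_icache_all();

	/* IPI only the harts that are executing this mm right now. */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	if (!cpumask_empty(&others))
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);

	put_cpu();
}
#endif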

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active. This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifdef CONFIG_MMU
	/* Point satp at the next mm's page table and flush stale TLB entries. */
	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
	local_flush_tlb_all();
#endif

	flush_icache_deferred(next);
}