// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

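/*
 * Set once mem_init() has completed; early code can test this to tell
 * whether the normal page allocator is usable yet.
 */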
int mem_init_done;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfn);
}

/* These mark extents of read-only kernel pages, from vmlinux.lds.S */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	phys_addr_t start, end;
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	u64 i;

	v = PAGE_OFFSET;

	for_each_mem_range(i, &start, &end) {
		p = (u32) start & PAGE_MASK;
		e = (u32) end;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			p4e = p4d_offset(pge, v);
			pue = pud_offset(p4e, v);
			pme = pmd_offset(pue, v);

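			/* With the two-level (folded) tables assumed here,
			 * the p4d/pud/pmd offsets all collapse back onto the
			 * pgd entry itself, hence this sanity check.
			 */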
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for "
				      "two-level page tables",
				      __func__);
			}

			/* Alloc one page for holding PTEs... */
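			/* memblock_alloc_raw() returns a virtual address;
			 * failure this early is unrecoverable, so panic.
			 */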
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);

			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       start, end);
	}
}

void __init paging_init(void)
{
	extern void tlb_init(void);

	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 * switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* New TLB miss handlers and kernel page tables are now in place.
	 * Make sure that page flags get updated for all pages in TLB by
	 * flushing the TLB and forcing all TLB entries to be recreated
	 * from their page table flags.
	 */
	flush_tlb_all();
}

/* References to section boundaries */

void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	printk("mem_init_done ...........................................\n");
	mem_init_done = 1;
}
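
/*
 * vm_flags combinations -> page protections, consumed by the generic
 * vm_get_page_prot() that DECLARE_VM_GET_PAGE_PROT instantiates below.
 */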
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY_X,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY_X,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT