mirror of
https://github.com/torvalds/linux.git
synced 2024-12-01 00:21:32 +00:00
6d0b922545
This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports the standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private, static protection_map[] array. Subsequently, all __SXXX and __PXXX macros, which are no longer needed, can be dropped.

Link: https://lkml.kernel.org/r/20220711070600.2378316-22-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
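For context, a minimal sketch of what the generic DECLARE_VM_GET_PAGE_PROT helper is expected to expand to (the exact definition lives in include/linux/pgtable.h and may differ in detail): vm_get_page_prot() simply masks the access-related vm_flags bits and uses them to index the architecture's protection_map[] table shown at the bottom of the file below.

/*
 * Sketch, not the verbatim kernel macro: DECLARE_VM_GET_PAGE_PROT declares
 * and exports a vm_get_page_prot() that indexes protection_map[] with the
 * read/write/exec/shared bits of vm_flags.
 */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return protection_map[vm_flags &
                              (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);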
118 lines
3.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/sun3mmu.c
 *
 * Implementations of mm routines specific to the sun3 MMU.
 *
 * Moved here 8/20/1999 Sam Creasey
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/machdep.h>
#include <asm/io.h>

extern void mmu_emu_init(unsigned long bootmem_end);

const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern unsigned long num_pages;

/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */

void __init paging_init(void)
{
        pgd_t *pg_dir;
        pte_t *pg_table;
        int i;
        unsigned long address;
        unsigned long next_pgtable;
        unsigned long bootmem_end;
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
        unsigned long size;

        empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        address = PAGE_OFFSET;
        pg_dir = swapper_pg_dir;
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
        memset(kernel_pg_dir, 0, sizeof(kernel_pg_dir));

        size = num_pages * sizeof(pte_t);
        size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

        next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
        if (!next_pgtable)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, size, PAGE_SIZE);
        bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

        /* Map whole memory from PAGE_OFFSET (0x0E000000) */
        pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

        while (address < (unsigned long)high_memory) {
                pg_table = (pte_t *) __pa(next_pgtable);
                next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
                pgd_val(*pg_dir) = (unsigned long) pg_table;
                pg_dir++;

                /* now change pg_table to kernel virtual addresses */
                pg_table = (pte_t *) __va((unsigned long) pg_table);
                for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
                        pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);

                        if (address >= (unsigned long)high_memory)
                                pte_val(pte) = 0;
                        set_pte(pg_table, pte);
                        address += PAGE_SIZE;
                }
        }

        mmu_emu_init(bootmem_end);

        current->mm = NULL;

        /* memory sizing is a hack stolen from motorola.c.. hope it works for us */
        max_zone_pfn[ZONE_DMA] = ((unsigned long)high_memory) >> PAGE_SHIFT;

        /* I really wish I knew why the following change made things better... -- Sam */
        free_area_init(max_zone_pfn);
}

static const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_READONLY,
        [VM_WRITE]                                      = PAGE_COPY,
        [VM_WRITE | VM_READ]                            = PAGE_COPY,
        [VM_EXEC]                                       = PAGE_READONLY,
        [VM_EXEC | VM_READ]                             = PAGE_READONLY,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY,
        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC]                           = PAGE_READONLY,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_READONLY,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT