commit fb2af0020a

Pull ARM updates from Russell King:

 "This contains the usual updates from other people (listed below) and the usual random muddle of miscellaneous ARM updates which cover some low priority bug fixes and performance improvements.

  I've started to put the pull request wording into the merge commits, which are:

  - NoMMU stuff:

    This includes the following series sent earlier to the list:
     - nommu-fixes
     - R7 Support
     - MPU support

    I've left out the ARCH_MULTIPLATFORM/!MMU stuff that Arnd and I were discussing today until we've reached a conclusion/that's had some more review.

    This is rebased (and re-tested) on your devel-stable branch because otherwise there were going to be conflicts with Uwe's V7M work now that you've merged that. I've included the fix for limiting MPU to CPU_V7.

  - Huge page support

    These changes bring both HugeTLB support and Transparent HugePage (THP) support to ARM. Only long descriptors (LPAE) are supported in this series. The code has been tested on an Arndale board (Exynos 5250).

  - LPAE updates

    Please pull these miscellaneous LPAE fixes I've been collecting for a while now for 3.11. They've been tested and reviewed by quite a few people, and most of the patches are pretty trivial. -- Will Deacon

  - arch_timer cleanups

    Please pull these arch_timer cleanups I've been holding onto for a while. They're the same as my last posting, but have been rebased to v3.10-rc3.

  - mpidr linearisation (multiprocessor id register - identifies which CPU number we are in the system)

    This patch series implements MPIDR linearization through a simple hashing algorithm and updates current cpu_{suspend}/{resume} code to use the newly created hash structures to retrieve context pointers. It represents a stepping stone for the implementation of power management code on forthcoming multi-cluster ARM systems.

    It has been tested on TC2 (dual cluster A15xA7 system), iMX6q, OMAP4 and Tegra, with processors hitting low-power states requiring warm-boot resume through the cpu_resume code path"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
  ARM: 7775/1: mm: Remove do_sect_fault from LPAE code
  ARM: 7777/1: Avoid extra calls to the C compiler
  ARM: 7774/1: Fix dtb dependency to use order-only prerequisites
  ARM: 7770/1: remove residual ARMv2 support from decompressor
  ARM: 7769/1: Cortex-A15: fix erratum 798181 implementation
  ARM: 7768/1: prevent risks of out-of-bound access in ASID allocator
  ARM: 7767/1: let the ASID allocator handle suspended animation
  ARM: 7766/1: versatile: don't mark pen as __INIT
  ARM: 7765/1: perf: Record the user-mode PC in the call chain.
  ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork
  ARM: kernel: implement stack pointer save array through MPIDR hashing
  ARM: kernel: build MPIDR hash function data structure
  ARM: mpu: Ensure that MPU depends on CPU_V7
  ARM: mpu: protect the vectors page with an MPU region
  ARM: mpu: Allow enabling of the MPU via kconfig
  ARM: 7758/1: introduce config HAS_BANDGAP
  ARM: 7757/1: mm: don't flush icache in switch_mm with hardware broadcasting
  ARM: 7751/1: zImage: don't overwrite ourself with a page table
  ARM: 7749/1: spinlock: retry trylock operation if strex fails on free lock
  ARM: 7748/1: oabi: handle faults when loading swi instruction from userspace
  ...

/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#include "mm.h"
#include "tcm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
pgprot_t pgprot_hyp_device;
pgprot_t pgprot_s2;
pgprot_t pgprot_s2_device;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

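/*
 * Table of cache policies selectable with the "cachepolicy=" boot
 * parameter: each entry names the policy, gives the control register
 * bits to clear for it, and the section/PTE memory-type bits to use
 * for kernel mappings.
 */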
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
	pteval_t	pte_s2;
};

#ifdef CONFIG_ARM_LPAE
#define s2_policy(policy)	policy
#else
#define s2_policy(policy)	0
#endif

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}
};

#ifdef CONFIG_CPU_CP15
/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and writebuffer to be turned off.  (Note: the
 * write buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

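/*
 * adjust_cr() is only provided on UP: it changes the requested bits in
 * the CP15 control register and keeps the cached cr_alignment and
 * cr_no_alignment copies in step with the hardware.
 */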
#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#else	/* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warning("noalign kernel parameter not supported without cp15\n");
}
__setup("noalign", noalign_setup);

#endif	/* ifdef CONFIG_CPU_CP15 / else */

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

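/*
 * The table below gives the page protection, L1 descriptor and domain
 * attributes for each memory type used by create_mapping().  The
 * entries are fixed up at boot by build_mem_type_table() to match the
 * CPU and system configuration (SMP, LPAE, TEX remapping, ...).
 */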
static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain		= DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_BUFFERABLE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				  PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
	s2_pgprot = cp->pte_s2;
	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			s2_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);
	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
	pgprot_s2_device  = __pgprot(s2_device_pgprot);
	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

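/*
 * With CONFIG_ARM_DMA_MEM_BUFFERABLE, choose the pgprot used when
 * userspace mmap()s physical memory (e.g. via /dev/mem): non-RAM is
 * mapped non-cached, and RAM opened with O_SYNC is mapped
 * write-combining.
 */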
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

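/*
 * Boot-time allocators: grab zeroed, aligned memory straight from
 * memblock, before the normal page allocator is available.
 */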
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

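/*
 * Make sure a page table exists for the given pmd, allocating one from
 * the early allocator if necessary, and return the pte entry for addr.
 */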
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

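/*
 * Map the range [addr, end) with individual small-page PTEs starting
 * at pfn, using the protection bits of the given memory type.
 */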
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
			unsigned long end, phys_addr_t phys,
			const struct mem_type *type)
{
	pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded in to
	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
	 * group of L1 entries making up one logical pointer to
	 * an L2 table (2MB), where as PMDs refer to the individual
	 * L1 entries (1MB). Hence increment to get the correct
	 * offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
	if (addr & SECTION_SIZE)
		pmd++;
#endif
	do {
		*pmd = __pmd(phys | type->prot_sect);
		phys += SECTION_SIZE;
	} while (pmd++, addr += SECTION_SIZE, addr != end);

	flush_pmd_entry(p);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
		if (type->prot_sect &&
				((addr | next | phys) & ~SECTION_MASK) == 0) {
			__map_init_section(pmd, addr, next, phys, type);
		} else {
			alloc_init_pte(pmd, addr, next,
					__phys_to_pfn(phys), type);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}

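/*
 * Walk the pud entries covering [addr, end) and populate each one via
 * alloc_init_pmd().
 */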
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

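/*
 * Create supersection (16MB) mappings for physical addresses above
 * 4GB.  Only the classic MMU format supports this: the CPU must be
 * ARMv6+ or Xscale3, the mapping must use domain 0, and everything
 * must be supersection aligned.
 */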
#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	struct static_vm *svm;

	if (!nr)
		return;

	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);

		vm = &svm->vm;
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		add_static_vm_early(svm++);
	}
}

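/*
 * Register a region of the vmalloc space as occupied by a static
 * mapping so that later ioremap()/vmalloc() calls stay clear of it.
 */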
void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;
	struct static_vm *svm;

	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));

	vm = &svm->vm;
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	add_static_vm_early(svm);
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct static_vm *svm;
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
	struct static_vm *svm;

	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
	if (svm)
		return;

	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif

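/*
 * With CONFIG_DEBUG_LL, map a single MT_DEVICE page for the low-level
 * debug UART reported by debug_ll_addr() so early console output keeps
 * working after the boot mappings have been cleared.
 */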
#ifdef CONFIG_DEBUG_LL
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	create_mapping(&map);
}
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

phys_addr_t arm_lowmem_limit __initdata = 0;

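/*
 * Walk the meminfo banks: mark banks above the lowmem limit as highmem,
 * split or truncate banks that straddle the vmalloc boundary, and work
 * out arm_lowmem_limit for the directly mapped region.
 */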
void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;
	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		phys_addr_t size_limit;

		*bank = meminfo.bank[i];
		size_limit = bank->size;

		if (bank->start >= vmalloc_limit)
			highmem = 1;
		else
			size_limit = vmalloc_limit - bank->start;

		bank->highmem = highmem;

#ifdef CONFIG_HIGHMEM
		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && bank->size > size_limit) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= size_limit;
				bank[1].start = vmalloc_limit;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = size_limit;
		}
#else
		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (bank->size > size_limit) {
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + size_limit - 1);
			bank->size = size_limit;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}

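/*
 * Clear out the boot-time section mappings that are no longer needed:
 * everything below the kernel image and everything between the end of
 * the first lowmem block and the start of the vmalloc area.
 */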
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	else
		debug_ll_io_init();
	fill_pmd_gaps();

	/* Reserve fixed i/o space in VMALLOC region */
	pci_reserve_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

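/*
 * With CONFIG_HIGHMEM, allocate the page table used by kmap() for the
 * persistent kernel mappings (PKMAP) area.
 */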
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

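/*
 * Create MT_MEMORY mappings for every memblock region below
 * arm_lowmem_limit; these form the kernel's linear (direct) mapping.
 */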
static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(arm_lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}