mirror of
https://github.com/torvalds/linux.git
synced 2024-11-01 09:41:44 +00:00
e40678559f
Flushing the cache is needed for the hardware to see the idmap table and therefore can be done at init time. On ARMv7 it is not necessary to flush L2, so flush_cache_louis() is used here instead. There is no point flushing the cache in setup_mm_for_reboot() as the caller should be, and already is, taking care of this.

If switching the memory map requires a cache flush, then cpu_switch_mm() already includes that operation. What is not done by cpu_switch_mm() on ASID-capable CPUs is TLB flushing, as the whole point of the ASID is to tag the TLBs and avoid flushing them on a context switch. Since we don't have a clean ASID for the identity mapping, we need to flush the TLB explicitly in that case. Otherwise this is already performed by cpu_switch_mm().

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
121 lines
3.0 KiB
C
#include <linux/kernel.h>
|
|
|
|
#include <asm/cputype.h>
|
|
#include <asm/idmap.h>
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/sections.h>
|
|
#include <asm/system_info.h>
|
|
|
|
pgd_t *idmap_pgd;
|
|
|
|
#ifdef CONFIG_ARM_LPAE
/*
 * Populate the PMD level of the identity map for [addr, end) with
 * section entries carrying the given prot bits (LPAE variant).
 *
 * Allocation failure is only reported with a warning: the caller has no
 * error path, so the affected range is simply left unmapped.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Allocate a fresh pmd table when none is installed yet, or when
	 * the pud entry still refers to the swapper's tables
	 * (L_PGD_SWAPPER): the identity map must not share page tables
	 * with swapper_pg_dir.
	 */
	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			/* pr_warn() is the current name; pr_warning() is deprecated */
			pr_warn("Failed to allocate identity pmd.\n");
			return;
		}
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	/* Write one section descriptor per PMD-sized chunk and flush it. */
	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
|
|
#else /* !CONFIG_ARM_LPAE */
|
|
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
|
|
unsigned long prot)
|
|
{
|
|
pmd_t *pmd = pmd_offset(pud, addr);
|
|
|
|
addr = (addr & PMD_MASK) | prot;
|
|
pmd[0] = __pmd(addr);
|
|
addr += SECTION_SIZE;
|
|
pmd[1] = __pmd(addr);
|
|
flush_pmd_entry(pmd);
|
|
}
|
|
#endif /* CONFIG_ARM_LPAE */
|
|
|
|
/*
 * Walk the PUD level for [addr, end) and hand each PUD-sized chunk to
 * idmap_add_pmd(), which installs the actual section mappings.
 */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}
|
|
|
|
/*
 * Install a 1:1 (virtual == physical) section mapping for [addr, end)
 * into the page table rooted at pgd.
 */
static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot, next;

	/* Writable kernel sections; PMD_SECT_AF is the LPAE Access Flag. */
	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
	/*
	 * Pre-ARMv6 cores other than XScale need bit 4 set in section
	 * descriptors (PMD_BIT4).
	 */
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	/* Walk the top level one pgd entry at a time. */
	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}
|
|
|
|
/* Linker-provided bounds of the idmap text section. */
extern char __idmap_text_start[], __idmap_text_end[];

/*
 * Build the static identity map covering the idmap text section, so the
 * code there can safely run while the MMU state is being changed.
 * Runs once via early_initcall(); returns 0 on success or -ENOMEM.
 */
static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start, idmap_end;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	/* Add an identity mapping for the physical address of the section. */
	idmap_start = virt_to_phys((void *)__idmap_text_start);
	idmap_end = virt_to_phys((void *)__idmap_text_end);

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	/* Flush L1 for the hardware to see this page table content */
	flush_cache_louis();

	return 0;
}
early_initcall(init_static_idmap);
|
|
|
|
/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.
 */
void setup_mm_for_reboot(void)
{
	/*
	 * Switch to the identity mapping. On non-ASID CPUs this also
	 * performs whatever cache/TLB maintenance the switch requires.
	 */
	cpu_switch_mm(idmap_pgd, &init_mm);

#ifdef CONFIG_CPU_HAS_ASID
	/*
	 * We don't have a clean ASID for the identity mapping, which
	 * may clash with virtual addresses of the previous page tables
	 * and therefore potentially in the TLB.
	 */
	local_flush_tlb_all();
#endif
}
|