/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>

#include "proc-macros.S"

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

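/*
 * MAIR_EL1 is split into eight 8-bit attribute fields; MAIR(attr, mt) places
 * the attribute encoding 'attr' in the field selected by memory type index
 * 'mt' (see the attribute table in __cpu_setup).
 */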
#define MAIR(attr, mt)	((attr) << ((mt) * 8))

/*
 *	cpu_do_idle()
 *
 *	Idle the processor (wait for interrupt).
 */
ENTRY(cpu_do_idle)
	dsb	sy				// WFI may enter a low-power mode
	wfi
	ret
ENDPROC(cpu_do_idle)

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 */
ENTRY(cpu_do_suspend)
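	/*
	 * The number and order of registers saved here must stay in sync
	 * with struct cpu_suspend_ctx (NR_CTX_REGS) in asm/suspend.h.
	 */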
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, mair_el1
	mrs	x6, cpacr_el1
	mrs	x7, ttbr1_el1
	mrs	x8, tcr_el1
	mrs	x9, vbar_el1
	mrs	x10, mdscr_el1
	mrs	x11, oslsr_el1
	mrs	x12, sctlr_el1
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	str	x12, [x0, #80]
	ret
ENDPROC(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Physical address of context pointer
 * x1: ttbr0_el1 to be restored
 *
 * Returns:
 *	sctlr_el1 value in x0
 */
ENTRY(cpu_do_resume)
	/*
	 * Invalidate local tlb entries before turning on MMU
	 */
	tlbi	vmalle1
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x7, [x0, #32]
	ldp	x8, x9, [x0, #48]
	ldp	x10, x11, [x0, #64]
	ldr	x12, [x0, #80]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	mair_el1, x5
	msr	cpacr_el1, x6
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, x7
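	/*
	 * The identity map may need a wider VA range than the kernel when
	 * RAM sits at a high physical offset, so widen T0SZ in the saved
	 * TCR value before restoring it.
	 */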
	tcr_set_idmap_t0sz	x8, x7
	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10
	/*
	 * Restore oslsr_el1 by writing oslar_el1: the saved OSLSR_EL1.OSLK
	 * bit (bit 1) is written back to OSLAR_EL1.OSLK (bit 0) to restore
	 * the OS Lock state.
	 */
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	mov	x0, x12
	dsb	nsh				// Make sure local tlb invalidation completed
	isb
	ret
ENDPROC(cpu_do_resume)
#endif

/*
 *	cpu_do_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys.
 *
 *	- pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
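	/*
	 * With 16-bit ASIDs (TCR_ASID16 is set in __cpu_setup), the ASID
	 * from mm->context.id is carried in TTBR0_EL1 bits [63:48].
	 */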
	mmid	x1, x1				// get mm->context.id
	bfi	x0, x1, #48, #16		// set the ASID
	msr	ttbr0_el1, x0			// set TTBR0
	isb
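	/*
	 * On CPUs affected by Cavium erratum 27456 the alternative below is
	 * patched in to invalidate the I-cache after the ASID switch; all
	 * other CPUs simply return.
	 */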
alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
	ret
	nop
	nop
	nop
alternative_else
	ic	iallu
	dsb	nsh
	isb
	ret
alternative_endif
ENDPROC(cpu_do_switch_mm)

	.pushsection ".idmap.text", "ax"
/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
ENTRY(idmap_cpu_replace_ttbr1)
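	/*
	 * Swapping TTBR1_EL1 in place could leave conflicting old and new
	 * TLB entries for the same VA. Instead: mask exceptions, point
	 * TTBR1 at the all-invalid zero page, invalidate the TLBs, and only
	 * then install the new table.
	 */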
	mrs	x2, daif
	msr	daifset, #0xf

	adrp	x1, empty_zero_page
	msr	ttbr1_el1, x1
	isb

	tlbi	vmalle1
	dsb	nsh
	isb

	msr	ttbr1_el1, x0
	isb

	msr	daif, x2

	ret
ENDPROC(idmap_cpu_replace_ttbr1)
	.popsection

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on. Return in x0 the
 *	value of the SCTLR_EL1 register.
 */
ENTRY(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh
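
	/*
	 * CPACR_EL1.FPEN == 0b11 disables trapping of FP/Advanced SIMD
	 * accesses, and MDSCR_EL1 bit 12 (TDCC) traps EL0 accesses to the
	 * Debug Communications Channel.
	 */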
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			// Enable FP/ASIMD
	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x0			// access to the DCC from EL0
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	/*
	 * Memory region attributes for LPAE:
	 *
	 *   n = AttrIndx[2:0]
	 *			n	MAIR
	 *   DEVICE_nGnRnE	000	00000000
	 *   DEVICE_nGnRE	001	00000100
	 *   DEVICE_GRE		010	00001100
	 *   NORMAL_NC		011	01000100
	 *   NORMAL		100	11111111
	 *   NORMAL_WT		101	10111011
	 */
	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
		     MAIR(0x04, MT_DEVICE_nGnRE) | \
		     MAIR(0x0c, MT_DEVICE_GRE) | \
		     MAIR(0x44, MT_NORMAL_NC) | \
		     MAIR(0xff, MT_NORMAL) | \
		     MAIR(0xbb, MT_NORMAL_WT)
	msr	mair_el1, x5
	/*
	 * Prepare SCTLR
	 */
	adr	x5, crval
	ldp	w5, w6, [x5]
	mrs	x0, sctlr_el1
	bic	x0, x0, x5			// clear bits
	orr	x0, x0, x6			// set bits
	/*
	 * Set/prepare TCR and TTBR. TCR_TxSZ(VA_BITS) sizes the address
	 * range for both user and kernel (512GB with the default 39-bit
	 * VA_BITS).
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
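	/*
	 * The identity map may need more VA bits than VA_BITS provides if
	 * system RAM sits at a high physical offset; in that case widen
	 * T0SZ to the boot-time idmap_t0sz value.
	 */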
	tcr_set_idmap_t0sz	x10, x9

	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
	 * TCR_EL1.
	 */
	mrs	x9, ID_AA64MMFR0_EL1
	bfi	x10, x9, #32, #3
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Hardware update of the Access and Dirty bits.
	 */
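	/*
	 * ID_AA64MMFR1_EL1 bits [3:0]: 0 = no hardware updates, 1 = Access
	 * flag only, 2 = Access flag and Dirty state.
	 */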
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 2f
	cmp	x9, #2
	b.lt	1f
	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
2:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	tcr_el1, x10
	ret					// return to head.S
ENDPROC(__cpu_setup)

	/*
	 * We set the desired value explicitly, including those of the
	 * reserved bits. The values of bits EE & E0E were set early in
	 * el2_setup, which are left untouched below.
	 *
	 *                 n n            T
	 *       U E      WT T UD     US IHBS
	 *       CE0      XWHW CZ     ME TEEA S
	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
	 * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
	 * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
	 */
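	/*
	 * crval supplies the clear/set masks that __cpu_setup applies to
	 * SCTLR_EL1 with the bic/orr pair above.
	 */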
	.type	crval, #object
crval:
	.word	0xfcffffff			// clear
	.word	0x34d5d91d			// set