/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

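/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is written to
 * CONTEXTIDR_EL1 on every context switch, so that external debug and trace
 * tools can identify the running task.
 */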
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
	asm(
	"	msr	contextidr_el1, %0\n"
	"	isb"
	:
	: "r" (task_pid_nr(next)));
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
}
#endif

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = virt_to_phys(empty_zero_page);

	asm(
	"	msr	ttbr0_el1, %0			// set TTBR0\n"
	"	isb"
	:
	: "r" (ttbr));
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;

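/*
 * True if the idmap needs a larger VA range (i.e. a smaller T0SZ) than the
 * configured VA_BITS provide. With CONFIG_ARM64_VA_BITS_48 this can never
 * happen, so the check folds away at compile time.
 */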
static inline bool __cpu_uses_extended_idmap(void)
{
	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
}

/*
 * Set TCR.T0SZ to the given value; see the cpu_set_default_tcr_t0sz() and
 * cpu_set_idmap_tcr_t0sz() helpers below for the two values of interest.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

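	/* Read-modify-write: insert t0sz into the T0SZ field of TCR_EL1. */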
	asm volatile (
	"	mrs	%0, tcr_el1	;"
	"	bfi	%0, %1, %2, %3	;"
	"	msr	tcr_el1, %0	;"
	"	isb"
	: "=&r" (tcr)
	: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm)
		cpu_switch_mm(mm->pgd, mm);
}

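/*
 * Install the idmap in TTBR0_EL1, widening T0SZ if the idmap needs the
 * extended VA range, so that physically-addressed code can be executed
 * (e.g. while TTBR1_EL1 is being replaced).
 */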
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(idmap_pg_dir, &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgd)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	phys_addr_t pgd_phys = virt_to_phys(pgd);

	/*
	 * The replacement routine runs from the idmap (where VA == PA), so
	 * call it via its physical address; kernel virtual addresses may be
	 * unmapped while TTBR1_EL1 is being swapped.
	 */
	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(pgd_phys);
	cpu_uninstall_idmap();
}
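
/*
 * A hypothetical usage sketch (tmp_pg_dir is illustrative, not defined by
 * this header): stage updates in a scratch copy of the swapper page tables,
 * install it, rewrite the real tables while they are inactive, then switch
 * back:
 *
 *	cpu_replace_ttbr1(tmp_pg_dir);
 *	// ... modify swapper_pg_dir while it is not live ...
 *	cpu_replace_ttbr1(swapper_pg_dir);
 *
 * Each replacement goes via the idmap, so no conflicting TTBR1 TLB entries
 * can be allocated while the tables are being swapped.
 */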
/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while(0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

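/*
 * New mms start with ASID 0, which the allocator treats as "no ASID
 * assigned"; a real ASID is handed out by check_and_switch_context() when
 * the mm is first switched in.
 */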
#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned. No registers are touched. We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev == next)
		return;

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	/* Allocate/validate next's ASID and install its pgd in TTBR0. */
	check_and_switch_context(next, cpu);
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

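/*
 * Defined in mm/context.c: sanity-checks that a newly onlined CPU implements
 * at least as many ASID bits as the boot CPU, since the allocator sizes its
 * structures from the boot CPU's value.
 */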
void verify_cpu_asid_bits(void);
#endif