/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}
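
/*
 * Note (editorial): with CONFIG_PID_IN_CONTEXTIDR=y the switch above
 * publishes the incoming task's PID in CONTEXTIDR_EL1, which external
 * debuggers and trace tools can sample to attribute execution to a task
 * without reading kernel memory.
 */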

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;
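
/*
 * Worked example (editorial note): T0SZ encodes 64 minus the number of
 * input address bits, so VA_BITS = 48 corresponds to T0SZ = 16 and a
 * 2^48-byte TTBR0 range. If the ID map text lies above that limit,
 * idmap_t0sz is lowered at boot (towards 12 for a 52-bit VA) so the
 * identity mapping can still reach it.
 */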

static inline bool __cpu_uses_extended_idmap(void)
{
	if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
		return false;

	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
 * Set TCR.T0SZ to its default value (based on VA_BITS)
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	tcr = read_sysreg(tcr_el1);
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}
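
/*
 * Note (editorial): the intended pattern is cpu_install_idmap(), run code
 * that must not rely on the current TTBR1_EL1 tables (for instance while
 * they are being replaced), then cpu_uninstall_idmap() to return to the
 * active mm; cpu_replace_ttbr1() below follows exactly this sequence.
 */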

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used when only the boot CPU is
		 * up (i.e. the cpufeature framework is not up yet) and
		 * later only when we enable CNP via cpufeature's
		 * enable() callback.
		 * Also we rely on the cpu_hwcap bit being set before
		 * calling the enable() function.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}
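
/*
 * Illustrative usage (editorial sketch, based on callers such as
 * kasan_init(); details may differ across kernel versions): updates are
 * staged in a spare set of page tables, then swapped in atomically:
 *
 *	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
 *	... repopulate swapper_pg_dir ...
 *	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 *
 * lm_alias() is used because virt_to_phys() above expects a linear-map
 * address rather than a kernel-image symbol address.
 */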

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while(0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
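
/*
 * Note (editorial): mm->context.id packs the ASID in its low bits and an
 * allocation generation in the upper bits (see arch/arm64/mm/context.c).
 * Initialising it to 0 marks the mm as having no ASID yet, so the first
 * check_and_switch_context() on this mm allocates one.
 */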

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = __pa_symbol(empty_zero_page);
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif
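
/*
 * Note (editorial): with CONFIG_ARM64_SW_TTBR0_PAN the value saved above
 * (the pgd physical address with the ASID in bits 63:48, or the zero page
 * for kernel threads on init_mm) is what the entry/uaccess code writes back
 * into TTBR0_EL1 when user accesses are re-enabled; at other times
 * TTBR0_EL1 points at the reserved tables to emulate PAN in software.
 */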

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next, cpu);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}
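
/*
 * Note (editorial): on a context switch the core scheduler calls switch_mm().
 * When the mm actually changes, __switch_mm() either parks TTBR0 on the
 * reserved tables (kernel threads running on init_mm) or calls
 * check_and_switch_context(), which allocates or revalidates the ASID and,
 * except when SW TTBR0 PAN defers it to the uaccess code, programs
 * TTBR0_EL1 via cpu_switch_mm().
 */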

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */