/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__

#include <asm/esr.h>
#include <asm/memory.h>
#include <asm/types.h>

/* Hyp Configuration Register (HCR) bits */
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
#define HCR_TEA (UL(1) << 37)
#define HCR_TERR (UL(1) << 36)
#define HCR_TLOR (UL(1) << 35)
#define HCR_E2H (UL(1) << 34)
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
#define HCR_RW_SHIFT 31
#define HCR_RW (UL(1) << HCR_RW_SHIFT)
#define HCR_TRVM (UL(1) << 30)
#define HCR_HCD (UL(1) << 29)
#define HCR_TDZ (UL(1) << 28)
#define HCR_TGE (UL(1) << 27)
#define HCR_TVM (UL(1) << 26)
#define HCR_TTLB (UL(1) << 25)
#define HCR_TPU (UL(1) << 24)
#define HCR_TPC (UL(1) << 23)
#define HCR_TSW (UL(1) << 22)
#define HCR_TAC (UL(1) << 21)
#define HCR_TIDCP (UL(1) << 20)
#define HCR_TSC (UL(1) << 19)
#define HCR_TID3 (UL(1) << 18)
#define HCR_TID2 (UL(1) << 17)
#define HCR_TID1 (UL(1) << 16)
#define HCR_TID0 (UL(1) << 15)
#define HCR_TWE (UL(1) << 14)
#define HCR_TWI (UL(1) << 13)
#define HCR_DC (UL(1) << 12)
#define HCR_BSU (3 << 10)
#define HCR_BSU_IS (UL(1) << 10)
#define HCR_FB (UL(1) << 9)
#define HCR_VSE (UL(1) << 8)
#define HCR_VI (UL(1) << 7)
#define HCR_VF (UL(1) << 6)
#define HCR_AMO (UL(1) << 5)
#define HCR_IMO (UL(1) << 4)
#define HCR_FMO (UL(1) << 3)
#define HCR_PTW (UL(1) << 2)
#define HCR_SWIO (UL(1) << 1)
#define HCR_VM (UL(1) << 0)

/*
 * The bits we set in HCR:
 * TLOR: Trap LORegion register accesses
 * RW: 64bit by default, can be overridden for 32bit VMs
 * TAC: Trap ACTLR
 * TSC: Trap SMC
 * TSW: Trap cache operations by set/way
 * TWE: Trap WFE
 * TWI: Trap WFI
 * TIDCP: Trap L2CTLR/L2ECTLR
 * BSU_IS: Upgrade barriers to the inner shareable domain
 * FB: Force broadcast of all maintenance operations
 * AMO: Override CPSR.A and enable signaling with VA
 * IMO: Override CPSR.I and enable signaling with VI
 * FMO: Override CPSR.F and enable signaling with VF
 * SWIO: Turn set/way invalidates into set/way clean+invalidate
 */
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
                         HCR_BSU_IS | HCR_FB | HCR_TAC | \
                         HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
                         HCR_FMO | HCR_IMO)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
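
/*
 * Illustrative sketch: a guest's HCR_EL2 value is typically built from
 * HCR_GUEST_FLAGS and then adjusted per vCPU, e.g. clearing HCR_RW for
 * a 32-bit guest or adding HCR_VSE to inject a virtual SError.
 * vcpu_is_32bit and pending_vserror are hypothetical predicates, shown
 * only to illustrate how the flag groups above compose:
 *
 *     unsigned long hcr = HCR_GUEST_FLAGS;
 *
 *     if (vcpu_is_32bit)
 *             hcr &= ~HCR_RW;
 *     if (pending_vserror)
 *             hcr |= HCR_VSE;
 */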

/* TCR_EL2 Registers bits */
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS_SHIFT 16
#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
#define TCR_EL2_PS_40B (2 << TCR_EL2_PS_SHIFT)
#define TCR_EL2_TG0_MASK TCR_TG0_MASK
#define TCR_EL2_SH0_MASK TCR_SH0_MASK
#define TCR_EL2_ORGN0_MASK TCR_ORGN0_MASK
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
                      TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)

/* VTCR_EL2 Registers bits */
#define VTCR_EL2_RES1 (1U << 31)
#define VTCR_EL2_HD (1 << 22)
#define VTCR_EL2_HA (1 << 21)
#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
#define VTCR_EL2_TG0_MASK TCR_TG0_MASK
#define VTCR_EL2_TG0_4K TCR_TG0_4K
#define VTCR_EL2_TG0_16K TCR_TG0_16K
#define VTCR_EL2_TG0_64K TCR_TG0_64K
#define VTCR_EL2_SH0_MASK TCR_SH0_MASK
#define VTCR_EL2_SH0_INNER TCR_SH0_INNER
#define VTCR_EL2_ORGN0_MASK TCR_ORGN0_MASK
#define VTCR_EL2_ORGN0_WBWA TCR_ORGN0_WBWA
#define VTCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA
#define VTCR_EL2_SL0_SHIFT 6
#define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT)
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_VS_SHIFT 19
#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT)
#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT)

#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x)

/*
 * We configure the Stage-2 page tables to always restrict the IPA space to be
 * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
 * not known to exist and will break with this configuration.
 *
 * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
 *
 * Note that when using 4K pages, we concatenate two first level page tables
 * together. With 16K pages, we concatenate 16 first level page tables.
 */

#define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
                              VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)

/*
 * VTCR_EL2:SL0 indicates the entry level for Stage2 translation.
 * Interestingly, it depends on the page size.
 * See D.10.2.121, VTCR_EL2, in ARM DDI 0487C.a
 *
 * ----------------------------------
 * | Entry level |  4K  |  16K/64K  |
 * ----------------------------------
 * | Level: 0    |  2   |     -     |
 * ----------------------------------
 * | Level: 1    |  1   |     2     |
 * ----------------------------------
 * | Level: 2    |  0   |     1     |
 * ----------------------------------
 * | Level: 3    |  -   |     0     |
 * ----------------------------------
 *
 * The table roughly translates to:
 *
 *    SL0(PAGE_SIZE, Entry_level) = TGRAN_SL0_BASE - Entry_Level
 *
 * Where TGRAN_SL0_BASE is a magic number depending on the page size:
 *    TGRAN_SL0_BASE(4K) = 2
 *    TGRAN_SL0_BASE(16K) = 3
 *    TGRAN_SL0_BASE(64K) = 3
 * provided we take care of ruling out the unsupported cases and
 *    Entry_Level = 4 - Number_of_levels.
 */
#ifdef CONFIG_ARM64_64K_PAGES

#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K
#define VTCR_EL2_TGRAN_SL0_BASE 3UL

#elif defined(CONFIG_ARM64_16K_PAGES)

#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K
#define VTCR_EL2_TGRAN_SL0_BASE 3UL

#else /* 4K */

#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K
#define VTCR_EL2_TGRAN_SL0_BASE 2UL

#endif

#define VTCR_EL2_LVLS_TO_SL0(levels) \
        ((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT)
#define VTCR_EL2_SL0_TO_LVLS(sl0) \
        ((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE)
#define VTCR_EL2_LVLS(vtcr) \
        VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT)

#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN)
#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK))
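
/*
 * Worked example (illustrative): with 4K pages (VTCR_EL2_TGRAN_SL0_BASE
 * == 2UL) and the default 40-bit IPA space, T0SZ = VTCR_EL2_T0SZ(40) = 24
 * and stage2 uses 3 levels of translation, so:
 *
 *    VTCR_EL2_LVLS_TO_SL0(3) == (2 - (4 - 3)) << 6 == 1 << 6
 *    VTCR_EL2_SL0_TO_LVLS(1) == 1 + 4 - 2          == 3
 *    VTCR_EL2_IPA(vtcr)      == 64 - 24            == 40
 *
 * i.e. SL0 = 1, matching the "Level: 1" row of the table above.
 */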

/*
 * ARM VMSAv8-64 defines an algorithm for finding the translation table
 * descriptors in section D4.2.8 in ARM DDI 0487C.a.
 *
 * The algorithm defines the expectations on the translation table
 * addresses for each level, based on PAGE_SIZE, entry level
 * and the translation table size (T0SZ). The variable "x" in the
 * algorithm determines the alignment of a table base address at a given
 * level and thus determines the alignment of VTTBR:BADDR for the stage2
 * page table entry level.
 * Since the number of bits resolved at the entry level could vary
 * depending on the T0SZ, the value of "x" is defined based on a
 * Magic constant for a given PAGE_SIZE and Entry Level. The
 * intermediate levels must be always aligned to the PAGE_SIZE (i.e.,
 * x = PAGE_SHIFT).
 *
 * The value of "x" for the entry level is calculated as:
 *
 *    x = Magic_N - T0SZ
 *
 * where Magic_N is an integer depending on the page size and the entry
 * level of the page table as below:
 *
 * ------------------------------------------------
 * | Entry level          |  4K  |  16K  |  64K  |
 * ------------------------------------------------
 * | Level: 0 (4 levels)  |  28  |   -   |   -   |
 * ------------------------------------------------
 * | Level: 1 (3 levels)  |  37  |  31   |  25   |
 * ------------------------------------------------
 * | Level: 2 (2 levels)  |  46  |  42   |  38   |
 * ------------------------------------------------
 * | Level: 3 (1 level)   |   -  |  53   |  51   |
 * ------------------------------------------------
 *
 * We have a magic formula for the Magic_N below:
 *
 *    Magic_N(PAGE_SIZE, Level) = 64 - ((PAGE_SHIFT - 3) * Number_of_levels)
 *
 * where Number_of_levels = (4 - Level). We are only interested in the
 * value for Entry_Level for the stage2 page table.
 *
 * So, given that T0SZ = (64 - IPA_SHIFT), we can compute 'x' as follows:
 *
 *    x = (64 - ((PAGE_SHIFT - 3) * Number_of_levels)) - (64 - IPA_SHIFT)
 *      = IPA_SHIFT - ((PAGE_SHIFT - 3) * Number_of_levels)
 *
 * Here is one way to explain the Magic Formula:
 *
 *    x = log2(Size_of_Entry_Level_Table)
 *
 * Since we can resolve (PAGE_SHIFT - 3) bits at each level, and another
 * PAGE_SHIFT bits in the PTE, we have:
 *
 *    Bits_Entry_Level = IPA_SHIFT - ((PAGE_SHIFT - 3) * (n - 1) + PAGE_SHIFT)
 *                     = IPA_SHIFT - (PAGE_SHIFT - 3) * n - 3
 *
 * where n = number of levels, and since each pointer is 8 bytes, we have:
 *
 *    x = Bits_Entry_Level + 3
 *      = IPA_SHIFT - (PAGE_SHIFT - 3) * n
 *
 * The only constraint here is that we have to find the number of page table
 * levels for a given IPA size (which we do, see stage2_pt_levels()).
 */
#define ARM64_VTTBR_X(ipa, levels) ((ipa) - ((levels) * (PAGE_SHIFT - 3)))
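
/*
 * Worked example (illustrative): for the default 40-bit IPA space with
 * 4K pages (PAGE_SHIFT = 12) and 3 levels of stage2 translation:
 *
 *    ARM64_VTTBR_X(40, 3) = 40 - 3 * (12 - 3) = 13
 *
 * so VTTBR_EL2.BADDR must be 2^13 = 8KB aligned, which corresponds to
 * the two concatenated 4K first level tables mentioned earlier.
 */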

#define VTTBR_CNP_BIT (UL(1))
#define VTTBR_VMID_SHIFT (UL(48))
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << (size)) - 1) << VTTBR_VMID_SHIFT)

/* Hyp System Trap Register */
#define HSTR_EL2_T(x) (1 << (x))

/* Hyp Coprocessor Trap Register Shifts */
#define CPTR_EL2_TFP_SHIFT 10

/* Hyp Coprocessor Trap Register */
#define CPTR_EL2_TCPAC (1U << 31)
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
#define CPTR_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 */
#define CPTR_EL2_DEFAULT CPTR_EL2_RES1

/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_TPMS (1 << 14)
#define MDCR_EL2_E2PB_MASK (UL(0x3))
#define MDCR_EL2_E2PB_SHIFT (UL(12))
#define MDCR_EL2_TDRA (1 << 11)
#define MDCR_EL2_TDOSA (1 << 10)
#define MDCR_EL2_TDA (1 << 9)
#define MDCR_EL2_TDE (1 << 8)
#define MDCR_EL2_HPME (1 << 7)
#define MDCR_EL2_TPM (1 << 6)
#define MDCR_EL2_TPMCR (1 << 5)
#define MDCR_EL2_HPMN_MASK (0x1F)

/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT ESR_ELx_FSC_FAULT
#define FSC_ACCESS ESR_ELx_FSC_ACCESS
#define FSC_PERM ESR_ELx_FSC_PERM
#define FSC_SEA ESR_ELx_FSC_EXTABT
#define FSC_SEA_TTW0 (0x14)
#define FSC_SEA_TTW1 (0x15)
#define FSC_SEA_TTW2 (0x16)
#define FSC_SEA_TTW3 (0x17)
#define FSC_SECC (0x18)
#define FSC_SECC_TTW0 (0x1c)
#define FSC_SECC_TTW1 (0x1d)
#define FSC_SECC_TTW2 (0x1e)
#define FSC_SECC_TTW3 (0x1f)

/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
/*
 * We have
 *    PAR   [PA_Shift - 1 : 12] = PA   [PA_Shift - 1 : 12]
 *    HPFAR [PA_Shift - 9 : 4]  = FIPA [PA_Shift - 1 : 12]
 */
#define PAR_TO_HPFAR(par) \
        (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
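
/*
 * Worked example (illustrative, assuming PHYS_MASK_SHIFT == 48):
 * PAR_TO_HPFAR() masks PAR_EL1 bits [47:12] (the translated physical
 * address) and shifts them right by 8, placing them in HPFAR_EL2 bits
 * [39:4], i.e. the FIPA field described above.
 */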

#define ECN(x) { ESR_ELx_EC_##x, #x }

#define kvm_arm_exception_class \
        ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
        ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
        ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
        ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
        ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
        ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
        ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
        ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
        ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
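
/*
 * Illustrative note: each ECN() entry expands to a { value, "name" }
 * pair, e.g. ECN(WFx) becomes { ESR_ELx_EC_WFx, "WFx" }, so
 * kvm_arm_exception_class can serve as the initialiser of a table
 * mapping exception class values to their names.
 */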

#define CPACR_EL1_FPEN (3 << 20)
#define CPACR_EL1_TTA (1 << 28)
#define CPACR_EL1_DEFAULT (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN)

#endif /* __ARM64_KVM_ARM_H__ */