KVM/arm64 updates for v6.1

- Fixes for single-stepping in the presence of an async
  exception as well as the preservation of PSTATE.SS

- Better handling of AArch32 ID registers on AArch64-only
  systems

- Fixes for the dirty-ring API, allowing it to work on
  architectures with relaxed memory ordering

- Advertise the new kvmarm mailing list

- Various minor cleanups and spelling fixes
 -----BEGIN PGP SIGNATURE-----
 
 iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAmM5hQcPHG1hekBrZXJu
 ZWwub3JnAAoJECPQ0LrRPXpDoMUP/jra4HSmujLUB5G7Op8HxuurEecOc6xtw0Af
 AbDLlVc2Vs4rrdVh8GMc8D80atUAVitp8IFjdp/PzI2GTBTzWz43Gav2AbhgIJbJ
 xoFVHL8LkdHKyMbq10359DqGMqhIf41OFzGwhbzcx2V4pKNkSpjbCpu3bi/+Ybjg
 006ZpZc7NAU0rZgw9Flb/dhn0jw7RMc3orhoDQ4tBp1P/VhvqvgFt5bWipkvvBP7
 +lQK28ujG3ghST/hKRhg6ozgy5+6NEEHMuhErMYP8nIivRchX+pWF2Lb0qGH1e+U
 v2MZIZnIIUjyTV1vbYlxtltzfYmPuQ2MFNUBawI9tmlIOU9vJSCzeJS64uWK4KLV
 kbmk57OfC7rQoSNJH4jaKQp0YpIktrB9Vei97t4I7NwEmkjQj6cLTgg4tQrNqTiQ
 cFGeC9mE+lhFC8z1lCbna2eG631FxpPrB1SJ1/CU9wboam9dUfXGIvBPh+i2pvMZ
 vcxzUZJ11y+/uhp4k8i2PBwNno0iwRXd5MinwRUs2CR5vhs8qa5y7FVWKyqKpgI2
 xqr4lYTixJZL3mWkYyOQuClrTbT1zkoaPldLq6M7wvO08+QV8ryMeyKT+9s/gNQU
 dcYSwBCWZaOZm2nN8/zjxRb7VqZVu3cwyXi9XXUWNTCgIe/Q/SDPbXU/Hwbgzf8X
 UsQF7e9A
 =aNPK
 -----END PGP SIGNATURE-----

Merge tag 'kvmarm-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 updates for v6.1

- Fixes for single-stepping in the presence of an async
  exception as well as the preservation of PSTATE.SS

- Better handling of AArch32 ID registers on AArch64-only
  systems

- Fixes for the dirty-ring API, allowing it to work on
  architectures with relaxed memory ordering

- Advertise the new kvmarm mailing list

- Various minor cleanups and spelling fixes
commit fe4d9e4abf
Paolo Bonzini, 2022-10-03 15:33:32 -04:00
51 changed files with 1294 additions and 604 deletions


@@ -7918,8 +7918,8 @@ guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
 (0x40000001). Otherwise, a guest may use the paravirtual features
 regardless of what has actually been exposed through the CPUID leaf.

-8.29 KVM_CAP_DIRTY_LOG_RING
----------------------------
+8.29 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
+----------------------------------------------------------

 :Architectures: x86
 :Parameters: args[0] - size of the dirty log ring
@@ -7977,6 +7977,11 @@ on to the next GFN. The userspace should continue to do this until the
 flags of a GFN have the DIRTY bit cleared, meaning that it has harvested
 all the dirty GFNs that were available.

+Note that on weakly ordered architectures, userspace accesses to the
+ring buffer (and more specifically the 'flags' field) must be ordered,
+using load-acquire/store-release accessors when available, or any
+other memory barrier that will ensure this ordering.
+
 It's not necessary for userspace to harvest the all dirty GFNs at once.
 However it must collect the dirty GFNs in sequence, i.e., the userspace
 program cannot skip one dirty GFN to collect the one next to it.
@@ -8005,6 +8010,14 @@ KVM_CAP_DIRTY_LOG_RING with an acceptable dirty ring size, the virtual
 machine will switch to ring-buffer dirty page tracking and further
 KVM_GET_DIRTY_LOG or KVM_CLEAR_DIRTY_LOG ioctls will fail.

+NOTE: KVM_CAP_DIRTY_LOG_RING_ACQ_REL is the only capability that
+should be exposed by weakly ordered architecture, in order to indicate
+the additional memory ordering requirements imposed on userspace when
+reading the state of an entry and mutating it from DIRTY to HARVESTED.
+Architecture with TSO-like ordering (such as x86) are allowed to
+expose both KVM_CAP_DIRTY_LOG_RING and KVM_CAP_DIRTY_LOG_RING_ACQ_REL
+to userspace.
+
 8.30 KVM_CAP_XEN_HVM
 --------------------
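
The ordering contract described above is easiest to see in code. Below is a minimal sketch of the userspace side of harvesting one ring entry, using struct kvm_dirty_gfn and the KVM_DIRTY_GFN_F_* flags from include/uapi/linux/kvm.h; the helper name and the bitmap bookkeeping are illustrative, not part of the KVM API.

#include <linux/kvm.h>	/* struct kvm_dirty_gfn, KVM_DIRTY_GFN_F_* */

/*
 * Harvest one dirty-ring entry. The acquire load of 'flags' pairs with
 * KVM's store-release that publishes the entry, so 'slot' and 'offset'
 * are guaranteed to be valid once the DIRTY bit is observed. The
 * release store that moves the entry from DIRTY to HARVESTED (the
 * RESET flag) pairs with KVM's acquire when KVM_RESET_DIRTY_RINGS
 * collects it. Returns 1 if an entry was harvested, 0 when drained.
 */
static int harvest_one(struct kvm_dirty_gfn *gfn)
{
	__u32 flags = __atomic_load_n(&gfn->flags, __ATOMIC_ACQUIRE);

	if (!(flags & KVM_DIRTY_GFN_F_DIRTY))
		return 0;

	/* record (gfn->slot, gfn->offset) in a local dirty bitmap here */

	__atomic_store_n(&gfn->flags, KVM_DIRTY_GFN_F_RESET,
			 __ATOMIC_RELEASE);
	return 1;
}

On x86 the TSO memory model makes plain accesses plus a compiler barrier sufficient, which is why KVM_CAP_DIRTY_LOG_RING alone was enough there; on weakly ordered architectures the acquire/release pair above is exactly what KVM_CAP_DIRTY_LOG_RING_ACQ_REL advertises as mandatory.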


@@ -11125,7 +11125,8 @@ R: Alexandru Elisei <alexandru.elisei@arm.com>
 R: Suzuki K Poulose <suzuki.poulose@arm.com>
 R: Oliver Upton <oliver.upton@linux.dev>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: kvmarm@lists.cs.columbia.edu (moderated for non-subscribers)
+L: kvmarm@lists.linux.dev
+L: kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers)
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
 F: arch/arm64/include/asm/kvm*


@@ -384,8 +384,8 @@ alternative_cb_end
 .macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
 mrs \tmp0, ID_AA64MMFR0_EL1
 // Narrow PARange to fit the PS field in TCR_ELx
-ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
-mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
+ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
+mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
 cmp \tmp0, \tmp1
 csel \tmp0, \tmp1, \tmp0, hi
 bfi \tcr, \tmp0, \pos, #3
@@ -512,7 +512,7 @@ alternative_endif
 */
 .macro reset_pmuserenr_el0, tmpreg
 mrs \tmpreg, id_aa64dfr0_el1
-sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
+sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
 cmp \tmpreg, #1 // Skip if no PMU present
 b.lt 9000f
 msr pmuserenr_el0, xzr // Disable PMU access from EL0
@@ -524,7 +524,7 @@ alternative_endif
 */
 .macro reset_amuserenr_el0, tmpreg
 mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1
-ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
+ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
 cbz \tmpreg, .Lskip_\@ // Skip if no AMU present
 msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0
 .Lskip_\@:
@@ -612,7 +612,7 @@ alternative_endif
 .macro offset_ttbr1, ttbr, tmp
 #ifdef CONFIG_ARM64_VA_BITS_52
 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
-and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
 cbnz \tmp, .Lskipoffs_\@
 orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 .Lskipoffs_\@ :


@@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void)
 #define arch_slab_minalign() arch_slab_minalign()
 #endif

-#define CTR_CACHE_MINLINE_MASK \
-(0xf << CTR_EL0_DMINLINE_SHIFT | \
-CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
-
 #define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)

 #define ICACHEF_ALIASING 0


@@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
 u64 mask = GENMASK_ULL(field + 3, field);

 /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
-if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
+if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 val = 0;

 if (val > cap) {
@@ -597,43 +597,43 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 {
-return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
-cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
+return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
+cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
 }

 static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
 {
-u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
+u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);

-return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }

 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
-u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
+u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);

-return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }

 static inline bool id_aa64pfr0_sve(u64 pfr0)
 {
-u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
+u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);

 return val > 0;
 }

 static inline bool id_aa64pfr1_sme(u64 pfr1)
 {
-u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
+u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);

 return val > 0;
 }

 static inline bool id_aa64pfr1_mte(u64 pfr1)
 {
-u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);

-return val >= ID_AA64PFR1_MTE;
+return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }

 void __init setup_cpu_features(void);
@@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope)
 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
-ID_AA64PFR0_CSV2_SHIFT);
+ID_AA64PFR0_EL1_CSV2_SHIFT);
 return csv2_val == 3;
 }
@@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void)
 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 val = cpuid_feature_extract_unsigned_field(mmfr0,
-ID_AA64MMFR0_TGRAN4_SHIFT);
+ID_AA64MMFR0_EL1_TGRAN4_SHIFT);

-return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
-(val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
+return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
+(val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
 }

 static inline bool system_supports_64kb_granule(void)
@@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void)
 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 val = cpuid_feature_extract_unsigned_field(mmfr0,
-ID_AA64MMFR0_TGRAN64_SHIFT);
+ID_AA64MMFR0_EL1_TGRAN64_SHIFT);

-return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
-(val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
+return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
+(val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
 }

 static inline bool system_supports_16kb_granule(void)
@@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void)
 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 val = cpuid_feature_extract_unsigned_field(mmfr0,
-ID_AA64MMFR0_TGRAN16_SHIFT);
+ID_AA64MMFR0_EL1_TGRAN16_SHIFT);

-return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
-(val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
+return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
+(val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
 }

 static inline bool system_supports_mixed_endian_el0(void)
@@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void)
 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 val = cpuid_feature_extract_unsigned_field(mmfr0,
-ID_AA64MMFR0_BIGENDEL_SHIFT);
+ID_AA64MMFR0_EL1_BIGEND_SHIFT);

 return val == 0x1;
 }
@@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
 {
 switch (parange) {
-case ID_AA64MMFR0_PARANGE_32: return 32;
-case ID_AA64MMFR0_PARANGE_36: return 36;
-case ID_AA64MMFR0_PARANGE_40: return 40;
-case ID_AA64MMFR0_PARANGE_42: return 42;
-case ID_AA64MMFR0_PARANGE_44: return 44;
-case ID_AA64MMFR0_PARANGE_48: return 48;
-case ID_AA64MMFR0_PARANGE_52: return 52;
+case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
+case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
+case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
+case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
+case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
+case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
+case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
 /*
 * A future PE could use a value unknown to the kernel.
 * However, by the "D10.1.4 Principles of the ID scheme
@@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void)
 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
 return cpuid_feature_extract_unsigned_field(mmfr1,
-ID_AA64MMFR1_HADBS_SHIFT);
+ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
 }

 static inline bool cpu_has_pan(void)
 {
 u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
 return cpuid_feature_extract_unsigned_field(mmfr1,
-ID_AA64MMFR1_PAN_SHIFT);
+ID_AA64MMFR1_EL1_PAN_SHIFT);
 }

 #ifdef CONFIG_ARM64_AMU_EXTN
@@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 int vmid_bits;

 vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
-ID_AA64MMFR1_VMIDBITS_SHIFT);
-if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
+ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
+if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
 return 16;

 /*


@@ -40,7 +40,7 @@
 .macro __init_el2_debug
 mrs x1, id_aa64dfr0_el1
-sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
+sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
 cmp x0, #1
 b.lt .Lskip_pmu_\@ // Skip if no PMU present
 mrs x0, pmcr_el0 // Disable debug access traps
@@ -49,7 +49,7 @@
 csel x2, xzr, x0, lt // all PMU counters from EL1

 /* Statistical profiling */
-ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 cbz x0, .Lskip_spe_\@ // Skip if SPE not present

 mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
@@ -65,7 +65,7 @@
 .Lskip_spe_\@:

 /* Trace buffer */
-ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
+ubfx x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
 cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present

 mrs_s x0, SYS_TRBIDR_EL1
@@ -83,7 +83,7 @@
 /* LORegions */
 .macro __init_el2_lor
 mrs x1, id_aa64mmfr1_el1
-ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
+ubfx x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
 cbz x0, .Lskip_lor_\@
 msr_s SYS_LORC_EL1, xzr
 .Lskip_lor_\@:
@@ -97,7 +97,7 @@
 /* GICv3 system register access */
 .macro __init_el2_gicv3
 mrs x0, id_aa64pfr0_el1
-ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
+ubfx x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
 cbz x0, .Lskip_gicv3_\@

 mrs_s x0, SYS_ICC_SRE_EL2
@@ -132,12 +132,12 @@
 /* Disable any fine grained traps */
 .macro __init_el2_fgt
 mrs x1, id_aa64mmfr0_el1
-ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
 cbz x1, .Lskip_fgt_\@

 mov x0, xzr
 mrs x1, id_aa64dfr0_el1
-ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 cmp x1, #3
 b.lt .Lset_debug_fgt_\@

 /* Disable PMSNEVFR_EL1 read and write traps */
@@ -149,7 +149,7 @@
 mov x0, xzr
 mrs x1, id_aa64pfr1_el1
-ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
 cbz x1, .Lset_fgt_\@

 /* Disable nVHE traps of TPIDR2 and SMPRI */
@@ -162,7 +162,7 @@
 msr_s SYS_HFGITR_EL2, xzr

 mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
-ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
+ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
 cbz x1, .Lskip_fgt_\@

 msr_s SYS_HAFGRTR_EL2, xzr


@@ -142,7 +142,7 @@ static inline int get_num_brps(void)
 u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 return 1 +
 cpuid_feature_extract_unsigned_field(dfr0,
-ID_AA64DFR0_BRPS_SHIFT);
+ID_AA64DFR0_EL1_BRPs_SHIFT);
 }

 /* Determine number of WRP registers available. */
@@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
 u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 return 1 +
 cpuid_feature_extract_unsigned_field(dfr0,
-ID_AA64DFR0_WRPS_SHIFT);
+ID_AA64DFR0_EL1_WRPs_SHIFT);
 }

 #endif /* __ASM_BREAKPOINT_H */


@@ -393,6 +393,7 @@ struct kvm_vcpu_arch {
 */
 struct {
 u32 mdscr_el1;
+bool pstate_ss;
 } guest_debug_preserved;

 /* vcpu power state */
@@ -535,6 +536,9 @@ struct kvm_vcpu_arch {
 #define IN_WFIT __vcpu_single_flag(sflags, BIT(3))
 /* vcpu system registers loaded on physical CPU */
 #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4))
+/* Software step state is Active-pending */
+#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5))

 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
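
The new pstate_ss field and the DBG_SS_ACTIVE_PENDING flag back the single-step fixes called out in the tag: they let KVM preserve the guest's own PSTATE.SS value and remember an Active-pending step state across exits while userspace-driven stepping is in effect. For context, this is the documented way a VMM requests single-stepping; a minimal sketch using the KVM_SET_GUEST_DEBUG ioctl (the helper name is illustrative, the ioctl and flags are the standard KVM API):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Ask KVM to single-step a vcpu: each subsequent KVM_RUN returns to
 * userspace with KVM_EXIT_DEBUG after one guest instruction. The
 * fixes in this pull ensure the guest's own PSTATE.SS is restored
 * correctly when stepping is turned back off, even if an async
 * exception was taken while stepping.
 */
static int enable_single_step(int vcpu_fd)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}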


@@ -16,9 +16,9 @@
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
 u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
-ID_AA64MMFR0_PARANGE_SHIFT);
-if (parange > ID_AA64MMFR0_PARANGE_MAX)
-parange = ID_AA64MMFR0_PARANGE_MAX;
+ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
+parange = ID_AA64MMFR0_EL1_PARANGE_MAX;

 return parange;
 }


@@ -190,19 +190,6 @@
 #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
 #define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)

-#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
-#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
-
-#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
-#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
-
-#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
-#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
-
-#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
-#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
-#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
-
 #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
 #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
 #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
@@ -436,19 +423,11 @@
 #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
 #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)

-#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
-
-#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
-
 #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)

 #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
 #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)

-#define SMIDR_EL1_IMPLEMENTER_SHIFT 24
-#define SMIDR_EL1_SMPS_SHIFT 15
-#define SMIDR_EL1_AFFINITY_SHIFT 0
-
 #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
 #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
@@ -537,7 +516,6 @@
 #define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
 #define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
 #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
-#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2)
 #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
 #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
 #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
@@ -690,164 +668,30 @@
 #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))

 /* id_aa64pfr0 */
-#define ID_AA64PFR0_CSV3_SHIFT 60
-#define ID_AA64PFR0_CSV2_SHIFT 56
-#define ID_AA64PFR0_DIT_SHIFT 48
-#define ID_AA64PFR0_AMU_SHIFT 44
-#define ID_AA64PFR0_MPAM_SHIFT 40
-#define ID_AA64PFR0_SEL2_SHIFT 36
-#define ID_AA64PFR0_SVE_SHIFT 32
-#define ID_AA64PFR0_RAS_SHIFT 28
-#define ID_AA64PFR0_GIC_SHIFT 24
-#define ID_AA64PFR0_ASIMD_SHIFT 20
-#define ID_AA64PFR0_FP_SHIFT 16
-#define ID_AA64PFR0_EL3_SHIFT 12
-#define ID_AA64PFR0_EL2_SHIFT 8
-#define ID_AA64PFR0_EL1_SHIFT 4
-#define ID_AA64PFR0_EL0_SHIFT 0
-
-#define ID_AA64PFR0_AMU 0x1
-#define ID_AA64PFR0_SVE 0x1
-#define ID_AA64PFR0_RAS_V1 0x1
-#define ID_AA64PFR0_RAS_V1P1 0x2
-#define ID_AA64PFR0_FP_NI 0xf
-#define ID_AA64PFR0_FP_SUPPORTED 0x0
-#define ID_AA64PFR0_ASIMD_NI 0xf
-#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
-#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
-#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
-
-/* id_aa64pfr1 */
-#define ID_AA64PFR1_SME_SHIFT 24
-#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
-#define ID_AA64PFR1_RASFRAC_SHIFT 12
-#define ID_AA64PFR1_MTE_SHIFT 8
-#define ID_AA64PFR1_SSBS_SHIFT 4
-#define ID_AA64PFR1_BT_SHIFT 0
-
-#define ID_AA64PFR1_SSBS_PSTATE_NI 0
-#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
-#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
-#define ID_AA64PFR1_BT_BTI 0x1
-#define ID_AA64PFR1_SME 1
-
-#define ID_AA64PFR1_MTE_NI 0x0
-#define ID_AA64PFR1_MTE_EL0 0x1
-#define ID_AA64PFR1_MTE 0x2
-#define ID_AA64PFR1_MTE_ASYMM 0x3
+#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
+#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2

 /* id_aa64mmfr0 */
-#define ID_AA64MMFR0_ECV_SHIFT 60
-#define ID_AA64MMFR0_FGT_SHIFT 56
-#define ID_AA64MMFR0_EXS_SHIFT 44
-#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
-#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
-#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
-#define ID_AA64MMFR0_TGRAN4_SHIFT 28
-#define ID_AA64MMFR0_TGRAN64_SHIFT 24
-#define ID_AA64MMFR0_TGRAN16_SHIFT 20
-#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
-#define ID_AA64MMFR0_SNSMEM_SHIFT 12
-#define ID_AA64MMFR0_BIGENDEL_SHIFT 8
-#define ID_AA64MMFR0_ASID_SHIFT 4
-#define ID_AA64MMFR0_PARANGE_SHIFT 0
-
-#define ID_AA64MMFR0_ASID_8 0x0
-#define ID_AA64MMFR0_ASID_16 0x2
-
-#define ID_AA64MMFR0_TGRAN4_NI 0xf
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
-#define ID_AA64MMFR0_TGRAN64_NI 0xf
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
-#define ID_AA64MMFR0_TGRAN16_NI 0x0
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
-
-#define ID_AA64MMFR0_PARANGE_32 0x0
-#define ID_AA64MMFR0_PARANGE_36 0x1
-#define ID_AA64MMFR0_PARANGE_40 0x2
-#define ID_AA64MMFR0_PARANGE_42 0x3
-#define ID_AA64MMFR0_PARANGE_44 0x4
-#define ID_AA64MMFR0_PARANGE_48 0x5
-#define ID_AA64MMFR0_PARANGE_52 0x6
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf

 #define ARM64_MIN_PARANGE_BITS 32

-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7

 #ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
+#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
 #else
-#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
+#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
 #endif

-/* id_aa64mmfr1 */
-#define ID_AA64MMFR1_ECBHB_SHIFT 60
-#define ID_AA64MMFR1_TIDCP1_SHIFT 52
-#define ID_AA64MMFR1_HCX_SHIFT 40
-#define ID_AA64MMFR1_AFP_SHIFT 44
-#define ID_AA64MMFR1_ETS_SHIFT 36
-#define ID_AA64MMFR1_TWED_SHIFT 32
-#define ID_AA64MMFR1_XNX_SHIFT 28
-#define ID_AA64MMFR1_SPECSEI_SHIFT 24
-#define ID_AA64MMFR1_PAN_SHIFT 20
-#define ID_AA64MMFR1_LOR_SHIFT 16
-#define ID_AA64MMFR1_HPD_SHIFT 12
-#define ID_AA64MMFR1_VHE_SHIFT 8
-#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
-#define ID_AA64MMFR1_HADBS_SHIFT 0
-
-#define ID_AA64MMFR1_VMIDBITS_8 0
-#define ID_AA64MMFR1_VMIDBITS_16 2
-
-#define ID_AA64MMFR1_TIDCP1_NI 0
-#define ID_AA64MMFR1_TIDCP1_IMP 1
-
-/* id_aa64mmfr2 */
-#define ID_AA64MMFR2_E0PD_SHIFT 60
-#define ID_AA64MMFR2_EVT_SHIFT 56
-#define ID_AA64MMFR2_BBM_SHIFT 52
-#define ID_AA64MMFR2_TTL_SHIFT 48
-#define ID_AA64MMFR2_FWB_SHIFT 40
-#define ID_AA64MMFR2_IDS_SHIFT 36
-#define ID_AA64MMFR2_AT_SHIFT 32
-#define ID_AA64MMFR2_ST_SHIFT 28
-#define ID_AA64MMFR2_NV_SHIFT 24
-#define ID_AA64MMFR2_CCIDX_SHIFT 20
-#define ID_AA64MMFR2_LVA_SHIFT 16
-#define ID_AA64MMFR2_IESB_SHIFT 12
-#define ID_AA64MMFR2_LSM_SHIFT 8
-#define ID_AA64MMFR2_UAO_SHIFT 4
-#define ID_AA64MMFR2_CNP_SHIFT 0
-
-/* id_aa64dfr0 */
-#define ID_AA64DFR0_MTPMU_SHIFT 48
-#define ID_AA64DFR0_TRBE_SHIFT 44
-#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
-#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
-#define ID_AA64DFR0_PMSVER_SHIFT 32
-#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
-#define ID_AA64DFR0_WRPS_SHIFT 20
-#define ID_AA64DFR0_BRPS_SHIFT 12
-#define ID_AA64DFR0_PMUVER_SHIFT 8
-#define ID_AA64DFR0_TRACEVER_SHIFT 4
-#define ID_AA64DFR0_DEBUGVER_SHIFT 0
-
-#define ID_AA64DFR0_PMUVER_8_0 0x1
-#define ID_AA64DFR0_PMUVER_8_1 0x4
-#define ID_AA64DFR0_PMUVER_8_4 0x5
-#define ID_AA64DFR0_PMUVER_8_5 0x6
-#define ID_AA64DFR0_PMUVER_8_7 0x7
-#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
-
-#define ID_AA64DFR0_PMSVER_8_2 0x1
-#define ID_AA64DFR0_PMSVER_8_3 0x2
-
 #define ID_DFR0_PERFMON_SHIFT 24

 #define ID_DFR0_PERFMON_8_0 0x3
@@ -955,20 +799,20 @@
 #define ID_PFR1_PROGMOD_SHIFT 0

 #if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
 #elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
 #elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN64_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
 #endif

 #define MVFR2_FPMISC_SHIFT 4
@@ -1028,9 +872,6 @@
 #define TRFCR_ELx_ExTRE BIT(1)
 #define TRFCR_ELx_E0TRE BIT(0)

-/* HCRX_EL2 definitions */
-#define HCRX_EL2_SMPME_MASK (1 << 5)
-
 /* GIC Hypervisor interface registers */
 /* ICH_MISR_EL2 bit definitions */
 #define ICH_MISR_EOI (1 << 0)
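
The constants removed above are not lost: they are now emitted by the generated system-register machinery (arch/arm64/tools/sysreg), prefixed with the register's full name including its _EL1 suffix, together with _MASK counterparts. As an illustration of the resulting idiom (a sketch, assuming kernel context), a field can be pulled out through the existing SYS_FIELD_GET() helper rather than an open-coded shift:

/* kernel-context sketch: extract PARange via the generated field names */
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
u64 parange = SYS_FIELD_GET(ID_AA64MMFR0_EL1, PARANGE, mmfr0);

This is the same helper already used for CTR_EL0 in the cache.h hunk above (see CTR_L1IP).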


@@ -243,35 +243,35 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
-S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
-S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
+FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0),
+S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI),
+S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
 ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
+FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
-FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
-ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI),
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
-FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
+FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0),
 ARM64_FTR_END,
 };
@@ -316,9 +316,9 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0),
 /*
 * Page size not being supported at Stage-2 is not fatal. You
 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
@@ -334,9 +334,9 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 * fields are inconsistent across vCPUs, then it isn't worth
 * trying to bring KVM up.
 */
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1),
 /*
 * We already refuse to boot CPUs that don't support our configured
 * page size, so we can only detect mismatches for a page size other
@@ -344,55 +344,55 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 * exist in the wild so, even though we don't like it, we'll have to go
 * along with it and treat them as non-strict.
 */
-S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
-S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI),
+S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0),
 /* Linux shouldn't care about secure memory */
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0),
 /*
 * Differing PARange is fine as long as all peripherals and memory are mapped
 * within the minimum PARange of all CPUs
 */
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0),
 ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TIDCP1_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0),
 ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0),
 ARM64_FTR_END,
 };
@@ -434,17 +434,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
-S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
 /*
 * We can instantiate multiple PMU instances with different levels
 * of support.
 */
-S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
 ARM64_FTR_END,
 };
@@ -1492,7 +1492,7 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

 return cpuid_feature_extract_signed_field(pfr0,
-ID_AA64PFR0_FP_SHIFT) < 0;
+ID_AA64PFR0_EL1_FP_SHIFT) < 0;
 }

 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
@@ -1571,7 +1571,7 @@ bool kaslr_requires_kpti(void)
 if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
 u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
 if (cpuid_feature_extract_unsigned_field(mmfr2,
-ID_AA64MMFR2_E0PD_SHIFT))
+ID_AA64MMFR2_EL1_E0PD_SHIFT))
 return false;
 }
@@ -2093,7 +2093,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 .matches = has_useable_gicv3_cpuif,
 .sys_reg = SYS_ID_AA64PFR0_EL1,
-.field_pos = ID_AA64PFR0_GIC_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
 .field_width = 4,
 .sign = FTR_UNSIGNED,
 .min_field_value = 1,
@@ -2104,7 +2104,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64MMFR0_EL1,
-.field_pos = ID_AA64MMFR0_ECV_SHIFT,
+.field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
 .field_width = 4,
 .sign = FTR_UNSIGNED,
 .min_field_value = 1,
@@ -2116,7 +2116,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64MMFR1_EL1,
-.field_pos = ID_AA64MMFR1_PAN_SHIFT,
+.field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
 .field_width = 4,
 .sign = FTR_UNSIGNED,
 .min_field_value = 1,
@@ -2130,7 +2130,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64MMFR1_EL1,
-.field_pos = ID_AA64MMFR1_PAN_SHIFT,
+.field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
 .field_width = 4,
 .sign = FTR_UNSIGNED,
 .min_field_value = 3,
@@ -2168,9 +2168,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .matches = has_32bit_el0,
 .sys_reg = SYS_ID_AA64PFR0_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64PFR0_EL0_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_EL0_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
+.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
 },
 #ifdef CONFIG_KVM
 {
@@ -2180,9 +2180,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64PFR0_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64PFR0_EL1_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_EL1_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
+.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
 },
 {
 .desc = "Protected KVM",
@@ -2201,7 +2201,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 * more details.
 */
 .sys_reg = SYS_ID_AA64PFR0_EL1,
-.field_pos = ID_AA64PFR0_CSV3_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT,
 .field_width = 4,
 .min_field_value = 1,
 .matches = unmap_kernel_at_el0,
@@ -2244,9 +2244,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .capability = ARM64_SVE,
 .sys_reg = SYS_ID_AA64PFR0_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64PFR0_SVE_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_SVE_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR0_SVE,
+.min_field_value = ID_AA64PFR0_EL1_SVE_IMP,
 .matches = has_cpuid_feature,
 .cpu_enable = sve_kernel_enable,
 },
@@ -2259,9 +2259,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64PFR0_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64PFR0_RAS_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_RAS_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR0_RAS_V1,
+.min_field_value = ID_AA64PFR0_EL1_RAS_IMP,
 .cpu_enable = cpu_clear_disr,
 },
 #endif /* CONFIG_ARM64_RAS_EXTN */
@@ -2278,9 +2278,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .matches = has_amu,
 .sys_reg = SYS_ID_AA64PFR0_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64PFR0_AMU_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_AMU_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR0_AMU,
+.min_field_value = ID_AA64PFR0_EL1_AMU_IMP,
 .cpu_enable = cpu_amu_enable,
 },
 #endif /* CONFIG_ARM64_AMU_EXTN */
@@ -2303,7 +2303,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .capability = ARM64_HAS_STAGE2_FWB,
 .sys_reg = SYS_ID_AA64MMFR2_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64MMFR2_FWB_SHIFT,
+.field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT,
 .field_width = 4,
 .min_field_value = 1,
 .matches = has_cpuid_feature,
@@ -2314,7 +2314,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .capability = ARM64_HAS_ARMv8_4_TTL,
 .sys_reg = SYS_ID_AA64MMFR2_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64MMFR2_TTL_SHIFT,
+.field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT,
 .field_width = 4,
 .min_field_value = 1,
 .matches = has_cpuid_feature,
@@ -2344,7 +2344,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .capability = ARM64_HW_DBM,
 .sys_reg = SYS_ID_AA64MMFR1_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
+.field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT,
 .field_width = 4,
 .min_field_value = 2,
 .matches = has_hw_dbm,
@@ -2367,10 +2367,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64PFR1_EL1,
-.field_pos = ID_AA64PFR1_SSBS_SHIFT,
+.field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT,
 .field_width = 4,
 .sign = FTR_UNSIGNED,
-.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+.min_field_value = ID_AA64PFR1_EL1_SSBS_IMP,
 },
 #ifdef CONFIG_ARM64_CNP
 {
@@ -2380,7 +2380,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .matches = has_useable_cnp,
 .sys_reg = SYS_ID_AA64MMFR2_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64MMFR2_CNP_SHIFT,
+.field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT,
 .field_width = 4,
 .min_field_value = 1,
 .cpu_enable = cpu_enable_cnp,
@@ -2485,7 +2485,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 .matches = can_use_gic_priorities,
 .sys_reg = SYS_ID_AA64PFR0_EL1,
-.field_pos = ID_AA64PFR0_GIC_SHIFT,
+.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
 .field_width = 4,
 .sign = FTR_UNSIGNED,
 .min_field_value = 1,
@@ -2499,7 +2499,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .sys_reg = SYS_ID_AA64MMFR2_EL1,
 .sign = FTR_UNSIGNED,
 .field_width = 4,
-.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+.field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT,
 .matches = has_cpuid_feature,
 .min_field_value = 1,
 .cpu_enable = cpu_enable_e0pd,
@@ -2528,9 +2528,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .matches = has_cpuid_feature,
 .cpu_enable = bti_enable,
 .sys_reg = SYS_ID_AA64PFR1_EL1,
-.field_pos = ID_AA64PFR1_BT_SHIFT,
+.field_pos = ID_AA64PFR1_EL1_BT_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR1_BT_BTI,
+.min_field_value = ID_AA64PFR1_EL1_BT_IMP,
 .sign = FTR_UNSIGNED,
 },
 #endif
@@ -2541,9 +2541,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64PFR1_EL1,
-.field_pos = ID_AA64PFR1_MTE_SHIFT,
+.field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR1_MTE,
+.min_field_value = ID_AA64PFR1_EL1_MTE_MTE2,
 .sign = FTR_UNSIGNED,
 .cpu_enable = cpu_enable_mte,
 },
@@ -2553,9 +2553,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
 .matches = has_cpuid_feature,
 .sys_reg = SYS_ID_AA64PFR1_EL1,
-.field_pos = ID_AA64PFR1_MTE_SHIFT,
+.field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR1_MTE_ASYMM,
+.min_field_value = ID_AA64PFR1_EL1_MTE_MTE3,
 .sign = FTR_UNSIGNED,
 },
 #endif /* CONFIG_ARM64_MTE */
@@ -2577,9 +2577,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .capability = ARM64_SME,
 .sys_reg = SYS_ID_AA64PFR1_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64PFR1_SME_SHIFT,
+.field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64PFR1_SME,
+.min_field_value = ID_AA64PFR1_EL1_SME_IMP,
 .matches = has_cpuid_feature,
 .cpu_enable = sme_kernel_enable,
 },
@@ -2614,9 +2614,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
 .sys_reg = SYS_ID_AA64MMFR1_EL1,
 .sign = FTR_UNSIGNED,
-.field_pos = ID_AA64MMFR1_TIDCP1_SHIFT,
+.field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT,
 .field_width = 4,
-.min_field_value = ID_AA64MMFR1_TIDCP1_IMP,
+.min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP,
 .matches = has_cpuid_feature,
 .cpu_enable = cpu_trap_el0_impdef,
 },
@@ -2708,11 +2708,11 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
@ -2725,9 +2725,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE #ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
@ -2739,24 +2739,24 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
#endif #endif
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_BTI #ifdef CONFIG_ARM64_BTI
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_IMP, CAP_HWCAP, KERNEL_HWCAP_BTI),
#endif #endif
#ifdef CONFIG_ARM64_PTR_AUTH #ifdef CONFIG_ARM64_PTR_AUTH
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif #endif
#ifdef CONFIG_ARM64_MTE #ifdef CONFIG_ARM64_MTE
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
#endif /* CONFIG_ARM64_MTE */ #endif /* CONFIG_ARM64_MTE */
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
#ifdef CONFIG_ARM64_SME #ifdef CONFIG_ARM64_SME
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
@ -3102,7 +3102,7 @@ static void verify_hyp_capabilities(void)
/* Verify IPA range */ /* Verify IPA range */
parange = cpuid_feature_extract_unsigned_field(mmfr0, parange = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_PARANGE_SHIFT); ID_AA64MMFR0_EL1_PARANGE_SHIFT);
ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange); ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
if (ipa_max < get_kvm_ipa_limit()) { if (ipa_max < get_kvm_ipa_limit()) {
pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id()); pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
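
All of the capability entries above drive the same extraction: take the sanitised ID register value, shift right by the field's _SHIFT constant, and mask off field_width bits before comparing against min_field_value. A minimal sketch of that lookup, assuming a 4-bit unsigned field (illustrative only, not the kernel's cpuid_feature_extract_unsigned_field() itself):

	/* Illustrative only: extract a 4-bit unsigned ID register field. */
	static unsigned int id_field(u64 reg, unsigned int shift)
	{
		return (reg >> shift) & 0xf;	/* field_width = 4 */
	}

	/*
	 * e.g. the AMU entry above boils down to:
	 *   id_field(pfr0, ID_AA64PFR0_EL1_AMU_SHIFT) >= ID_AA64PFR0_EL1_AMU_IMP
	 */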


@@ -28,7 +28,7 @@
 u8 debug_monitors_arch(void)
 {
 	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
-						ID_AA64DFR0_DEBUGVER_SHIFT);
+						ID_AA64DFR0_EL1_DebugVer_SHIFT);
 }
 
 /*


@@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry)
 	 */
 #if VA_BITS > 48
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	tst	x0, #0xf << ID_AA64MMFR2_LVA_SHIFT
+	tst	x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
 	mov	x0, #VA_BITS
 	mov	x25, #VA_BITS_MIN
 	csel	x25, x25, x0, eq
@@ -658,10 +658,10 @@
 */
 SYM_FUNC_START(__enable_mmu)
 	mrs	x3, ID_AA64MMFR0_EL1
-	ubfx	x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
+	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
 	b.lt	__no_granule_support
-	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
 	b.gt	__no_granule_support
 	phys_to_ttbr x2, x2
 	msr	ttbr0_el1, x2		// load TTBR0
@@ -679,7 +679,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
 	b.ne	2f
 
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+	and	x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
 	cbnz	x0, 2f
 
 	update_early_cpu_boot_status \


@@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync)
 SYM_CODE_END(elx_sync)
 
 SYM_CODE_START_LOCAL(__finalise_el2)
-	check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve
+	check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve
 
 .Linit_sve:	/* SVE register access */
 	mrs	x0, cptr_el2			// Disable SVE traps
@@ -109,7 +109,7 @@
 	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
 
 .Lskip_sve:
-	check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme
+	check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme
 
 .Linit_sme:	/* SME register access and priority mapping */
 	mrs	x0, cptr_el2			// Disable SME traps
@@ -142,7 +142,7 @@
 	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
 
 	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
-	ubfx	x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
 	cbz	x1, .Lskip_sme
 
 	mrs_s	x1, SYS_HCRX_EL2
@@ -157,7 +157,7 @@
 	tbnz	x1, #0, 1f
 
 	// Needs to be VHE capable, obviously
-	check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f
+	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f
 
 1:	mov_q	x0, HVC_STUB_ERR
 	eret


@@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = {
 	.name		= "id_aa64mmfr1",
 	.override	= &id_aa64mmfr1_override,
 	.fields		= {
-		FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter),
+		FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter),
 		{}
 	},
 };
@@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = {
 	.name		= "id_aa64pfr0",
 	.override	= &id_aa64pfr0_override,
 	.fields		= {
-		FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter),
+		FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
 		{}
 	},
 };
@@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = {
 	.name		= "id_aa64pfr1",
 	.override	= &id_aa64pfr1_override,
 	.fields		= {
-		FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ),
-		FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL),
-		FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter),
+		FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ),
+		FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
+		FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
 		{}
 	},
 };
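
Each FIELD() above also names an early ID register override: the ftr_set_desc .name string and the field name combine into a "name.field=value" kernel command-line parameter. A hedged illustration (parameter spellings inferred from the strings above):

	/*
	 * Kernel command line, illustrative:
	 *   id_aa64pfr1.sme=0	- hide SME from the early feature code
	 *   id_aa64mmfr1.vh=0	- pretend VHE is absent
	 */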


@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
  */
 static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
 {
-	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
+	return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
 }
 
 static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info)
 
 	dfr0 = read_sysreg(id_aa64dfr0_el1);
 	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
-			ID_AA64DFR0_PMUVER_SHIFT);
-	if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
+			ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
 		return;
 
 	cpu_pmu->pmuver = pmuver;
@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
 			     pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 
 	/* store PMMIR_EL1 register for sysfs */
-	if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
+	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
 		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
 	else
 		cpu_pmu->reg_pmmir = 0;


@@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 
 	/* If the CPU has CSV2 set, we're safe */
 	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
 		return SPECTRE_UNAFFECTED;
 
 	/* Alternatively, we have a list of unaffected CPUs */
@@ -945,7 +945,7 @@ static bool supports_ecbhb(int scope)
 		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 
 	return cpuid_feature_extract_unsigned_field(mmfr1,
-						    ID_AA64MMFR1_ECBHB_SHIFT);
+						    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
 }
 
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,


@@ -2269,6 +2269,16 @@ static int __init early_kvm_mode_cfg(char *arg)
 	if (!arg)
 		return -EINVAL;
 
+	if (strcmp(arg, "none") == 0) {
+		kvm_mode = KVM_MODE_NONE;
+		return 0;
+	}
+
+	if (!is_hyp_mode_available()) {
+		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
+		return 0;
+	}
+
 	if (strcmp(arg, "protected") == 0) {
 		if (!is_kernel_in_hyp_mode())
 			kvm_mode = KVM_MODE_PROTECTED;
@@ -2283,11 +2293,6 @@ static int __init early_kvm_mode_cfg(char *arg)
 		return 0;
 	}
 
-	if (strcmp(arg, "none") == 0) {
-		kvm_mode = KVM_MODE_NONE;
-		return 0;
-	}
-
 	return -EINVAL;
 }
 early_param("kvm-arm.mode", early_kvm_mode_cfg);
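
With both hunks applied, "none" is honoured before the hypervisor-availability check, and any other mode request is silently dropped (with a one-time warning) when KVM cannot run. The merged function reads roughly as follows; the "protected"/"nvhe" branches elided here are untouched by this diff:

	static int __init early_kvm_mode_cfg(char *arg)
	{
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "none") == 0) {
			kvm_mode = KVM_MODE_NONE;
			return 0;
		}

		if (!is_hyp_mode_available()) {
			pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
			return 0;
		}

		/* ... "protected" and "nvhe" handling, unchanged ... */

		return -EINVAL;
	}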


@@ -32,6 +32,10 @@ static DEFINE_PER_CPU(u64, mdcr_el2);
  *
  * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
  * after we have restored the preserved value to the main context.
+ *
+ * When single-step is enabled by userspace, we tweak PSTATE.SS on every
+ * guest entry. Preserve PSTATE.SS so we can restore the original value
+ * for the vcpu after the single-step is disabled.
  */
 static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 {
@@ -41,6 +45,9 @@ static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 
 	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
 				vcpu->arch.guest_debug_preserved.mdscr_el1);
+
+	vcpu->arch.guest_debug_preserved.pstate_ss =
+					(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
 }
 
 static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
@@ -51,6 +58,11 @@
 
 	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
 				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
+
+	if (vcpu->arch.guest_debug_preserved.pstate_ss)
+		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+	else
+		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 }
 
 /**
@@ -188,7 +200,18 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 		 * debugging the system.
 		 */
 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-			*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+			/*
+			 * If the software step state at the last guest exit
+			 * was Active-pending, we don't set DBG_SPSR_SS so
+			 * that the state is maintained (to not run another
+			 * single-step until the pending Software Step
+			 * exception is taken).
+			 */
+			if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
+				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+			else
+				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+
 			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
 			mdscr |= DBG_MDSCR_SS;
 			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
@@ -262,6 +285,15 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
 	 * Restore the guest's debug registers if we were using them.
 	 */
 	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
+		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+			if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
+				/*
+				 * Mark the vcpu as ACTIVE_PENDING
+				 * until Software Step exception is taken.
+				 */
+				vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
+		}
+
 		restore_guest_debug_regs(vcpu);
 
 		/*
@@ -295,12 +327,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
 	 * If SPE is present on this CPU and is available at current EL,
 	 * we may need to check if the host state needs to be saved.
 	 */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
 	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
 		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
 
 	/* Check if we have TRBE implemented and available at the host */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
 	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
 		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
 }
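
From userspace, this state machine is driven entirely through KVM_SET_GUEST_DEBUG. A hypothetical snippet (vcpu_fd is assumed to be an open vCPU file descriptor):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
		/* handle error */;

	/*
	 * Each KVM_RUN now exits with KVM_EXIT_DEBUG after one guest
	 * instruction; the DBG_SS_ACTIVE_PENDING flag above is what keeps
	 * that guarantee when an asynchronous exception is delivered
	 * before the pending Software Step exception is taken.
	 */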


@@ -937,6 +937,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	} else {
 		/* If not enabled clear all flags */
 		vcpu->guest_debug = 0;
+		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
 	}
 
 out:


@@ -152,8 +152,14 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 		run->debug.arch.hsr_high = upper_32_bits(esr);
 		run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;
 
-	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
+	switch (ESR_ELx_EC(esr)) {
+	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
+		break;
+	case ESR_ELx_EC_SOFTSTP_LOW:
+		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
+		break;
+	}
 
 	return 0;
 }


@@ -35,9 +35,9 @@
  * - Data Independent Timing
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
 	)
 
 /*
@@ -49,11 +49,11 @@
  * Supported by KVM
  */
 #define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \
 	)
 
 /*
@@ -62,8 +62,8 @@
  * - Speculative Store Bypassing
  */
 #define PVM_ID_AA64PFR1_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \
+	ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
 	)
 
 /*
@@ -74,10 +74,10 @@
  * - Non-context synchronizing exception entry and exit
 */
 #define PVM_ID_AA64MMFR0_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
 	)
 
 /*
@@ -86,8 +86,8 @@
  * - 16-bit ASID
 */
 #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
 	)
 
 /*
@@ -100,12 +100,12 @@
  * - Enhanced Translation Synchronization
 */
 #define PVM_ID_AA64MMFR1_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) \
 	)
 
 /*
@@ -120,14 +120,14 @@
  * - E0PDx mechanism
 */
 #define PVM_ID_AA64MMFR2_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \
 	)
 
 /*


@@ -20,35 +20,35 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
 	u64 cptr_set = 0;
 
 	/* Protected KVM does not support AArch32 guests. */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	/*
 	 * Linux guests assume support for floating-point and Advanced SIMD. Do
 	 * not change the trapping behavior for these from the KVM default.
 	 */
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
 				PVM_ID_AA64PFR0_ALLOW));
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
 				PVM_ID_AA64PFR0_ALLOW));
 
 	/* Trap RAS unless all current versions are supported */
-	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
-	    ID_AA64PFR0_RAS_V1P1) {
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
+	    ID_AA64PFR0_EL1_RAS_V1P1) {
 		hcr_set |= HCR_TERR | HCR_TEA;
 		hcr_clear |= HCR_FIEN;
 	}
 
 	/* Trap AMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
 		hcr_clear |= HCR_AMVOFFEN;
 		cptr_set |= CPTR_EL2_TAM;
 	}
 
 	/* Trap SVE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
 		cptr_set |= CPTR_EL2_TZ;
 
 	vcpu->arch.hcr_el2 |= hcr_set;
@@ -66,7 +66,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
 	u64 hcr_clear = 0;
 
 	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
 		hcr_set |= HCR_TID5;
 		hcr_clear |= HCR_DCT | HCR_ATA;
 	}
@@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
 	u64 cptr_set = 0;
 
 	/* Trap/constrain PMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
 		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
 			      MDCR_EL2_HPMN_MASK;
 	}
 
 	/* Trap Debug */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
 		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
 
 	/* Trap OS Double Lock */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
 		mdcr_set |= MDCR_EL2_TDOSA;
 
 	/* Trap SPE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPMS;
 		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 	}
 
 	/* Trap Trace Filter */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
 		mdcr_set |= MDCR_EL2_TTRF;
 
 	/* Trap Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
 		cptr_set |= CPTR_EL2_TTA;
 
 	vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -128,7 +128,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
 	u64 mdcr_set = 0;
 
 	/* Trap Debug Communications Channel registers */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
 		mdcr_set |= MDCR_EL2_TDCC;
 
 	vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
 	u64 hcr_set = 0;
 
 	/* Trap LOR */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
 		hcr_set |= HCR_TLOR;
 
 	vcpu->arch.hcr_el2 |= hcr_set;


@@ -143,7 +143,7 @@ static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 	}
 }
 
-/* Restore VGICv3 state on non_VEH systems */
+/* Restore VGICv3 state on non-VHE systems */
 static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {


@@ -92,9 +92,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 				PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
 	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
+	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
 			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
+	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
 			       (u64)kvm->arch.pfr0_csv3);
 
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
@@ -106,7 +106,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
 	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
 
 	if (!kvm_has_mte(kvm))
-		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
 
 	return id_aa64pfr1_el1_sys_val & allow_mask;
 }
@@ -281,8 +281,8 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
 	 * No support for AArch32 guests, therefore, pKVM has no sanitized copy
 	 * of AArch32 feature id registers.
 	 */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
-		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	return pvm_access_raz_wi(vcpu, p, r);
 }
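
get_pvm_id_aa64pfr0() shows how the fixed_config.h masks from the previous file are meant to be consumed: fields inside the allow-mask pass through from the host's sanitised value, and the restricted or policy-driven fields are injected via set_mask. The pattern, restated from the lines above:

	/*
	 * pass-through:  id_aa64pfr0_el1_sys_val & allow_mask
	 * forced values: | set_mask   (restricted fields, CSV2/CSV3 policy)
	 */
	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;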


@@ -61,7 +61,7 @@ struct kvm_pgtable_walk_data {
 
 static bool kvm_phys_is_valid(u64 phys)
 {
-	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
+	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
 }
 
 static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)


@@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	pmuver = kvm->arch.arm_pmu->pmuver;
 
 	switch (pmuver) {
-	case ID_AA64DFR0_PMUVER_8_0:
+	case ID_AA64DFR0_EL1_PMUVer_IMP:
 		return GENMASK(9, 0);
-	case ID_AA64DFR0_PMUVER_8_1:
-	case ID_AA64DFR0_PMUVER_8_4:
-	case ID_AA64DFR0_PMUVER_8_5:
-	case ID_AA64DFR0_PMUVER_8_7:
+	case ID_AA64DFR0_EL1_PMUVer_V3P1:
+	case ID_AA64DFR0_EL1_PMUVer_V3P4:
+	case ID_AA64DFR0_EL1_PMUVer_V3P5:
+	case ID_AA64DFR0_EL1_PMUVer_V3P7:
 		return GENMASK(15, 0);
 	default:		/* Shouldn't be here, just for sanity */
 		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 {
 	struct arm_pmu_entry *entry;
 
-	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 		return;
 
 	mutex_lock(&arm_pmus_lock);
@@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 	if (event->pmu) {
 		pmu = to_arm_pmu(event->pmu);
 		if (pmu->pmuver == 0 ||
-		    pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 			pmu = NULL;
 	}
 
@@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
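
The renamed PMUVer constants substitute the PMUv3 revision for the Armv8.x release that introduced it. The mapping used consistently throughout this diff:

	/*
	 * ID_AA64DFR0_PMUVER_8_0     -> ID_AA64DFR0_EL1_PMUVer_IMP
	 * ID_AA64DFR0_PMUVER_8_1     -> ID_AA64DFR0_EL1_PMUVer_V3P1
	 * ID_AA64DFR0_PMUVER_8_4     -> ID_AA64DFR0_EL1_PMUVer_V3P4
	 * ID_AA64DFR0_PMUVER_8_5     -> ID_AA64DFR0_EL1_PMUVer_V3P5
	 * ID_AA64DFR0_PMUVER_8_7     -> ID_AA64DFR0_EL1_PMUVer_V3P7
	 * ID_AA64DFR0_PMUVER_IMP_DEF -> ID_AA64DFR0_EL1_PMUVer_IMP_DEF
	 */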


@@ -359,7 +359,7 @@ int kvm_set_ipa_limit(void)
 
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
-				ID_AA64MMFR0_PARANGE_SHIFT);
+				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
 	/*
 	 * IPA size beyond 48 bits could not be supported
 	 * on either 4K or 16K page size. Hence let's cap
@@ -367,20 +367,20 @@ int kvm_set_ipa_limit(void)
 	 * on the system.
 	 */
 	if (PAGE_SIZE != SZ_64K)
-		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);
 
 	/*
 	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
 	 * Stage-2. If not, things will stop very quickly.
 	 */
-	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
-	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
+	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
+	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
 		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
 		return -EINVAL;
-	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
+	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
 		break;
-	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
+	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
 		break;
 	default:


@@ -273,7 +273,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
 	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 	u32 sr = reg_to_encoding(r);
 
-	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
+	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
 		kvm_inject_undefined(vcpu);
 		return false;
 	}
@@ -1063,13 +1063,12 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 }
 
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
-static u64 read_id_reg(const struct kvm_vcpu *vcpu,
-		       struct sys_reg_desc const *r, bool raz)
+static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
 {
 	u32 id = reg_to_encoding(r);
 	u64 val;
 
-	if (raz)
+	if (sysreg_visible_as_raz(vcpu, r))
 		return 0;
 
 	val = read_sanitised_ftr_reg(id);
@@ -1077,22 +1076,22 @@
 	switch (id) {
 	case SYS_ID_AA64PFR0_EL1:
 		if (!vcpu_has_sve(vcpu))
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
 		if (kvm_vgic_global_state.type == VGIC_V3) {
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
-			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
+			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
+			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
 		}
 		break;
 	case SYS_ID_AA64PFR1_EL1:
 		if (!kvm_has_mte(vcpu->kvm))
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
 
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
@@ -1110,14 +1109,14 @@
 		break;
 	case SYS_ID_AA64DFR0_EL1:
 		/* Limit debug to ARMv8.0 */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
 		/* Limit guests to PMUv3 for ARMv8.4 */
 		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_AA64DFR0_PMUVER_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
+						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
 		/* Hide SPE from guests */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
 		break;
 	case SYS_ID_DFR0_EL1:
 		/* Limit guests to PMUv3 for ARMv8.4 */
@@ -1145,34 +1144,37 @@ static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/* cpufeature ID register access trap handlers */
-
-static bool __access_id_reg(struct kvm_vcpu *vcpu,
-			    struct sys_reg_params *p,
-			    const struct sys_reg_desc *r,
-			    bool raz)
+static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
+				       const struct sys_reg_desc *r)
 {
-	if (p->is_write)
-		return write_to_read_only(vcpu, p, r);
+	/*
+	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
+	 * EL. Promote to RAZ/WI in order to guarantee consistency between
+	 * systems.
+	 */
+	if (!kvm_supports_32bit_el0())
+		return REG_RAZ | REG_USER_WI;
 
-	p->regval = read_id_reg(vcpu, r, raz);
-	return true;
+	return id_visibility(vcpu, r);
 }
 
+static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
+				   const struct sys_reg_desc *r)
+{
+	return REG_RAZ;
+}
+
+/* cpufeature ID register access trap handlers */
+
 static bool access_id_reg(struct kvm_vcpu *vcpu,
 			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	bool raz = sysreg_visible_as_raz(vcpu, r);
-
-	return __access_id_reg(vcpu, p, r, raz);
-}
+	if (p->is_write)
+		return write_to_read_only(vcpu, p, r);
 
-static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
-			      struct sys_reg_params *p,
-			      const struct sys_reg_desc *r)
-{
-	return __access_id_reg(vcpu, p, r, true);
+	p->regval = read_id_reg(vcpu, r);
+	return true;
 }
 
 /* Visibility overrides for SVE-specific control registers */
@@ -1196,21 +1198,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	 * it doesn't promise more than what is actually provided (the
 	 * guest could otherwise be covered in ectoplasmic residue).
 	 */
-	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
+	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
 	if (csv2 > 1 ||
 	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
 		return -EINVAL;
 
 	/* Same thing for CSV3 */
-	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
+	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
 	if (csv3 > 1 ||
 	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
 		return -EINVAL;
 
 	/* We can only differ with CSV[23], and anything else is an error */
-	val ^= read_id_reg(vcpu, rd, false);
-	val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
-		 (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
+	val ^= read_id_reg(vcpu, rd);
+	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
 	if (val)
 		return -EINVAL;
 
@@ -1227,45 +1229,21 @@
 	 * are stored, and for set_id_reg() we don't allow the effective value
 	 * to be changed.
 	 */
-static int __get_id_reg(const struct kvm_vcpu *vcpu,
-			const struct sys_reg_desc *rd, u64 *val,
-			bool raz)
-{
-	*val = read_id_reg(vcpu, rd, raz);
-	return 0;
-}
-
-static int __set_id_reg(const struct kvm_vcpu *vcpu,
-			const struct sys_reg_desc *rd, u64 val,
-			bool raz)
-{
-	/* This is what we mean by invariant: you can't change it. */
-	if (val != read_id_reg(vcpu, rd, raz))
-		return -EINVAL;
-
-	return 0;
-}
-
 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      u64 *val)
 {
-	bool raz = sysreg_visible_as_raz(vcpu, rd);
-
-	return __get_id_reg(vcpu, rd, val, raz);
+	*val = read_id_reg(vcpu, rd);
+	return 0;
 }
 
 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      u64 val)
 {
-	bool raz = sysreg_visible_as_raz(vcpu, rd);
-
-	return __set_id_reg(vcpu, rd, val, raz);
-}
-
-static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-			  u64 val)
-{
-	return __set_id_reg(vcpu, rd, val, true);
+	/* This is what we mean by invariant: you can't change it. */
+	if (val != read_id_reg(vcpu, rd))
+		return -EINVAL;
+
+	return 0;
 }
 
 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1367,6 +1345,15 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
 	.visibility = id_visibility,		\
 }
 
+/* sys_reg_desc initialiser for known cpufeature ID registers */
+#define AA32_ID_SANITISED(name) {		\
+	SYS_DESC(SYS_##name),			\
+	.access	= access_id_reg,		\
+	.get_user = get_id_reg,			\
+	.set_user = set_id_reg,			\
+	.visibility = aa32_id_visibility,	\
+}
+
 /*
  * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
  * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
@@ -1374,9 +1361,10 @@
  */
 #define ID_UNALLOCATED(crm, op2) {			\
 	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
-	.access = access_raz_id_reg,			\
-	.get_user = get_raz_reg,			\
-	.set_user = set_raz_id_reg,			\
+	.access = access_id_reg,			\
+	.get_user = get_id_reg,				\
+	.set_user = set_id_reg,				\
+	.visibility = raz_visibility			\
 }
 
 /*
@@ -1386,9 +1374,10 @@
 */
 #define ID_HIDDEN(name) {			\
 	SYS_DESC(SYS_##name),			\
-	.access = access_raz_id_reg,		\
-	.get_user = get_raz_reg,		\
-	.set_user = set_raz_id_reg,		\
+	.access = access_id_reg,		\
+	.get_user = get_id_reg,			\
+	.set_user = set_id_reg,			\
+	.visibility = raz_visibility,		\
 }
 
 /*
@@ -1452,33 +1441,33 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	/* AArch64 mappings of the AArch32 ID registers */
 	/* CRm=1 */
-	ID_SANITISED(ID_PFR0_EL1),
-	ID_SANITISED(ID_PFR1_EL1),
-	ID_SANITISED(ID_DFR0_EL1),
+	AA32_ID_SANITISED(ID_PFR0_EL1),
+	AA32_ID_SANITISED(ID_PFR1_EL1),
+	AA32_ID_SANITISED(ID_DFR0_EL1),
 	ID_HIDDEN(ID_AFR0_EL1),
-	ID_SANITISED(ID_MMFR0_EL1),
-	ID_SANITISED(ID_MMFR1_EL1),
-	ID_SANITISED(ID_MMFR2_EL1),
-	ID_SANITISED(ID_MMFR3_EL1),
+	AA32_ID_SANITISED(ID_MMFR0_EL1),
+	AA32_ID_SANITISED(ID_MMFR1_EL1),
+	AA32_ID_SANITISED(ID_MMFR2_EL1),
+	AA32_ID_SANITISED(ID_MMFR3_EL1),
 
 	/* CRm=2 */
-	ID_SANITISED(ID_ISAR0_EL1),
-	ID_SANITISED(ID_ISAR1_EL1),
-	ID_SANITISED(ID_ISAR2_EL1),
-	ID_SANITISED(ID_ISAR3_EL1),
-	ID_SANITISED(ID_ISAR4_EL1),
-	ID_SANITISED(ID_ISAR5_EL1),
-	ID_SANITISED(ID_MMFR4_EL1),
-	ID_SANITISED(ID_ISAR6_EL1),
+	AA32_ID_SANITISED(ID_ISAR0_EL1),
+	AA32_ID_SANITISED(ID_ISAR1_EL1),
+	AA32_ID_SANITISED(ID_ISAR2_EL1),
+	AA32_ID_SANITISED(ID_ISAR3_EL1),
+	AA32_ID_SANITISED(ID_ISAR4_EL1),
+	AA32_ID_SANITISED(ID_ISAR5_EL1),
+	AA32_ID_SANITISED(ID_MMFR4_EL1),
+	AA32_ID_SANITISED(ID_ISAR6_EL1),
 
 	/* CRm=3 */
-	ID_SANITISED(MVFR0_EL1),
-	ID_SANITISED(MVFR1_EL1),
-	ID_SANITISED(MVFR2_EL1),
+	AA32_ID_SANITISED(MVFR0_EL1),
+	AA32_ID_SANITISED(MVFR1_EL1),
+	AA32_ID_SANITISED(MVFR2_EL1),
 	ID_UNALLOCATED(3,3),
-	ID_SANITISED(ID_PFR2_EL1),
+	AA32_ID_SANITISED(ID_PFR2_EL1),
 	ID_HIDDEN(ID_DFR1_EL1),
-	ID_SANITISED(ID_MMFR5_EL1),
+	AA32_ID_SANITISED(ID_MMFR5_EL1),
 	ID_UNALLOCATED(3,7),
 
 	/* AArch64 ID registers */
@@ -1825,11 +1814,11 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
 	} else {
 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
-		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
+		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
 
-		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
 			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
 		return true;
 	}
@@ -2809,6 +2798,9 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 	if (!r)
 		return -ENOENT;
 
+	if (sysreg_user_write_ignore(vcpu, r))
+		return 0;
+
 	if (r->set_user) {
 		ret = (r->set_user)(vcpu, r, val);
 	} else {


@@ -86,6 +86,7 @@ struct sys_reg_desc {

#define REG_HIDDEN	(1 << 0) /* hidden from userspace and guest */
#define REG_RAZ		(1 << 1) /* RAZ from userspace and guest */
+#define REG_USER_WI	(1 << 2) /* WI from userspace only */

static __printf(2, 3)
inline void print_sys_reg_msg(const struct sys_reg_params *p,

@@ -136,22 +137,31 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
	__vcpu_sys_reg(vcpu, r->reg) = r->val;
}

static inline unsigned int sysreg_visibility(const struct kvm_vcpu *vcpu,
					     const struct sys_reg_desc *r)
{
	if (likely(!r->visibility))
		return 0;

	return r->visibility(vcpu, r);
}

static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
				 const struct sys_reg_desc *r)
{
-	if (likely(!r->visibility))
-		return false;
-
-	return r->visibility(vcpu, r) & REG_HIDDEN;
+	return sysreg_visibility(vcpu, r) & REG_HIDDEN;
}

static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
					 const struct sys_reg_desc *r)
{
-	if (likely(!r->visibility))
-		return false;
-
-	return r->visibility(vcpu, r) & REG_RAZ;
+	return sysreg_visibility(vcpu, r) & REG_RAZ;
}

static inline bool sysreg_user_write_ignore(const struct kvm_vcpu *vcpu,
					    const struct sys_reg_desc *r)
{
	return sysreg_visibility(vcpu, r) & REG_USER_WI;
}

static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
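The new REG_USER_WI flag composes with REG_HIDDEN and REG_RAZ through sysreg_visibility() above. As a rough sketch of how a register ends up RAZ for the guest and write-ignore from userspace (the upstream callback and helper names may differ; kvm_supports_32bit_el0() is an assumed helper here):

/*
 * Illustrative sketch only, modelled on the AA32_ID_SANITISED change
 * in sys_regs.c above; kvm_supports_32bit_el0() is an assumed helper.
 */
static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * On an AArch64-only system the AArch32 ID registers are
	 * UNKNOWN: surface them as RAZ to the guest and ignore
	 * userspace writes so VMs remain migratable between hosts.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return 0;
}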


@@ -406,7 +406,7 @@ static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
-		if (!ite->collection || coll != ite->collection)
+		if (ite->collection != coll)
			continue;

		update_affinity_ite(kvm, ite);


@@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
-						ID_AA64MMFR0_ASID_SHIFT);
+						ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(),  fld);
		fallthrough;
-	case ID_AA64MMFR0_ASID_8:
+	case ID_AA64MMFR0_EL1_ASIDBITS_8:
		asid = 8;
		break;
-	case ID_AA64MMFR0_ASID_16:
+	case ID_AA64MMFR0_EL1_ASIDBITS_16:
		asid = 16;
	}


@@ -360,7 +360,7 @@ void __init arm64_memblock_init(void)
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
-					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
+					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
		s64 range = linear_region_size -
			BIT(id_aa64mmfr0_parange_to_phys_shift(parange));


@@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void)
	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
-						    ID_AA64PFR1_BT_SHIFT);
+						    ID_AA64PFR1_EL1_BT_SHIFT);
}

/*


@@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup)
	 * (ID_AA64PFR1_EL1[11:8] > 1).
	 */
	mrs	x10, ID_AA64PFR1_EL1
-	ubfx	x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
-	cmp	x10, #ID_AA64PFR1_MTE
+	ubfx	x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
+	cmp	x10, #ID_AA64PFR1_EL1_MTE_MTE2
	b.lt	1f

	/* Normal Tagged memory type at the corresponding MAIR index */


@@ -46,6 +46,127 @@
# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else.
Sysreg ID_AA64PFR0_EL1 3 0 0 4 0
Enum 63:60 CSV3
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CSV2
0b0000 NI
0b0001 IMP
0b0010 CSV2_2
0b0011 CSV2_3
EndEnum
Enum 55:52 RME
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 DIT
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AMU
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 43:40 MPAM
0b0000 0
0b0001 1
EndEnum
Enum 39:36 SEL2
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 SVE
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 RAS
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 27:24 GIC
0b0000 NI
0b0001 IMP
0b0010 V4P1
EndEnum
Enum 23:20 AdvSIMD
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 19:16 FP
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 15:12 EL3
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 11:8 EL2
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 7:4 EL1
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 3:0 EL0
0b0001 IMP
0b0010 AARCH32
EndEnum
EndSysreg
Sysreg ID_AA64PFR1_EL1 3 0 0 4 1
Res0 63:40
Enum 39:36 NMI
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 CSV2_frac
0b0000 NI
0b0001 CSV2_1p1
0b0010 CSV2_1p2
EndEnum
Enum 31:28 RNDR_trap
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SME
0b0000 NI
0b0001 IMP
EndEnum
Res0 23:20
Enum 19:16 MPAM_frac
0b0000 MINOR_0
0b0001 MINOR_1
EndEnum
Enum 15:12 RAS_frac
0b0000 NI
0b0001 RASv1p1
EndEnum
Enum 11:8 MTE
0b0000 NI
0b0001 IMP
0b0010 MTE2
0b0011 MTE3
EndEnum
Enum 7:4 SSBS
0b0000 NI
0b0001 IMP
0b0010 SSBS2
EndEnum
Enum 3:0 BT
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4
Res0 63:60
Enum 59:56 F64MM

@@ -98,7 +219,9 @@ Enum 63 FA64
0b1 IMP
EndEnum
Res0 62:60
-Field 59:56 SMEver
+Enum 59:56 SMEver
+0b0000 IMP
+EndEnum
Enum 55:52 I16I64
0b0000 NI
0b1111 IMP

@@ -129,6 +252,89 @@ EndEnum
Res0 31:0
EndSysreg
Sysreg ID_AA64DFR0_EL1 3 0 0 5 0
Enum 63:60 HPMN0
0b0000 UNPREDICTABLE
0b0001 DEF
EndEnum
Res0 59:56
Enum 55:52 BRBE
0b0000 NI
0b0001 IMP
0b0010 BRBE_V1P1
EndEnum
Enum 51:48 MTPMU
0b0000 NI_IMPDEF
0b0001 IMP
0b1111 NI
EndEnum
Enum 47:44 TraceBuffer
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TraceFilt
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 DoubleLock
0b0000 IMP
0b1111 NI
EndEnum
Enum 35:32 PMSVer
0b0000 NI
0b0001 IMP
0b0010 V1P1
0b0011 V1P2
0b0100 V1P3
EndEnum
Field 31:28 CTX_CMPs
Res0 27:24
Field 23:20 WRPs
Res0 19:16
Field 15:12 BRPs
Enum 11:8 PMUVer
0b0000 NI
0b0001 IMP
0b0100 V3P1
0b0101 V3P4
0b0110 V3P5
0b0111 V3P7
0b1000 V3P8
0b1111 IMP_DEF
EndEnum
Enum 7:4 TraceVer
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 DebugVer
0b0110 IMP
0b0111 VHE
0b1000 V8P2
0b1001 V8P4
0b1010 V8P8
EndEnum
EndSysreg
Sysreg ID_AA64DFR1_EL1 3 0 0 5 1
Res0 63:0
EndSysreg
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
Res0 63:32
Field 31:28 IMPDEF7
Field 27:24 IMPDEF6
Field 23:20 IMPDEF5
Field 19:16 IMPDEF4
Field 15:12 IMPDEF3
Field 11:8 IMPDEF2
Field 7:4 IMPDEF1
Field 3:0 IMPDEF0
EndSysreg
Sysreg ID_AA64AFR1_EL1 3 0 0 5 5
Res0 63:0
EndSysreg
Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0
Enum 63:60 RNDR
0b0000 NI

@@ -313,6 +519,217 @@ Enum 3:0 WFxT
EndEnum
EndSysreg
Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0
Enum 63:60 ECV
0b0000 NI
0b0001 IMP
0b0010 CNTPOFF
EndEnum
Enum 59:56 FGT
0b0000 NI
0b0001 IMP
EndEnum
Res0 55:48
Enum 47:44 EXS
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TGRAN4_2
0b0000 TGRAN4
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 39:36 TGRAN64_2
0b0000 TGRAN64
0b0001 NI
0b0010 IMP
EndEnum
Enum 35:32 TGRAN16_2
0b0000 TGRAN16
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 31:28 TGRAN4
0b0000 IMP
0b0001 52_BIT
0b1111 NI
EndEnum
Enum 27:24 TGRAN64
0b0000 IMP
0b1111 NI
EndEnum
Enum 23:20 TGRAN16
0b0000 NI
0b0001 IMP
0b0010 52_BIT
EndEnum
Enum 19:16 BIGENDEL0
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 SNSMEM
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 BIGEND
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 ASIDBITS
0b0000 8
0b0010 16
EndEnum
Enum 3:0 PARANGE
0b0000 32
0b0001 36
0b0010 40
0b0011 42
0b0100 44
0b0101 48
0b0110 52
EndEnum
EndSysreg
Sysreg ID_AA64MMFR1_EL1 3 0 0 7 1
Enum 63:60 ECBHB
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CMOW
0b0000 NI
0b0001 IMP
EndEnum
Enum 55:52 TIDCP1
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 nTLBPA
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AFP
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 HCX
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 ETS
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 TWED
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 XNX
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SpecSEI
0b0000 NI
0b0001 IMP
EndEnum
Enum 23:20 PAN
0b0000 NI
0b0001 IMP
0b0010 PAN2
0b0011 PAN3
EndEnum
Enum 19:16 LO
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 HPDS
0b0000 NI
0b0001 IMP
0b0010 HPDS2
EndEnum
Enum 11:8 VH
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 VMIDBits
0b0000 8
0b0010 16
EndEnum
Enum 3:0 HAFDBS
0b0000 NI
0b0001 AF
0b0010 DBM
EndEnum
EndSysreg
Sysreg ID_AA64MMFR2_EL1 3 0 0 7 2
Enum 63:60 E0PD
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 EVT
0b0000 NI
0b0001 IMP
0b0010 TTLBxS
EndEnum
Enum 55:52 BBM
0b0000 0
0b0001 1
0b0010 2
EndEnum
Enum 51:48 TTL
0b0000 NI
0b0001 IMP
EndEnum
Res0 47:44
Enum 43:40 FWB
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 IDS
0b0000 0x0
0b0001 0x18
EndEnum
Enum 35:32 AT
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 ST
0b0000 39
0b0001 48_47
EndEnum
Enum 27:24 NV
0b0000 NI
0b0001 IMP
0b0010 NV2
EndEnum
Enum 23:20 CCIDX
0b0000 32
0b0001 64
EndEnum
Enum 19:16 VARange
0b0000 48
0b0001 52
EndEnum
Enum 15:12 IESB
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 LSM
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 UAO
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 CnP
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg SCTLR_EL1 3 0 1 0 0
Field 63 TIDCP
Field 62 SPINMASK

@@ -427,6 +844,12 @@ Sysreg SMCR_EL1 3 0 1 2 6
Fields SMCR_ELx
EndSysreg
Sysreg ALLINT 3 0 4 3 0
Res0 63:14
Field 13 ALLINT
Res0 12:0
EndSysreg
Sysreg FAR_EL1 3 0 6 0 0
Field 63:0 ADDR
EndSysreg

@@ -440,6 +863,14 @@ Sysreg CONTEXTIDR_EL1 3 0 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
Sysreg TPIDR_EL1 3 0 13 0 4
Field 63:0 ThreadID
EndSysreg
Sysreg SCXTNUM_EL1 3 0 13 0 7
Field 63:0 SoftwareContextNumber
EndSysreg
Sysreg CLIDR_EL1 3 1 0 0 1
Res0 63:47
Field 46:33 Ttypen

@@ -514,6 +945,22 @@ Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
Sysreg HCRX_EL2 3 4 1 2 2
Res0 63:12
Field 11 MSCEn
Field 10 MCE2
Field 9 CMOW
Field 8 VFNMI
Field 7 VINMI
Field 6 TALLINT
Field 5 SMPME
Field 4 FGTnXS
Field 3 FnXS
Field 2 EnASR
Field 1 EnALS
Field 0 EnAS0
EndSysreg
Sysreg SMPRIMAP_EL2 3 4 1 2 5
Field 63:60 P15
Field 59:56 P14
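For context: these .sysreg descriptions are fed through arch/arm64/tools/gen-sysreg.awk to generate the C constants used throughout this series, which is why ID_AA64MMFR0_ASID_SHIFT becomes ID_AA64MMFR0_EL1_ASIDBITS_SHIFT in the C hunks elsewhere in this merge. For the ASIDBITS field above, the generated output is roughly of this shape (approximate; the exact macro forms are the generator's business):

/* Generated from "Enum 7:4 ASIDBITS" in ID_AA64MMFR0_EL1 (approximate) */
#define ID_AA64MMFR0_EL1_ASIDBITS_SHIFT		4
#define ID_AA64MMFR0_EL1_ASIDBITS_MASK		GENMASK(7, 4)
#define ID_AA64MMFR0_EL1_ASIDBITS_8		UL(0b0000)
#define ID_AA64MMFR0_EL1_ASIDBITS_16		UL(0b0010)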


@@ -28,7 +28,8 @@ config KVM
	select HAVE_KVM_IRQCHIP
	select HAVE_KVM_PFNCACHE
	select HAVE_KVM_IRQFD
-	select HAVE_KVM_DIRTY_RING
+	select HAVE_KVM_DIRTY_RING_TSO
+	select HAVE_KVM_DIRTY_RING_ACQ_REL
	select IRQ_BYPASS_MANAGER
	select HAVE_KVM_IRQ_BYPASS
	select HAVE_KVM_IRQ_ROUTING


@@ -23,8 +23,8 @@ efi_status_t check_platform_features(void)
	if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
		return EFI_SUCCESS;

-	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
-	if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
+	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
+	if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
		if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
			efi_err("This 64 KB granular kernel is not supported by your CPU\n");
		else


@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);

@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
-	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;


@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void)
{
	unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

-	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);

	return fld >= 0x3;
}


@@ -1177,6 +1177,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
#define KVM_CAP_S390_ZPCI_OP 221
#define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223

#ifdef KVM_CAP_IRQ_ROUTING


@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+/aarch64/aarch32_id_regs
/aarch64/arch_timer
/aarch64/debug-exceptions
/aarch64/get-reg-list


@@ -147,6 +147,7 @@ TEST_GEN_PROGS_x86_64 += system_counter_offset_test
# Compiled outputs used by test targets
TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test

+TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list


@@ -0,0 +1,169 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aarch32_id_regs - Test for ID register behavior on AArch64-only systems
*
* Copyright (c) 2022 Google LLC.
*
* Test that KVM handles the AArch64 views of the AArch32 ID registers as RAZ
* and WI from userspace.
*/
#include <stdint.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#define BAD_ID_REG_VAL 0x1badc0deul
#define GUEST_ASSERT_REG_RAZ(reg) GUEST_ASSERT_EQ(read_sysreg_s(reg), 0)
static void guest_main(void)
{
GUEST_ASSERT_REG_RAZ(SYS_ID_PFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_PFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_DFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_AFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR2_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR3_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR2_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR3_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR4_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR5_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR4_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR6_EL1);
GUEST_ASSERT_REG_RAZ(SYS_MVFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_MVFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_MVFR2_EL1);
GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 3));
GUEST_ASSERT_REG_RAZ(SYS_ID_PFR2_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_DFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR5_EL1);
GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 7));
GUEST_DONE();
}
static void test_guest_raz(struct kvm_vcpu *vcpu)
{
struct ucall uc;
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
}
static uint64_t raz_wi_reg_ids[] = {
KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1),
KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR1_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR3_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR1_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR3_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR4_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR5_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR4_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR6_EL1),
KVM_ARM64_SYS_REG(SYS_MVFR0_EL1),
KVM_ARM64_SYS_REG(SYS_MVFR1_EL1),
KVM_ARM64_SYS_REG(SYS_MVFR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_PFR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR5_EL1),
};
static void test_user_raz_wi(struct kvm_vcpu *vcpu)
{
int i;
for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) {
uint64_t reg_id = raz_wi_reg_ids[i];
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
ASSERT_EQ(val, 0);
/*
* Expect the ioctl to succeed with no effect on the register
* value.
*/
vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
vcpu_get_reg(vcpu, reg_id, &val);
ASSERT_EQ(val, 0);
}
}
static uint64_t raz_invariant_reg_ids[] = {
KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1),
KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)),
KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1),
KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 7)),
};
static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
{
int i, r;
for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) {
uint64_t reg_id = raz_invariant_reg_ids[i];
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
ASSERT_EQ(val, 0);
r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
TEST_ASSERT(r < 0 && errno == EINVAL,
"unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
vcpu_get_reg(vcpu, reg_id, &val);
ASSERT_EQ(val, 0);
}
}
static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
{
uint64_t val, el0;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
el0 = (val & ARM64_FEATURE_MASK(ID_AA64PFR0_EL0)) >> ID_AA64PFR0_EL0_SHIFT;
return el0 == ID_AA64PFR0_ELx_64BIT_ONLY;
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
TEST_REQUIRE(vcpu_aarch64_only(vcpu));
ucall_init(vm, NULL);
test_user_raz_wi(vcpu);
test_user_raz_invariant(vcpu);
test_guest_raz(vcpu);
ucall_uninit(vm);
kvm_vm_free(vm);
}


@@ -22,6 +22,7 @@
#define SPSR_SS		(1 << 21)

extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start;
+extern unsigned char iter_ss_begin, iter_ss_end;
static volatile uint64_t sw_bp_addr, hw_bp_addr;
static volatile uint64_t wp_addr, wp_data_addr;
static volatile uint64_t svc_addr;

@@ -238,6 +239,46 @@ static void guest_svc_handler(struct ex_regs *regs)
	svc_addr = regs->pc;
}
enum single_step_op {
SINGLE_STEP_ENABLE = 0,
SINGLE_STEP_DISABLE = 1,
};
static void guest_code_ss(int test_cnt)
{
uint64_t i;
uint64_t bvr, wvr, w_bvr, w_wvr;
for (i = 0; i < test_cnt; i++) {
/* Bits [1:0] of dbg{b,w}vr are RES0 */
w_bvr = i << 2;
w_wvr = i << 2;
/* Enable Single Step execution */
GUEST_SYNC(SINGLE_STEP_ENABLE);
/*
* Userspace will verify that the pc is as expected during
* single step execution between iter_ss_begin and iter_ss_end.
*/
asm volatile("iter_ss_begin:nop\n");
write_sysreg(w_bvr, dbgbvr0_el1);
write_sysreg(w_wvr, dbgwvr0_el1);
bvr = read_sysreg(dbgbvr0_el1);
wvr = read_sysreg(dbgwvr0_el1);
asm volatile("iter_ss_end:\n");
/* Disable Single Step execution */
GUEST_SYNC(SINGLE_STEP_DISABLE);
GUEST_ASSERT(bvr == w_bvr);
GUEST_ASSERT(wvr == w_wvr);
}
GUEST_DONE();
}
static int debug_version(struct kvm_vcpu *vcpu)
{
	uint64_t id_aa64dfr0;

@@ -246,7 +287,7 @@ static int debug_version(struct kvm_vcpu *vcpu)
	return id_aa64dfr0 & 0xf;
}

-int main(int argc, char *argv[])
+static void test_guest_debug_exceptions(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

@@ -259,9 +300,6 @@ int main(int argc, char *argv[])
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

-	__TEST_REQUIRE(debug_version(vcpu) >= 6,
-		       "Armv8 debug architecture not supported.");
-
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_BRK_INS, guest_sw_bp_handler);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,

@@ -294,5 +332,108 @@
done:
	kvm_vm_free(vm);
}
void test_single_step_from_userspace(int test_cnt)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
struct kvm_run *run;
uint64_t pc, cmd;
uint64_t test_pc = 0;
bool ss_enable = false;
struct kvm_guest_debug debug = {};
vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
ucall_init(vm, NULL);
run = vcpu->run;
vcpu_args_set(vcpu, 1, test_cnt);
while (1) {
vcpu_run(vcpu);
if (run->exit_reason != KVM_EXIT_DEBUG) {
cmd = get_ucall(vcpu, &uc);
if (cmd == UCALL_ABORT) {
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
} else if (cmd == UCALL_DONE) {
break;
}
TEST_ASSERT(cmd == UCALL_SYNC,
"Unexpected ucall cmd 0x%lx", cmd);
if (uc.args[1] == SINGLE_STEP_ENABLE) {
debug.control = KVM_GUESTDBG_ENABLE |
KVM_GUESTDBG_SINGLESTEP;
ss_enable = true;
} else {
debug.control = SINGLE_STEP_DISABLE;
ss_enable = false;
}
vcpu_guest_debug_set(vcpu, &debug);
continue;
}
TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");
/* Check if the current pc is expected. */
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
TEST_ASSERT(!test_pc || pc == test_pc,
"Unexpected pc 0x%lx (expected 0x%lx)",
pc, test_pc);
/*
* If the current pc is between iter_ss_begin and
* iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
* be the current pc + 4.
*/
if ((pc >= (uint64_t)&iter_ss_begin) &&
(pc < (uint64_t)&iter_ss_end))
test_pc = pc + 4;
else
test_pc = 0;
}
kvm_vm_free(vm);
}
static void help(char *name)
{
puts("");
printf("Usage: %s [-h] [-i iterations of the single step test]\n", name);
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int opt;
int ss_iteration = 10000;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
__TEST_REQUIRE(debug_version(vcpu) >= 6,
"Armv8 debug architecture not supported.");
kvm_vm_free(vm);
while ((opt = getopt(argc, argv, "i:")) != -1) {
switch (opt) {
case 'i':
ss_iteration = atoi(optarg);
break;
case 'h':
default:
help(argv[0]);
break;
}
}
test_guest_debug_exceptions();
test_single_step_from_userspace(ss_iteration);
	return 0;
}


@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the
- * CPU_ON PSCI call matches what the caller requested.
+ * psci_test - Tests relating to KVM's PSCI implementation.
 *
 * Copyright (c) 2021 Google LLC.
 *
- * This is a regression test for a race between KVM servicing the PSCI call and
- * userspace reading the vCPUs registers.
+ * This test includes:
+ * - A regression test for a race between KVM servicing the PSCI CPU_ON call
+ *   and userspace reading the targeted vCPU's registers.
+ * - A test for KVM's handling of PSCI SYSTEM_SUSPEND and the associated
+ *   KVM_SYSTEM_EVENT_SUSPEND UAPI.
 */

#define _GNU_SOURCE


@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
+#include <asm/barrier.h>

#include "kvm_util.h"
#include "test_util.h"

@@ -264,7 +265,8 @@ static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
static bool dirty_ring_supported(void)
{
-	return kvm_has_cap(KVM_CAP_DIRTY_LOG_RING);
+	return (kvm_has_cap(KVM_CAP_DIRTY_LOG_RING) ||
+		kvm_has_cap(KVM_CAP_DIRTY_LOG_RING_ACQ_REL));
}

static void dirty_ring_create_vm_done(struct kvm_vm *vm)

@@ -279,12 +281,12 @@ static void dirty_ring_create_vm_done(struct kvm_vm *vm)
static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
-	return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
+	return smp_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
-	gfn->flags = KVM_DIRTY_GFN_F_RESET;
+	smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,


@@ -121,7 +121,10 @@ unsigned int kvm_check_cap(long cap)
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
-	vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
+	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
+		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
+	else
+		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
	vm->dirty_ring_size = ring_size;
}


@@ -19,6 +19,20 @@ config HAVE_KVM_IRQ_ROUTING
config HAVE_KVM_DIRTY_RING
	bool
# Only strongly ordered architectures can select this, as it doesn't
# put any explicit constraint on userspace ordering. They can also
# select the _ACQ_REL version.
config HAVE_KVM_DIRTY_RING_TSO
bool
select HAVE_KVM_DIRTY_RING
depends on X86
# Weakly ordered architectures can only select this, advertising
# to userspace the additional ordering requirements.
config HAVE_KVM_DIRTY_RING_ACQ_REL
bool
select HAVE_KVM_DIRTY_RING
config HAVE_KVM_EVENTFD
	bool
	select EVENTFD
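To make the ordering contract concrete for VMM authors: on an _ACQ_REL host, harvesting must load-acquire an entry's flags before trusting its slot/offset, and store-release the reset. A minimal userspace sketch using the GCC/Clang __atomic builtins; the struct mirrors the <linux/kvm.h> entry layout, and mark_page_dirty_in_bitmap() is a hypothetical VMM hook:

#include <stdint.h>

struct kvm_dirty_gfn {		/* mirrors the UAPI entry layout */
	uint32_t flags;
	uint32_t slot;
	uint64_t offset;
};

#define KVM_DIRTY_GFN_F_DIRTY	(1 << 0)
#define KVM_DIRTY_GFN_F_RESET	(1 << 1)

/* Supplied by the VMM; illustrative only. */
extern void mark_page_dirty_in_bitmap(uint32_t slot, uint64_t offset);

static void harvest_ring(struct kvm_dirty_gfn *ring, uint32_t nents,
			 uint32_t *fetch)
{
	for (;;) {
		struct kvm_dirty_gfn *e = &ring[*fetch % nents];

		/* Load-acquire: read flags before slot/offset. */
		if (!(__atomic_load_n(&e->flags, __ATOMIC_ACQUIRE) &
		      KVM_DIRTY_GFN_F_DIRTY))
			break;

		mark_page_dirty_in_bitmap(e->slot, e->offset);

		/* Store-release: publish the reset only after the reads. */
		__atomic_store_n(&e->flags, KVM_DIRTY_GFN_F_RESET,
				 __ATOMIC_RELEASE);
		(*fetch)++;
	}
}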


@@ -74,7 +74,7 @@ int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
-	gfn->flags = 0;
+	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)

@@ -84,7 +84,7 @@ static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
-	return gfn->flags & KVM_DIRTY_GFN_F_RESET;
+	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}

int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)


@@ -4473,7 +4473,13 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
	case KVM_CAP_NR_MEMSLOTS:
		return KVM_USER_MEM_SLOTS;
	case KVM_CAP_DIRTY_LOG_RING:
-#ifdef CONFIG_HAVE_KVM_DIRTY_RING
+#ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
+		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
+#else
+		return 0;
+#endif
+	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
+#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
#else
		return 0;

@@ -4578,6 +4584,7 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
		return 0;
	}
	case KVM_CAP_DIRTY_LOG_RING:
+	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
	default:
		return kvm_vm_ioctl_enable_cap(kvm, cap);