Merge branch 'for-next/cpus_have_const_cap' into for-next/core
* for-next/cpus_have_const_cap: (38 commits)
  : cpus_have_const_cap() removal
  arm64: Remove cpus_have_const_cap()
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_REPEAT_TLBI
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_NVIDIA_CARMEL_CNP
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_CAVIUM_23154
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_2645198
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1742098
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1542419
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_843419
  arm64: Avoid cpus_have_const_cap() for ARM64_UNMAP_KERNEL_AT_EL0
  arm64: Avoid cpus_have_const_cap() for ARM64_{SVE,SME,SME2,FA64}
  arm64: Avoid cpus_have_const_cap() for ARM64_SPECTRE_V2
  arm64: Avoid cpus_have_const_cap() for ARM64_SSBS
  arm64: Avoid cpus_have_const_cap() for ARM64_MTE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_TLB_RANGE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_WFXT
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_RNG
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_EPAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_PAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_GIC_PRIO_MASKING
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_DIT
  ...
commit 14dcf78a6c
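The conversion applied throughout the diff below is mechanical: callers that tested a capability with cpus_have_const_cap() move to a helper that states when the capability is known (cpus_have_final_cap(), cpus_have_final_boot_cap(), or the patched alternative_has_cap_likely()/alternative_has_cap_unlikely() checks), while the new cpucap_is_possible() folds the IS_ENABLED() test into the check itself. A minimal sketch of the pattern, using a made-up capability and config symbol (ARM64_HAS_WIDGET / CONFIG_ARM64_WIDGET are placeholders, not names from this series):

/* Before: explicit Kconfig guard plus cpus_have_const_cap(). */
static inline bool system_supports_widget(void)
{
	return IS_ENABLED(CONFIG_ARM64_WIDGET) &&
	       cpus_have_const_cap(ARM64_HAS_WIDGET);	/* hypothetical cap */
}

/* After: a single patched check. cpucap_is_possible() makes the
 * IS_ENABLED() test implicit, so the whole helper compiles down to a
 * patched branch (or to "false" when the config option is off). */
static inline bool system_supports_widget(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_WIDGET);
}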
@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
BUG_ON(err);
per_cpu(xen_vcpu, cpu) = vcpup;

if (!xen_kernel_unmapped_at_usr())
xen_setup_runstate_info(cpu);

after_register_vcpu_info:
enable_percpu_irq(xen_events_irq, 0);
return 0;
@@ -523,9 +520,6 @@ static int __init xen_guest_init(void)
return -EINVAL;
}

if (!xen_kernel_unmapped_at_usr())
xen_time_setup_guest();

if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);

@@ -535,7 +529,13 @@ static int __init xen_guest_init(void)
}
early_initcall(xen_guest_init);

static int __init xen_pm_init(void)
static int xen_starting_runstate_cpu(unsigned int cpu)
{
xen_setup_runstate_info(cpu);
return 0;
}

static int __init xen_late_init(void)
{
if (!xen_domain())
return -ENODEV;
@@ -548,9 +548,16 @@ static int __init xen_pm_init(void)
do_settimeofday64(&ts);
}

return 0;
if (xen_kernel_unmapped_at_usr())
return 0;

xen_time_setup_guest();

return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
"arm/xen_runstate:starting",
xen_starting_runstate_cpu, NULL);
}
late_initcall(xen_pm_init);
late_initcall(xen_late_init);

/* empty stubs */
@@ -6,5 +6,5 @@ generic-y += qspinlock.h
generic-y += parport.h
generic-y += user.h

generated-y += cpucaps.h
generated-y += cpucap-defs.h
generated-y += sysreg-defs.h
@@ -226,8 +226,8 @@ alternative_endif
static __always_inline bool
alternative_has_cap_likely(const unsigned long cpucap)
{
compiletime_assert(cpucap < ARM64_NCAPS,
"cpucap must be < ARM64_NCAPS");
if (!cpucap_is_possible(cpucap))
return false;

asm_volatile_goto(
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
@@ -244,8 +244,8 @@ l_no:
static __always_inline bool
alternative_has_cap_unlikely(const unsigned long cpucap)
{
compiletime_assert(cpucap < ARM64_NCAPS,
"cpucap must be < ARM64_NCAPS");
if (!cpucap_is_possible(cpucap))
return false;

asm_volatile_goto(
ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
@@ -79,6 +79,14 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
return 0x3ff;
}

static u64 __maybe_unused gic_read_iar(void)
{
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
}

static inline void gic_write_ctlr(u32 val)
{
write_sysreg_s(val, SYS_ICC_CTLR_EL1);
@@ -63,7 +63,7 @@ static __always_inline bool __cpu_has_rng(void)
{
if (unlikely(!system_capabilities_finalized() && !preemptible()))
return this_cpu_has_cap(ARM64_HAS_RNG);
return cpus_have_const_cap(ARM64_HAS_RNG);
return alternative_has_cap_unlikely(ARM64_HAS_RNG);
}

static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
@@ -132,7 +132,7 @@ void flush_dcache_folio(struct folio *);

static __always_inline void icache_inval_all_pou(void)
{
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
if (alternative_has_cap_unlikely(ARM64_HAS_CACHE_DIC))
return;

asm("ic ialluis");
arch/arm64/include/asm/cpucaps.h (new file, 67 lines)
@@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_CPUCAPS_H
#define __ASM_CPUCAPS_H

#include <asm/cpucap-defs.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* Check whether a cpucap is possible at compiletime.
*/
static __always_inline bool
cpucap_is_possible(const unsigned int cap)
{
compiletime_assert(__builtin_constant_p(cap),
"cap must be a constant");
compiletime_assert(cap < ARM64_NCAPS,
"cap must be < ARM64_NCAPS");

switch (cap) {
case ARM64_HAS_PAN:
return IS_ENABLED(CONFIG_ARM64_PAN);
case ARM64_HAS_EPAN:
return IS_ENABLED(CONFIG_ARM64_EPAN);
case ARM64_SVE:
return IS_ENABLED(CONFIG_ARM64_SVE);
case ARM64_SME:
case ARM64_SME2:
case ARM64_SME_FA64:
return IS_ENABLED(CONFIG_ARM64_SME);
case ARM64_HAS_CNP:
return IS_ENABLED(CONFIG_ARM64_CNP);
case ARM64_HAS_ADDRESS_AUTH:
case ARM64_HAS_GENERIC_AUTH:
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH);
case ARM64_HAS_GIC_PRIO_MASKING:
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI);
case ARM64_MTE:
return IS_ENABLED(CONFIG_ARM64_MTE);
case ARM64_BTI:
return IS_ENABLED(CONFIG_ARM64_BTI);
case ARM64_HAS_TLB_RANGE:
return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
case ARM64_UNMAP_KERNEL_AT_EL0:
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
case ARM64_WORKAROUND_843419:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419);
case ARM64_WORKAROUND_1742098:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_1742098);
case ARM64_WORKAROUND_2645198:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198);
case ARM64_WORKAROUND_2658417:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_2658417);
case ARM64_WORKAROUND_CAVIUM_23154:
return IS_ENABLED(CONFIG_CAVIUM_ERRATUM_23154);
case ARM64_WORKAROUND_NVIDIA_CARMEL_CNP:
return IS_ENABLED(CONFIG_NVIDIA_CARMEL_CNP_ERRATUM);
case ARM64_WORKAROUND_REPEAT_TLBI:
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
}

return true;
}
#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUCAPS_H */
@@ -440,6 +440,11 @@ unsigned long cpu_get_elf_hwcap2(void);
#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))

static __always_inline bool boot_capabilities_finalized(void)
{
return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
}

static __always_inline bool system_capabilities_finalized(void)
{
return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
@@ -452,6 +457,8 @@ static __always_inline bool system_capabilities_finalized(void)
*/
static __always_inline bool cpus_have_cap(unsigned int num)
{
if (__builtin_constant_p(num) && !cpucap_is_possible(num))
return false;
if (num >= ARM64_NCAPS)
return false;
return arch_test_bit(num, system_cpucaps);
@@ -460,55 +467,37 @@ static __always_inline bool cpus_have_cap(unsigned int num)
/*
* Test for a capability without a runtime check.
*
* Before capabilities are finalized, this returns false.
* After capabilities are finalized, this is patched to avoid a runtime check.
* Before boot capabilities are finalized, this will BUG().
* After boot capabilities are finalized, this is patched to avoid a runtime
* check.
*
* @num must be a compile-time constant.
*/
static __always_inline bool __cpus_have_const_cap(int num)
static __always_inline bool cpus_have_final_boot_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
return alternative_has_cap_unlikely(num);
if (boot_capabilities_finalized())
return alternative_has_cap_unlikely(num);
else
BUG();
}

/*
* Test for a capability without a runtime check.
*
* Before capabilities are finalized, this will BUG().
* After capabilities are finalized, this is patched to avoid a runtime check.
* Before system capabilities are finalized, this will BUG().
* After system capabilities are finalized, this is patched to avoid a runtime
* check.
*
* @num must be a compile-time constant.
*/
static __always_inline bool cpus_have_final_cap(int num)
{
if (system_capabilities_finalized())
return __cpus_have_const_cap(num);
return alternative_has_cap_unlikely(num);
else
BUG();
}

/*
* Test for a capability, possibly with a runtime check for non-hyp code.
*
* For hyp code, this behaves the same as cpus_have_final_cap().
*
* For non-hyp code:
* Before capabilities are finalized, this behaves as cpus_have_cap().
* After capabilities are finalized, this is patched to avoid a runtime check.
*
* @num must be a compile-time constant.
*/
static __always_inline bool cpus_have_const_cap(int num)
{
if (is_hyp_code())
return cpus_have_final_cap(num);
else if (system_capabilities_finalized())
return __cpus_have_const_cap(num);
else
return cpus_have_cap(num);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
@@ -628,7 +617,9 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}

void __init setup_cpu_features(void);
void __init setup_system_features(void);
void __init setup_user_features(void);

void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
@@ -737,13 +728,12 @@ static inline bool system_supports_mixed_endian(void)

static __always_inline bool system_supports_fpsimd(void)
{
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
}

static inline bool system_uses_hw_pan(void)
{
return IS_ENABLED(CONFIG_ARM64_PAN) &&
cpus_have_const_cap(ARM64_HAS_PAN);
return alternative_has_cap_unlikely(ARM64_HAS_PAN);
}

static inline bool system_uses_ttbr0_pan(void)
@@ -754,26 +744,22 @@ static inline bool system_uses_ttbr0_pan(void)

static __always_inline bool system_supports_sve(void)
{
return IS_ENABLED(CONFIG_ARM64_SVE) &&
cpus_have_const_cap(ARM64_SVE);
return alternative_has_cap_unlikely(ARM64_SVE);
}

static __always_inline bool system_supports_sme(void)
{
return IS_ENABLED(CONFIG_ARM64_SME) &&
cpus_have_const_cap(ARM64_SME);
return alternative_has_cap_unlikely(ARM64_SME);
}

static __always_inline bool system_supports_sme2(void)
{
return IS_ENABLED(CONFIG_ARM64_SME) &&
cpus_have_const_cap(ARM64_SME2);
return alternative_has_cap_unlikely(ARM64_SME2);
}

static __always_inline bool system_supports_fa64(void)
{
return IS_ENABLED(CONFIG_ARM64_SME) &&
cpus_have_const_cap(ARM64_SME_FA64);
return alternative_has_cap_unlikely(ARM64_SME_FA64);
}

static __always_inline bool system_supports_tpidr2(void)
@@ -783,20 +769,17 @@ static __always_inline bool system_supports_tpidr2(void)

static __always_inline bool system_supports_cnp(void)
{
return IS_ENABLED(CONFIG_ARM64_CNP) &&
cpus_have_const_cap(ARM64_HAS_CNP);
return alternative_has_cap_unlikely(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_has_full_ptr_auth(void)
@@ -806,14 +789,12 @@ static inline bool system_has_full_ptr_auth(void)

static __always_inline bool system_uses_irq_prio_masking(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}

static inline bool system_supports_mte(void)
{
return IS_ENABLED(CONFIG_ARM64_MTE) &&
cpus_have_const_cap(ARM64_MTE);
return alternative_has_cap_unlikely(ARM64_MTE);
}

static inline bool system_has_prio_mask_debugging(void)
@@ -824,13 +805,18 @@ static inline bool system_has_prio_mask_debugging(void)

static inline bool system_supports_bti(void)
{
return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
return cpus_have_final_cap(ARM64_BTI);
}

static inline bool system_supports_bti_kernel(void)
{
return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
cpus_have_final_boot_cap(ARM64_BTI);
}

static inline bool system_supports_tlb_range(void)
{
return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
}

int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
@@ -32,6 +32,32 @@
#define VFP_STATE_SIZE ((32 * 8) + 4)
#endif

static inline unsigned long cpacr_save_enable_kernel_sve(void)
{
unsigned long old = read_sysreg(cpacr_el1);
unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN;

write_sysreg(old | set, cpacr_el1);
isb();
return old;
}

static inline unsigned long cpacr_save_enable_kernel_sme(void)
{
unsigned long old = read_sysreg(cpacr_el1);
unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_SMEN_EL1EN;

write_sysreg(old | set, cpacr_el1);
isb();
return old;
}

static inline void cpacr_restore(unsigned long cpacr)
{
write_sysreg(cpacr, cpacr_el1);
isb();
}

/*
* When we defined the maximum SVE vector length we defined the ABI so
* that the maximum vector length included all the reserved for future
@@ -123,10 +149,11 @@ extern void sme_save_state(void *state, int zt);
extern void sme_load_state(void const *state, int zt);

struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);

extern u64 read_smcr_features(void);
@@ -21,12 +21,6 @@
* exceptions should be unmasked.
*/

static __always_inline bool __irqflags_uses_pmr(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}

static __always_inline void __daif_local_irq_enable(void)
{
barrier();
@@ -49,7 +43,7 @@ static __always_inline void __pmr_local_irq_enable(void)

static inline void arch_local_irq_enable(void)
{
if (__irqflags_uses_pmr()) {
if (system_uses_irq_prio_masking()) {
__pmr_local_irq_enable();
} else {
__daif_local_irq_enable();
@@ -77,7 +71,7 @@ static __always_inline void __pmr_local_irq_disable(void)

static inline void arch_local_irq_disable(void)
{
if (__irqflags_uses_pmr()) {
if (system_uses_irq_prio_masking()) {
__pmr_local_irq_disable();
} else {
__daif_local_irq_disable();
@@ -99,7 +93,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void)
*/
static inline unsigned long arch_local_save_flags(void)
{
if (__irqflags_uses_pmr()) {
if (system_uses_irq_prio_masking()) {
return __pmr_local_save_flags();
} else {
return __daif_local_save_flags();
@@ -118,7 +112,7 @@ static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
if (__irqflags_uses_pmr()) {
if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled_flags(flags);
} else {
return __daif_irqs_disabled_flags(flags);
@@ -137,7 +131,7 @@ static __always_inline bool __pmr_irqs_disabled(void)

static inline bool arch_irqs_disabled(void)
{
if (__irqflags_uses_pmr()) {
if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled();
} else {
return __daif_irqs_disabled();
@@ -169,7 +163,7 @@ static __always_inline unsigned long __pmr_local_irq_save(void)

static inline unsigned long arch_local_irq_save(void)
{
if (__irqflags_uses_pmr()) {
if (system_uses_irq_prio_masking()) {
return __pmr_local_irq_save();
} else {
return __daif_local_irq_save();
@@ -196,7 +190,7 @@ static __always_inline void __pmr_local_irq_restore(unsigned long flags)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
if (__irqflags_uses_pmr()) {
if (system_uses_irq_prio_masking()) {
__pmr_local_irq_restore(flags);
} else {
__daif_local_irq_restore(flags);
@@ -71,14 +71,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
if (has_vhe() || has_hvhe())
vcpu->arch.hcr_el2 |= HCR_E2H;
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
/* route synchronous external abort exceptions to EL2 */
vcpu->arch.hcr_el2 |= HCR_TEA;
/* trap error record accesses */
vcpu->arch.hcr_el2 |= HCR_TERR;
}

if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
vcpu->arch.hcr_el2 |= HCR_FWB;
} else {
/*
@@ -1052,7 +1052,7 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)

static inline bool kvm_system_needs_idmapped_vectors(void)
{
return cpus_have_const_cap(ARM64_SPECTRE_V3A);
return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@@ -218,7 +218,7 @@ static inline void __clean_dcache_guest_page(void *va, size_t size)
* faulting in pages. Furthermore, FWB implies IDC, so cleaning to
* PoU is not required either in this case.
*/
if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
return;

kvm_flush_dcache_to_poc(va, size);
@@ -57,7 +57,7 @@ typedef struct {

static inline bool arm64_kernel_unmapped_at_el0(void)
{
return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
return alternative_has_cap_unlikely(ARM64_UNMAP_KERNEL_AT_EL0);
}

extern void arm64_memblock_init(void);
@@ -152,7 +152,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
static inline void __cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap, bool cnp)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -162,17 +162,8 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
/*
* cpu_replace_ttbr1() is used when there's a boot CPU
* up (i.e. cpufeature framework is not up yet) and
* latter only when we enable CNP via cpufeature's
* enable() callback.
* Also we rely on the system_cpucaps bit being set before
* calling the enable() function.
*/
if (cnp)
ttbr1 |= TTBR_CNP_BIT;
}

replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

@@ -189,6 +180,21 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
cpu_uninstall_idmap();
}

static inline void cpu_enable_swapper_cnp(void)
{
__cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir, true);
}

static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
/*
* Only for early TTBR1 replacement before cpucaps are finalized and
* before we've decided whether to use CNP.
*/
WARN_ON(system_capabilities_finalized());

__cpu_replace_ttbr1(pgdp, idmap, false);
}

/*
* It would be nice to return ASIDs back to the allocator, but unfortunately
* that introduces a race with a generation rollover where we could erroneously
@@ -44,8 +44,7 @@ struct plt_entry {

static inline bool is_forbidden_offset_for_adrp(void *place)
{
return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
return cpus_have_final_cap(ARM64_WORKAROUND_843419) &&
((u64)place & 0xfff) >= 0xff8;
}

@@ -75,11 +75,7 @@ extern bool arm64_use_ng_mappings;
* If we have userspace only BTI we don't want to mark kernel pages
* guarded even if the system does support BTI.
*/
#ifdef CONFIG_ARM64_BTI_KERNEL
#define PTE_MAYBE_GP (system_supports_bti() ? PTE_GP : 0)
#else
#define PTE_MAYBE_GP 0
#endif
#define PTE_MAYBE_GP (system_supports_bti_kernel() ? PTE_GP : 0)

#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
@@ -73,7 +73,7 @@ static __always_inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;

if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
if (!alternative_has_cap_unlikely(ARM64_SPECTRE_V2))
return;

d = this_cpu_ptr(&bp_hardening_data);
@@ -105,7 +105,7 @@ static inline unsigned long get_trans_granule(void)
#define __tlbi_level(op, addr, level) do { \
u64 arg = addr; \
\
if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && \
if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && \
level) { \
u64 ttl = level & 3; \
ttl |= get_trans_granule() << 2; \
@@ -284,16 +284,15 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,

static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/*
* TLB flush deferral is not required on systems which are affected by
* ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
* will have two consecutive TLBI instructions with a dsb(ish) in between
* defeating the purpose (i.e save overall 'dsb ish' cost).
*/
if (unlikely(cpus_have_const_cap(ARM64_WORKAROUND_REPEAT_TLBI)))
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
return false;
#endif

return true;
}
@@ -62,7 +62,7 @@ DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
static inline const char *
arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
{
if (arm64_kernel_unmapped_at_el0())
if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return (char *)(TRAMP_VALIAS + SZ_2K * slot);

WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
@@ -121,22 +121,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
struct arm64_ftr_reg *regp;

regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
if (!regp)
return;

raw_spin_lock(&reg_user_mask_modification);
if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
raw_spin_unlock(&reg_user_mask_modification);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -727,7 +711,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A510 r0p0 - r1p1 */
ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
.cpu_enable = cpu_clear_bf16_from_user_emulation,
},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
@@ -1026,13 +1026,16 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)

if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
sve_kernel_enable(NULL);
unsigned long cpacr = cpacr_save_enable_kernel_sve();

vec_init_vq_map(ARM64_VEC_SVE);

cpacr_restore(cpacr);
}

if (IS_ENABLED(CONFIG_ARM64_SME) &&
id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
sme_kernel_enable(NULL);
unsigned long cpacr = cpacr_save_enable_kernel_sme();

/*
* We mask out SMPS since even if the hardware
@@ -1041,6 +1044,8 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
*/
info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
vec_init_vq_map(ARM64_VEC_SME);

cpacr_restore(cpacr);
}

if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
@@ -1278,14 +1283,17 @@ void update_cpu_features(int cpu,
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
if (!system_capabilities_finalized()) {
sve_kernel_enable(NULL);
unsigned long cpacr = cpacr_save_enable_kernel_sve();

vec_update_vq_map(ARM64_VEC_SVE);

cpacr_restore(cpacr);
}
}

if (IS_ENABLED(CONFIG_ARM64_SME) &&
id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
sme_kernel_enable(NULL);
unsigned long cpacr = cpacr_save_enable_kernel_sme();

/*
* We mask out SMPS since even if the hardware
@@ -1297,6 +1305,8 @@ void update_cpu_features(int cpu,
/* Probe vector lengths */
if (!system_capabilities_finalized())
vec_update_vq_map(ARM64_VEC_SME);

cpacr_restore(cpacr);
}

/*
@@ -1546,14 +1556,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}

static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

return cpuid_feature_extract_signed_field(pfr0,
ID_AA64PFR0_EL1_FP_SHIFT) < 0;
}

static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -1603,7 +1605,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
if (is_kdump_kernel())
return false;

if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
if (cpus_have_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
return false;

return has_cpuid_feature(entry, scope);
@@ -1736,16 +1738,15 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int), int flags);

static phys_addr_t kpti_ng_temp_alloc;
static phys_addr_t __initdata kpti_ng_temp_alloc;

static phys_addr_t kpti_ng_pgd_alloc(int shift)
static phys_addr_t __init kpti_ng_pgd_alloc(int shift)
{
kpti_ng_temp_alloc -= PAGE_SIZE;
return kpti_ng_temp_alloc;
}

static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
static int __init __kpti_install_ng_mappings(void *__unused)
{
typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -1758,20 +1759,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
pgd_t *kpti_ng_temp_pgd;
u64 alloc = 0;

if (__this_cpu_read(this_cpu_vector) == vectors) {
const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);

__this_cpu_write(this_cpu_vector, v);
}

/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not
* created any global mappings at all.
*/
if (arm64_use_ng_mappings)
return;

remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

if (!cpu) {
@@ -1808,14 +1795,39 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
free_pages(alloc, order);
arm64_use_ng_mappings = true;
}

return 0;
}

static void __init kpti_install_ng_mappings(void)
{
/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not
* created any global mappings at all.
*/
if (arm64_use_ng_mappings)
return;

stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
}

#else
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
static inline void kpti_install_ng_mappings(void)
{
}
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

static void cpu_enable_kpti(struct arm64_cpu_capabilities const *cap)
{
if (__this_cpu_read(this_cpu_vector) == vectors) {
const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);

__this_cpu_write(this_cpu_vector, v);
}

}

static int __init parse_kpti(char *str)
{
bool enabled;
@@ -2159,12 +2171,23 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
}
#endif /* CONFIG_ARM64_MTE */

static void user_feature_fixup(void)
{
if (cpus_have_cap(ARM64_WORKAROUND_2658417)) {
struct arm64_ftr_reg *regp;

regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
if (regp)
regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
}
}

static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
#ifdef CONFIG_COMPAT
if (cpus_have_cap(ARM64_WORKAROUND_1742098))
compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
#endif /* ARM64_ERRATUM_1742098 */
#endif /* CONFIG_COMPAT */
}

#ifdef CONFIG_KVM
@@ -2320,7 +2343,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
.cpu_enable = kpti_install_ng_mappings,
.cpu_enable = cpu_enable_kpti,
.matches = unmap_kernel_at_el0,
/*
* The ID feature fields below are used to indicate that
@@ -2330,11 +2353,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP)
},
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
.min_field_value = 0,
.matches = has_no_fpsimd,
.capability = ARM64_HAS_FPSIMD,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.cpu_enable = cpu_enable_fpsimd,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, FP, IMP)
},
#ifdef CONFIG_ARM64_PMEM
{
@@ -2357,7 +2380,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Scalable Vector Extension",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SVE,
.cpu_enable = sve_kernel_enable,
.cpu_enable = cpu_enable_sve,
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
},
@@ -2600,7 +2623,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME,
.matches = has_cpuid_feature,
.cpu_enable = sme_kernel_enable,
.cpu_enable = cpu_enable_sme,
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP)
},
/* FA64 should be sorted after the base SME capability */
@@ -2609,7 +2632,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME_FA64,
.matches = has_cpuid_feature,
.cpu_enable = fa64_kernel_enable,
.cpu_enable = cpu_enable_fa64,
ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP)
},
{
@@ -2617,7 +2640,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME2,
.matches = has_cpuid_feature,
.cpu_enable = sme2_kernel_enable,
.cpu_enable = cpu_enable_sme2,
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2)
},
#endif /* CONFIG_ARM64_SME */
@@ -3115,20 +3138,28 @@ static void verify_local_elf_hwcaps(void)

static void verify_sve_features(void)
{
unsigned long cpacr = cpacr_save_enable_kernel_sve();

if (vec_verify_vq_map(ARM64_VEC_SVE)) {
pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}

cpacr_restore(cpacr);
}

static void verify_sme_features(void)
{
unsigned long cpacr = cpacr_save_enable_kernel_sme();

if (vec_verify_vq_map(ARM64_VEC_SME)) {
pr_crit("CPU%d: SME: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}

cpacr_restore(cpacr);
}

static void verify_hyp_capabilities(void)
@@ -3235,7 +3266,6 @@ EXPORT_SYMBOL_GPL(this_cpu_has_cap);
* This helper function is used in a narrow window when,
* - The system wide safe registers are set with all the SMP CPUs and,
* - The SYSTEM_FEATURE system_cpucaps may not have been set.
* In all other cases cpus_have_{const_}cap() should be used.
*/
static bool __maybe_unused __system_matches_cap(unsigned int n)
{
@@ -3274,18 +3304,36 @@ unsigned long cpu_get_elf_hwcap2(void)
return elf_hwcap[1];
}

static void __init setup_system_capabilities(void)
void __init setup_system_features(void)
{
int i;
/*
* We have finalised the system-wide safe feature
* registers, finalise the capabilities that depend
* on it. Also enable all the available capabilities,
* that are not enabled already.
* The system-wide safe feature register values have been
* finalized. Finalize and log the available system capabilities.
*/
update_cpu_capabilities(SCOPE_SYSTEM);
if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
!cpus_have_cap(ARM64_HAS_PAN))
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

/*
* Enable all the available capabilities which have not been enabled
* already.
*/
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);

kpti_install_ng_mappings();

sve_setup();
sme_setup();

/*
* Check for sane CTR_EL0.CWG value.
*/
if (!cache_type_cwg())
pr_warn("No Cache Writeback Granule information, assuming %d\n",
ARCH_DMA_MINALIGN);

for (i = 0; i < ARM64_NCAPS; i++) {
const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];

@@ -3296,11 +3344,10 @@ static void __init setup_system_capabilities(void)
}
}

void __init setup_cpu_features(void)
void __init setup_user_features(void)
{
u32 cwg;
user_feature_fixup();

setup_system_capabilities();
setup_elf_hwcaps(arm64_elf_hwcaps);

if (system_supports_32bit_el0()) {
@@ -3308,20 +3355,7 @@ void __init setup_cpu_features(void)
elf_hwcap_fixup();
}

if (system_uses_ttbr0_pan())
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

sve_setup();
sme_setup();
minsigstksz_setup();

/*
* Check for sane CTR_EL0.CWG value.
*/
cwg = cache_type_cwg();
if (!cwg)
pr_warn("No Cache Writeback Granule information, assuming %d\n",
ARCH_DMA_MINALIGN);
}

static int enable_mismatched_32bit_el0(unsigned int cpu)
@@ -3378,7 +3412,7 @@ subsys_initcall_sync(init_32bit_el0_mask);

static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
cpu_enable_swapper_cnp();
}

/*
|
@ -113,8 +113,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
|
||||
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
|
||||
if (md->attribute & EFI_MEMORY_XP)
|
||||
pte = set_pte_bit(pte, __pgprot(PTE_PXN));
|
||||
else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
|
||||
system_supports_bti() && spd->has_bti)
|
||||
else if (system_supports_bti_kernel() && spd->has_bti)
|
||||
pte = set_pte_bit(pte, __pgprot(PTE_GP));
|
||||
set_pte(ptep, pte);
|
||||
return 0;
|
||||
|
@@ -1160,11 +1160,7 @@ fail:
panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

/*
* Enable SVE for EL1.
* Intended for use by the cpufeatures code during CPU boot.
*/
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
isb();
@@ -1177,7 +1173,7 @@ void __init sve_setup(void)
unsigned long b;
int max_bit;

if (!system_supports_sve())
if (!cpus_have_cap(ARM64_SVE))
return;

/*
@@ -1267,7 +1263,7 @@ static void sme_free(struct task_struct *task)
task->thread.sme_state = NULL;
}

void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
{
/* Set priority for all PEs to architecturally defined minimum */
write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
@@ -1282,23 +1278,21 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
isb();
}

/*
* This must be called after sme_kernel_enable(), we rely on the
* feature table being sorted to ensure this.
*/
void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
{
/* This must be enabled after SME */
BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);

/* Allow use of ZT0 */
write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
SYS_SMCR_EL1);
}

/*
* This must be called after sme_kernel_enable(), we rely on the
* feature table being sorted to ensure this.
*/
void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
{
/* This must be enabled after SME */
BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);

/* Allow use of FA64 */
write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
SYS_SMCR_EL1);
@@ -1309,7 +1303,7 @@ void __init sme_setup(void)
struct vl_info *info = &vl_info[ARM64_VEC_SME];
int min_bit, max_bit;

if (!system_supports_sme())
if (!cpus_have_cap(ARM64_SME))
return;

/*
@@ -1470,8 +1464,17 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
*/
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
{
/* TODO: implement lazy context saving/restoring */
WARN_ON(1);
/* Even if we chose not to use FPSIMD, the hardware could still trap: */
if (!system_supports_fpsimd()) {
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
return;
}

/*
* When FPSIMD is enabled, we should never take a trap unless something
* has gone very wrong.
*/
BUG();
}

/*
@@ -1712,13 +1715,23 @@ void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
void fpsimd_restore_current_state(void)
{
/*
* For the tasks that were created before we detected the absence of
* FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
* e.g, init. This could be then inherited by the children processes.
* If we later detect that the system doesn't support FP/SIMD,
* we must clear the flag for all the tasks to indicate that the
* FPSTATE is clean (as we can't have one) to avoid looping for ever in
* do_notify_resume().
* TIF_FOREIGN_FPSTATE is set on the init task and copied by
* arch_dup_task_struct() regardless of whether FP/SIMD is detected.
* Thus user threads can have this set even when FP/SIMD hasn't been
* detected.
*
* When FP/SIMD is detected, begin_new_exec() will set
* TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(),
* and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when
* switching tasks. We detect FP/SIMD before we exec the first user
* process, ensuring this has TIF_FOREIGN_FPSTATE set and
* do_notify_resume() will call fpsimd_restore_current_state() to
* install the user FP/SIMD context.
*
* When FP/SIMD is not detected, nothing else will clear or set
* TIF_FOREIGN_FPSTATE prior to the first return to userspace, and
* we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume()
* looping forever calling fpsimd_restore_current_state().
*/
if (!system_supports_fpsimd()) {
clear_thread_flag(TIF_FOREIGN_FPSTATE);
@@ -2051,6 +2064,13 @@ static inline void fpsimd_hotplug_init(void)
static inline void fpsimd_hotplug_init(void) { }
#endif

void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p)
{
unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;
write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1);
isb();
}

/*
* FP/SIMD support code initialisation.
*/
@@ -200,8 +200,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
case R_AARCH64_ADR_PREL_PG_HI21:
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
!cpus_have_const_cap(ARM64_WORKAROUND_843419))
if (!cpus_have_final_cap(ARM64_WORKAROUND_843419))
break;

/*
@@ -236,13 +235,13 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
}
}

if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
cpus_have_const_cap(ARM64_WORKAROUND_843419))
if (cpus_have_final_cap(ARM64_WORKAROUND_843419)) {
/*
* Add some slack so we can skip PLT slots that may trigger
* the erratum due to the placement of the ADRP instruction.
*/
ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
}

return ret;
}
@@ -454,7 +454,7 @@ static void ssbs_thread_switch(struct task_struct *next)
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
if (cpus_have_const_cap(ARM64_SSBS))
if (alternative_has_cap_unlikely(ARM64_SSBS))
return;

spectre_v4_enable_task_mitigation(next);
@@ -972,7 +972,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
* When KPTI is in use, the vectors are switched when exiting to
* user-space.
*/
if (arm64_kernel_unmapped_at_el0())
if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return;

write_sysreg(v, vbar_el1);
@@ -439,9 +439,10 @@ static void __init hyp_mode_check(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
setup_cpu_features();
setup_system_features();
hyp_mode_check();
apply_alternatives_all();
setup_user_features();
mark_linear_text_alias_ro();
}

@@ -55,13 +55,13 @@ void notrace __cpu_suspend_exit(void)

/* Restore CnP bit in TTBR1_EL1 */
if (system_supports_cnp())
cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
cpu_enable_swapper_cnp();

/*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
if (cpus_have_const_cap(ARM64_HAS_DIT))
if (alternative_has_cap_unlikely(ARM64_HAS_DIT))
set_pstate_dit(1);
__uaccess_enable_hw_pan();

@@ -98,6 +98,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
struct sleep_stack_data state;
struct arm_cpuidle_irq_context context;

/*
* Some portions of CPU state (e.g. PSTATE.{PAN,DIT}) are initialized
* before alternatives are patched, but are only restored by
* __cpu_suspend_exit() after alternatives are patched. To avoid
* accidentally losing these bits we must not attempt to suspend until
* after alternatives have been patched.
*/
WARN_ON(!system_capabilities_finalized());

/* Report any MTE async fault before going to suspend */
mte_suspend_enter();
@@ -31,7 +31,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;

if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
/*
* The workaround requires an inner-shareable tlbi.
* We pick the reserved-ASID to minimise the impact.
@@ -631,7 +631,7 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
/* Hide DIC so that we can trap the unnecessary maintenance...*/
val &= ~BIT(CTR_EL0_DIC_SHIFT);

@@ -212,7 +212,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
if (IS_ERR(ret))
goto up_fail;

if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
if (system_supports_bti_kernel())
gp_flags = VM_ARM64_BTI;

vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
@@ -284,7 +284,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = kvm_arm_pvtime_supported();
break;
case KVM_CAP_ARM_EL1_32BIT:
r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
break;
case KVM_CAP_GUEST_DEBUG_HW_BPS:
r = get_num_brps();
@@ -296,7 +296,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = kvm_arm_support_pmu_v3();
break;
case KVM_CAP_ARM_INJECT_SERROR_ESR:
r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
break;
case KVM_CAP_ARM_VM_IPA_SIZE:
r = get_kvm_ipa_limit();
@@ -1207,7 +1207,7 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
return 0;

if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
return -EINVAL;

/* MTE is incompatible with AArch32 */
@@ -1777,7 +1777,7 @@ static void hyp_install_host_vector(void)
* Call initialization code, and switch to the full blown HYP code.
* If the cpucaps haven't been finalized yet, something has gone very
* wrong, and hyp will crash and burn when it uses any
* cpus_have_const_cap() wrapper.
* cpus_have_*_cap() wrapper.
*/
BUG_ON(!system_capabilities_finalized());
params = this_cpu_ptr_nvhe_sym(kvm_init_params);
@@ -2310,7 +2310,7 @@ static int __init init_hyp_mode(void)

if (is_protected_kvm_enabled()) {
if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH))
cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
pkvm_hyp_init_ptrauth();

init_cpu_logical_map();

@@ -815,7 +815,7 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);

if (events->exception.serror_pending && events->exception.serror_has_esr)
events->exception.serror_esr = vcpu_get_vsesr(vcpu);
@@ -837,7 +837,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool ext_dabt_pending = events->exception.ext_dabt_pending;

if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL;

if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
@@ -401,7 +401,7 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
if (device)
return -EINVAL;

if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
if (system_supports_bti_kernel())
attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
} else {
attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
@@ -664,7 +664,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)

static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
return false;

return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
@@ -1578,7 +1578,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

if (device)
prot |= KVM_PGTABLE_PROT_DEVICE;
else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
prot |= KVM_PGTABLE_PROT_X;

/*
@@ -207,7 +207,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
* CPU left in the system, and certainly not from non-secure
* software).
*/
if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
kvm_set_way_flush(vcpu);

return true;
@@ -684,7 +684,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
if (kvm_vgic_global_state.vcpu_base == 0)
kvm_info("disabling GICv2 emulation\n");

if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
group0_trap = true;
group1_trap = true;
}

@@ -27,7 +27,7 @@ void __delay(unsigned long cycles)
{
cycles_t start = get_cycles();

if (cpus_have_const_cap(ARM64_HAS_WFXT)) {
if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) {
u64 end = start + cycles;

/*
@@ -571,7 +571,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
/* Write implies read */
vm_flags |= VM_WRITE;
/* If EPAN is absent then exec implies read */
if (!cpus_have_const_cap(ARM64_HAS_EPAN))
if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
vm_flags |= VM_EXEC;
}

@@ -555,8 +555,7 @@ bool __init arch_hugetlb_valid_size(unsigned long size)

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
* when the permission changes from executable to non-executable
@@ -68,7 +68,7 @@ static int __init adjust_protection_map(void)
* With Enhanced PAN we can honour the execute-only permissions as
* there is no PAN override with such mappings.
*/
if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
if (cpus_have_cap(ARM64_HAS_EPAN)) {
protection_map[VM_EXEC] = PAGE_EXECONLY;
protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
}

@@ -1469,8 +1469,7 @@ early_initcall(prevent_bootmem_remove_init);

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
* when the permission changes from executable to non-executable

@@ -405,8 +405,7 @@ SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh

mov x1, #3 << 20
msr cpacr_el1, x1 // Enable FP/ASIMD
msr cpacr_el1, xzr // Reset cpacr_el1
mov x1, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x1 // access to the DCC from EL0
isb // Unmask debug exceptions now,
@@ -3,7 +3,7 @@
gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm

kapi-hdrs-y := $(kapi)/cpucaps.h $(kapi)/sysreg-defs.h
kapi-hdrs-y := $(kapi)/cpucap-defs.h $(kapi)/sysreg-defs.h

targets += $(addprefix ../../../, $(kapi-hdrs-y))

@@ -17,7 +17,7 @@ quiet_cmd_gen_cpucaps = GEN $@
quiet_cmd_gen_sysreg = GEN $@
cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@

$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
$(kapi)/cpucap-defs.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
$(call if_changed,gen_cpucaps)

$(kapi)/sysreg-defs.h: $(src)/gen-sysreg.awk $(src)/sysreg FORCE

@@ -27,6 +27,7 @@ HAS_ECV_CNTPOFF
HAS_EPAN
HAS_EVT
HAS_FGT
HAS_FPSIMD
HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3
HAS_GENERIC_AUTH_ARCH_QARMA5
@@ -39,7 +40,6 @@ HAS_LDAPR
HAS_LSE_ATOMICS
HAS_MOPS
HAS_NESTED_VIRT
HAS_NO_FPSIMD
HAS_NO_HW_PREFETCH
HAS_PAN
HAS_S1PIE

@@ -15,8 +15,8 @@ function fatal(msg) {
/^#/ { next }

BEGIN {
print "#ifndef __ASM_CPUCAPS_H"
print "#define __ASM_CPUCAPS_H"
print "#ifndef __ASM_CPUCAP_DEFS_H"
print "#define __ASM_CPUCAP_DEFS_H"
print ""
print "/* Generated file - do not edit */"
cap_num = 0
@@ -31,7 +31,7 @@ BEGIN {
END {
printf("#define ARM64_NCAPS\t\t\t\t\t%d\n", cap_num)
print ""
print "#endif /* __ASM_CPUCAPS_H */"
print "#endif /* __ASM_CPUCAP_DEFS_H */"
}

# Any lines not handled by previous rules are unexpected
@@ -918,7 +918,7 @@ static void arch_timer_evtstrm_enable(unsigned int divider)

#ifdef CONFIG_ARM64
/* ECV is likely to require a large divider. Use the EVNTIS flag. */
if (cpus_have_const_cap(ARM64_HAS_ECV) && divider > 15) {
if (cpus_have_final_cap(ARM64_HAS_ECV) && divider > 15) {
cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE;
divider -= 8;
}
@@ -956,6 +956,30 @@ static void arch_timer_configure_evtstream(void)
arch_timer_evtstrm_enable(max(0, lsb));
}

static int arch_timer_evtstrm_starting_cpu(unsigned int cpu)
{
arch_timer_configure_evtstream();
return 0;
}

static int arch_timer_evtstrm_dying_cpu(unsigned int cpu)
{
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
return 0;
}

static int __init arch_timer_evtstrm_register(void)
{
if (!arch_timer_evt || !evtstrm_enable)
return 0;

return cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
"clockevents/arm/arch_timer_evtstrm:starting",
arch_timer_evtstrm_starting_cpu,
arch_timer_evtstrm_dying_cpu);
}
core_initcall(arch_timer_evtstrm_register);

static void arch_counter_set_user_access(void)
{
u32 cntkctl = arch_timer_get_cntkctl();
@@ -1017,8 +1041,6 @@ static int arch_timer_starting_cpu(unsigned int cpu)
}

arch_counter_set_user_access();
if (evtstrm_enable)
arch_timer_configure_evtstream();

return 0;
}
@@ -1165,8 +1187,6 @@ static int arch_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

arch_timer_stop(clk);
return 0;
}
@@ -1280,6 +1300,7 @@ out_unreg_notify:

out_free:
free_percpu(arch_timer_evt);
arch_timer_evt = NULL;
out:
return err;
}
@@ -277,17 +277,6 @@ static void gic_redist_wait_for_rwp(void)
gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
void __iomem *rbase;
@@ -172,6 +172,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
@@ -189,6 +190,7 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,