2014-07-24 13:14:42 +00:00
|
|
|
/*
|
|
|
|
* Macros for accessing system registers with older binutils.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2014 ARM Ltd.
|
|
|
|
* Author: Catalin Marinas <catalin.marinas@arm.com>
|
|
|
|
*
|
|
|
|
* This program is free software: you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __ASM_SYSREG_H
|
|
|
|
#define __ASM_SYSREG_H
|
|
|
|
|
2018-01-15 19:38:55 +00:00
|
|
|
#include <asm/compiler.h>
|
2015-11-05 15:09:17 +00:00
|
|
|
#include <linux/stringify.h>
|
|
|
|
|
2015-07-22 10:38:14 +00:00
|
|
|
/*
|
|
|
|
* ARMv8 ARM reserves the following encoding for system registers:
|
|
|
|
* (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
|
|
|
|
* C5.2, version:ARM DDI 0487A.f)
|
|
|
|
* [20-19] : Op0
|
|
|
|
* [18-16] : Op1
|
|
|
|
* [15-12] : CRn
|
|
|
|
* [11-8] : CRm
|
|
|
|
* [7-5] : Op2
|
|
|
|
*/
|
2017-01-09 17:28:28 +00:00
|
|
|
#define Op0_shift 19
|
|
|
|
#define Op0_mask 0x3
|
|
|
|
#define Op1_shift 16
|
|
|
|
#define Op1_mask 0x7
|
|
|
|
#define CRn_shift 12
|
|
|
|
#define CRn_mask 0xf
|
|
|
|
#define CRm_shift 8
|
|
|
|
#define CRm_mask 0xf
|
|
|
|
#define Op2_shift 5
|
|
|
|
#define Op2_mask 0x7
|
|
|
|
|
2014-07-24 13:14:42 +00:00
|
|
|
#define sys_reg(op0, op1, crn, crm, op2) \
|
2017-01-09 17:28:28 +00:00
|
|
|
(((op0) << Op0_shift) | ((op1) << Op1_shift) | \
|
|
|
|
((crn) << CRn_shift) | ((crm) << CRm_shift) | \
|
|
|
|
((op2) << Op2_shift))
|
|
|
|
|
2017-01-13 17:47:46 +00:00
|
|
|
#define sys_insn sys_reg
|
|
|
|
|
2017-01-09 17:28:28 +00:00
|
|
|
#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask)
|
|
|
|
#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask)
|
|
|
|
#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask)
|
|
|
|
#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask)
|
|
|
|
#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask)
|
2014-07-24 13:14:42 +00:00
|
|
|
|
2016-12-06 15:27:45 +00:00
|
|
|
#ifndef CONFIG_BROKEN_GAS_INST
|
|
|
|
|
2016-12-01 10:44:33 +00:00
|
|
|
#ifdef __ASSEMBLY__
|
|
|
|
#define __emit_inst(x) .inst (x)
|
|
|
|
#else
|
|
|
|
#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
|
|
|
|
#endif
|
|
|
|
|
2016-12-06 15:27:45 +00:00
|
|
|
#else /* CONFIG_BROKEN_GAS_INST */
|
|
|
|
|
|
|
|
#ifndef CONFIG_CPU_BIG_ENDIAN
|
|
|
|
#define __INSTR_BSWAP(x) (x)
|
|
|
|
#else /* CONFIG_CPU_BIG_ENDIAN */
|
|
|
|
#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \
|
|
|
|
(((x) << 8) & 0x00ff0000) | \
|
|
|
|
(((x) >> 8) & 0x0000ff00) | \
|
|
|
|
(((x) >> 24) & 0x000000ff))
|
|
|
|
#endif /* CONFIG_CPU_BIG_ENDIAN */
|
|
|
|
|
|
|
|
#ifdef __ASSEMBLY__
|
|
|
|
#define __emit_inst(x) .long __INSTR_BSWAP(x)
|
|
|
|
#else /* __ASSEMBLY__ */
|
|
|
|
#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
|
|
|
|
#endif /* CONFIG_BROKEN_GAS_INST */
|
|
|
|
|
2017-01-19 17:18:30 +00:00
|
|
|
/*
 * PSTATE field encodings for use with the MSR (immediate) instruction
 * (base opcode 0xd5000000); the 1-bit immediate lands in CRm (bit 8,
 * per the sys_reg() field layout above).
 */
#define REG_PSTATE_PAN_IMM		sys_reg(0, 0, 4, 0, 4)
#define REG_PSTATE_UAO_IMM		sys_reg(0, 0, 4, 0, 3)
#define REG_PSTATE_SSBS_IMM		sys_reg(0, 3, 4, 0, 1)

/*
 * Emit an MSR (immediate) instruction setting the given PSTATE field to
 * !!(x).  The argument is fully parenthesised so that expression
 * arguments (e.g. SET_PSTATE_PAN(a & b)) expand correctly.
 */
#define SET_PSTATE_PAN(x)		__emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM |	\
						    ((!!(x)) << 8) | 0x1f)
#define SET_PSTATE_UAO(x)		__emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM |	\
						    ((!!(x)) << 8) | 0x1f)
#define SET_PSTATE_SSBS(x)		__emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM |	\
						    ((!!(x)) << 8) | 0x1f)
|
2017-01-19 17:18:30 +00:00
|
|
|
|
2017-01-13 17:47:46 +00:00
|
|
|
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
|
|
|
|
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
|
|
|
|
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
|
|
|
|
|
2017-01-13 16:55:01 +00:00
|
|
|
#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
|
|
|
|
#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
|
|
|
|
#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
|
|
|
|
#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2)
|
|
|
|
#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2)
|
|
|
|
#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
|
|
|
|
#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
|
|
|
|
#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
|
|
|
|
#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
|
|
|
|
#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
|
|
|
|
#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4)
|
|
|
|
#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
|
|
|
|
#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
|
|
|
|
#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
|
|
|
|
#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6)
|
|
|
|
#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6)
|
|
|
|
#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6)
|
|
|
|
#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0)
|
|
|
|
#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0)
|
|
|
|
#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0)
|
|
|
|
#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
|
|
|
|
#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
|
|
|
|
|
2015-10-19 13:24:45 +00:00
|
|
|
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
|
|
|
|
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
|
|
|
|
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
|
|
|
|
|
|
|
|
#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
|
|
|
|
#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
|
|
|
|
#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3)
|
2015-10-19 13:24:45 +00:00
|
|
|
#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
|
|
|
|
#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
|
|
|
|
#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
|
|
|
|
#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7)
|
|
|
|
|
|
|
|
#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0)
|
|
|
|
#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1)
|
|
|
|
#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2)
|
|
|
|
#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3)
|
|
|
|
#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
|
|
|
|
#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
|
|
|
|
#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
|
|
|
|
|
|
|
|
#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
|
|
|
|
#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
|
|
|
|
#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
|
|
|
|
|
|
|
|
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
|
|
|
|
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
|
2017-10-31 15:51:00 +00:00
|
|
|
#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
|
2015-10-19 13:24:45 +00:00
|
|
|
|
|
|
|
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
|
|
|
|
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
|
|
|
|
|
arm64: KVM: Hide unsupported AArch64 CPU features from guests
Currently, a guest kernel sees the true CPU feature registers
(ID_*_EL1) when it reads them using MRS instructions. This means
that the guest may observe features that are present in the
hardware but the host doesn't understand or doesn't provide support
for. A guest may legitimately try to use such a feature as per the
architecture, but use of the feature may trap instead of working
normally, triggering undef injection into the guest.
This is not a problem for the host, but the guest may go wrong when
running on newer hardware than the host knows about.
This patch hides from guest VMs any AArch64-specific CPU features
that the host doesn't support, by exposing to the guest the
sanitised versions of the registers computed by the cpufeatures
framework, instead of the true hardware registers. To achieve
this, HCR_EL2.TID3 is now set for AArch64 guests, and emulation
code is added to KVM to report the sanitised versions of the
affected registers in response to MRS and register reads from
userspace.
The affected registers are removed from invariant_sys_regs[] (since
the invariant_sys_regs handling is no longer quite correct for
them) and added to sys_reg_desgs[], with appropriate access(),
get_user() and set_user() methods. No runtime vcpu storage is
allocated for the registers: instead, they are read on demand from
the cpufeatures framework. This may need modification in the
future if there is a need for userspace to customise the features
visible to the guest.
Attempts by userspace to write the registers are handled similarly
to the current invariant_sys_regs handling: writes are permitted,
but only if they don't attempt to change the value. This is
sufficient to support VM snapshot/restore from userspace.
Because of the additional registers, restoring a VM on an older
kernel may not work unless userspace knows how to handle the extra
VM registers exposed to the KVM user ABI by this patch.
Under the principle of least damage, this patch makes no attempt to
handle any of the other registers currently in
invariant_sys_regs[], or to emulate registers for AArch32: however,
these could be handled in a similar way in future, as necessary.
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2017-10-31 15:50:56 +00:00
|
|
|
#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
|
|
|
|
#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
|
|
|
|
|
2015-10-19 13:24:45 +00:00
|
|
|
#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
|
|
|
|
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
|
|
|
|
|
|
|
|
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
|
|
|
|
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
|
2016-02-05 14:58:47 +00:00
|
|
|
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
|
2015-10-19 13:24:45 +00:00
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
|
|
|
|
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
|
|
|
|
#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
|
|
|
|
|
2017-10-31 15:51:00 +00:00
|
|
|
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
|
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
|
|
|
|
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
|
|
|
|
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
|
|
|
|
|
2017-01-19 17:57:43 +00:00
|
|
|
#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
|
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
|
|
|
|
#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
|
|
|
|
#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
|
2018-01-15 19:39:06 +00:00
|
|
|
|
|
|
|
#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0)
|
|
|
|
#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1)
|
|
|
|
#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0)
|
|
|
|
#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
|
|
|
|
#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
|
|
|
|
#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
|
|
|
|
#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
|
|
|
|
#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
|
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
|
|
|
|
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
|
|
|
|
|
2017-09-20 15:48:33 +00:00
|
|
|
/*** Statistical Profiling Extension ***/
|
|
|
|
/* ID registers */
|
|
|
|
#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
|
|
|
|
#define SYS_PMSIDR_EL1_FE_SHIFT 0
|
|
|
|
#define SYS_PMSIDR_EL1_FT_SHIFT 1
|
|
|
|
#define SYS_PMSIDR_EL1_FL_SHIFT 2
|
|
|
|
#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3
|
|
|
|
#define SYS_PMSIDR_EL1_LDS_SHIFT 4
|
|
|
|
#define SYS_PMSIDR_EL1_ERND_SHIFT 5
|
|
|
|
#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8
|
|
|
|
#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL
|
|
|
|
#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12
|
|
|
|
#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL
|
|
|
|
#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16
|
|
|
|
#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL
|
|
|
|
|
|
|
|
#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
|
|
|
|
#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0
|
|
|
|
#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU
|
|
|
|
#define SYS_PMBIDR_EL1_P_SHIFT 4
|
|
|
|
#define SYS_PMBIDR_EL1_F_SHIFT 5
|
|
|
|
|
|
|
|
/* Sampling controls */
|
|
|
|
#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
|
|
|
|
#define SYS_PMSCR_EL1_E0SPE_SHIFT 0
|
|
|
|
#define SYS_PMSCR_EL1_E1SPE_SHIFT 1
|
|
|
|
#define SYS_PMSCR_EL1_CX_SHIFT 3
|
|
|
|
#define SYS_PMSCR_EL1_PA_SHIFT 4
|
|
|
|
#define SYS_PMSCR_EL1_TS_SHIFT 5
|
|
|
|
#define SYS_PMSCR_EL1_PCT_SHIFT 6
|
|
|
|
|
|
|
|
#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0)
|
|
|
|
#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0
|
|
|
|
#define SYS_PMSCR_EL2_E2SPE_SHIFT 1
|
|
|
|
#define SYS_PMSCR_EL2_CX_SHIFT 3
|
|
|
|
#define SYS_PMSCR_EL2_PA_SHIFT 4
|
|
|
|
#define SYS_PMSCR_EL2_TS_SHIFT 5
|
|
|
|
#define SYS_PMSCR_EL2_PCT_SHIFT 6
|
|
|
|
|
|
|
|
#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2)
|
|
|
|
|
|
|
|
#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3)
|
|
|
|
#define SYS_PMSIRR_EL1_RND_SHIFT 0
|
|
|
|
#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8
|
|
|
|
#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL
|
|
|
|
|
|
|
|
/* Filtering controls */
|
|
|
|
#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4)
|
|
|
|
#define SYS_PMSFCR_EL1_FE_SHIFT 0
|
|
|
|
#define SYS_PMSFCR_EL1_FT_SHIFT 1
|
|
|
|
#define SYS_PMSFCR_EL1_FL_SHIFT 2
|
|
|
|
#define SYS_PMSFCR_EL1_B_SHIFT 16
|
|
|
|
#define SYS_PMSFCR_EL1_LD_SHIFT 17
|
|
|
|
#define SYS_PMSFCR_EL1_ST_SHIFT 18
|
|
|
|
|
|
|
|
#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5)
|
|
|
|
#define SYS_PMSEVFR_EL1_RES0 0x0000ffff00ff0f55UL
|
|
|
|
|
|
|
|
#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6)
|
|
|
|
#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0
|
|
|
|
|
|
|
|
/* Buffer controls */
|
|
|
|
#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
|
|
|
|
#define SYS_PMBLIMITR_EL1_E_SHIFT 0
|
|
|
|
#define SYS_PMBLIMITR_EL1_FM_SHIFT 1
|
|
|
|
#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL
|
|
|
|
#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
|
|
|
|
|
|
|
|
#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1)
|
|
|
|
|
|
|
|
/* Buffer error reporting */
|
|
|
|
#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3)
|
|
|
|
#define SYS_PMBSR_EL1_COLL_SHIFT 16
|
|
|
|
#define SYS_PMBSR_EL1_S_SHIFT 17
|
|
|
|
#define SYS_PMBSR_EL1_EA_SHIFT 18
|
|
|
|
#define SYS_PMBSR_EL1_DL_SHIFT 19
|
|
|
|
#define SYS_PMBSR_EL1_EC_SHIFT 26
|
|
|
|
#define SYS_PMBSR_EL1_EC_MASK 0x3fUL
|
|
|
|
|
|
|
|
#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
|
|
|
|
#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
|
|
|
|
#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
|
|
|
|
|
|
|
|
#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0
|
|
|
|
#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL
|
|
|
|
|
|
|
|
#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0
|
|
|
|
#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL
|
|
|
|
|
|
|
|
#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
|
|
|
|
|
|
|
|
/*** End of Statistical Profiling Extension ***/
|
|
|
|
|
2017-01-20 16:25:51 +00:00
|
|
|
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
|
|
|
|
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
|
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
|
|
|
|
#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
|
|
|
|
|
arm64/kvm: Prohibit guest LOR accesses
We don't currently limit guest accesses to the LOR registers, which we
neither virtualize nor context-switch. As such, guests are provided with
unusable information/controls, and are not isolated from each other (or
the host).
To prevent these issues, we can trap register accesses and present the
illusion LORegions are unsupported by the CPU. To do this, we mask
ID_AA64MMFR1.LO, and set HCR_EL2.TLOR to trap accesses to the following
registers:
* LORC_EL1
* LOREA_EL1
* LORID_EL1
* LORN_EL1
* LORSA_EL1
... when trapped, we inject an UNDEFINED exception to EL1, simulating
their non-existence.
As noted in D7.2.67, when no LORegions are implemented, LoadLOAcquire
and StoreLORelease must behave as LoadAcquire and StoreRelease
respectively. We can ensure this by clearing LORC_EL1.EN when a CPU's
EL2 is first initialized, as the host kernel will not modify this.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Vladimir Murzin <vladimir.murzin@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: kvmarm@lists.cs.columbia.edu
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
2018-02-13 13:39:23 +00:00
|
|
|
#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0)
|
|
|
|
#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1)
|
|
|
|
#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2)
|
|
|
|
#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3)
|
|
|
|
#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7)
|
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
|
2018-01-15 19:38:59 +00:00
|
|
|
#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
|
2017-01-13 18:36:51 +00:00
|
|
|
|
2017-06-09 11:49:44 +00:00
|
|
|
#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
|
|
|
|
#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
|
|
|
|
#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2)
|
2017-06-09 11:49:42 +00:00
|
|
|
#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3)
|
2017-06-09 11:49:44 +00:00
|
|
|
#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n)
|
2017-06-05 13:20:01 +00:00
|
|
|
#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0)
|
|
|
|
#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1)
|
|
|
|
#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2)
|
|
|
|
#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3)
|
2017-06-09 11:49:38 +00:00
|
|
|
#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n)
|
2017-06-05 13:20:01 +00:00
|
|
|
#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0)
|
|
|
|
#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1)
|
|
|
|
#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2)
|
|
|
|
#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3)
|
2017-01-19 17:57:43 +00:00
|
|
|
#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
|
2017-06-09 11:49:50 +00:00
|
|
|
#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3)
|
2017-01-19 17:57:43 +00:00
|
|
|
#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
|
2018-08-06 12:03:36 +00:00
|
|
|
#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6)
|
|
|
|
#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7)
|
2017-01-19 17:57:43 +00:00
|
|
|
#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
|
|
|
|
#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
|
2017-06-09 11:49:39 +00:00
|
|
|
#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2)
|
2017-01-19 17:57:43 +00:00
|
|
|
#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
|
|
|
|
#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
|
|
|
|
#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
|
2017-06-05 13:20:00 +00:00
|
|
|
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
|
|
|
|
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
|
2017-01-19 17:57:43 +00:00
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
|
|
|
|
#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
|
|
|
|
|
|
|
|
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
|
|
|
|
|
|
|
|
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
|
|
|
|
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
|
|
|
|
|
|
|
|
#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
|
|
|
|
|
2015-10-19 13:24:45 +00:00
|
|
|
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
|
|
|
|
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
|
|
|
|
|
2017-01-20 16:25:51 +00:00
|
|
|
#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
|
|
|
|
#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
|
|
|
|
#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
|
|
|
|
#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
|
|
|
|
#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
|
|
|
|
#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
|
|
|
|
#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
|
|
|
|
#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
|
|
|
|
#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
|
|
|
|
#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1)
|
|
|
|
#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2)
|
|
|
|
#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0)
|
|
|
|
#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3)
|
2015-07-22 18:05:54 +00:00
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
|
|
|
|
#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
|
|
|
|
|
2017-01-19 17:18:30 +00:00
|
|
|
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
|
2015-07-22 18:05:54 +00:00
|
|
|
|
2017-03-09 16:47:06 +00:00
|
|
|
#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
|
|
|
|
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
|
|
|
|
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
|
|
|
|
|
2017-01-20 16:25:51 +00:00
|
|
|
#define __PMEV_op2(n) ((n) & 0x7)
|
|
|
|
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
|
|
|
|
#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
|
|
|
|
#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
|
|
|
|
#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
|
|
|
|
|
|
|
|
/* No space before the argument list, matching the other SYS_* definitions. */
#define SYS_PMCCFILTR_EL0	sys_reg(3, 3, 14, 15, 7)
|
|
|
|
|
2017-10-31 15:51:00 +00:00
|
|
|
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
|
|
|
|
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
|
|
|
|
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
|
2018-01-15 19:39:01 +00:00
|
|
|
#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
|
2017-01-13 18:36:51 +00:00
|
|
|
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
|
|
|
|
|
2018-01-15 19:39:02 +00:00
|
|
|
#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
|
2017-01-19 17:57:43 +00:00
|
|
|
#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
|
|
|
|
#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
|
|
|
|
#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
|
|
|
|
#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2)
|
|
|
|
#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3)
|
|
|
|
|
|
|
|
#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
|
|
|
|
#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0)
|
|
|
|
#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1)
|
|
|
|
#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2)
|
|
|
|
#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3)
|
|
|
|
|
|
|
|
#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
|
|
|
|
#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
|
|
|
|
#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
|
|
|
|
#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
|
|
|
|
#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
|
|
|
|
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
|
|
|
|
#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
|
|
|
|
#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
|
|
|
|
|
|
|
|
#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
|
|
|
|
#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0)
|
|
|
|
#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1)
|
|
|
|
#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2)
|
|
|
|
#define SYS_ICH_LR3_EL2 __SYS__LR0_EL2(3)
|
|
|
|
#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4)
|
|
|
|
#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5)
|
|
|
|
#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6)
|
|
|
|
#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7)
|
|
|
|
|
|
|
|
#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
|
|
|
|
#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0)
|
|
|
|
#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1)
|
|
|
|
#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2)
|
|
|
|
#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3)
|
|
|
|
#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4)
|
|
|
|
#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5)
|
|
|
|
#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
|
|
|
|
#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
|
2015-07-22 18:05:54 +00:00
|
|
|
|
2016-04-27 16:47:01 +00:00
|
|
|
/*
 * Common SCTLR_ELx flags.
 *
 * SCTLR_ELx_M is written as (1 << 0) for consistency with the other
 * single-bit flag definitions (value unchanged).
 */
#define SCTLR_ELx_DSSBS	(1UL << 44)
#define SCTLR_ELx_EE    (1 << 25)
#define SCTLR_ELx_IESB	(1 << 21)
#define SCTLR_ELx_WXN	(1 << 19)
#define SCTLR_ELx_I	(1 << 12)
#define SCTLR_ELx_SA	(1 << 3)
#define SCTLR_ELx_C	(1 << 2)
#define SCTLR_ELx_A	(1 << 1)
#define SCTLR_ELx_M	(1 << 0)
|
|
|
|
|
2018-01-15 19:38:58 +00:00
|
|
|
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
|
|
|
|
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)
|
2018-01-15 19:38:55 +00:00
|
|
|
|
|
|
|
/* SCTLR_EL2 specific flags. */
|
2017-06-06 18:08:33 +00:00
|
|
|
#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
|
2017-06-20 13:30:42 +00:00
|
|
|
(1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
|
|
|
|
(1 << 29))
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
|
|
|
|
(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
|
2018-01-15 19:38:58 +00:00
|
|
|
(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
|
arm64: move SCTLR_EL{1,2} assertions to <asm/sysreg.h>
Currently we assert that the SCTLR_EL{1,2}_{SET,CLEAR} bits are
self-consistent with an assertion in config_sctlr_el1(). This is a bit
unusual, since config_sctlr_el1() doesn't make use of these definitions,
and is far away from the definitions themselves.
We can use the CPP #error directive to have equivalent assertions in
<asm/sysreg.h>, next to the definitions of the set/clear bits, which is
a bit clearer and simpler.
At the same time, lets fill in the upper 32 bits for both registers in
their respective RES0 definitions. This could be a little nicer with
GENMASK_ULL(63, 32), but this currently lives in <linux/bitops.h>, which
cannot safely be included from assembly, as <asm/sysreg.h> can.
Note that when the preprocessor evaluates an expression for an #if
directive, all signed or unsigned values are treated as intmax_t or
uintmax_t respectively. To avoid ambiguity, we explicitly define
the mask of all 64 bits.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Martin <dave.martin@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2018-07-11 13:56:37 +00:00
|
|
|
(1 << 27) | (1 << 30) | (1 << 31) | \
|
2018-06-15 10:37:34 +00:00
|
|
|
(0xffffefffUL << 32))
|
2018-01-15 19:38:55 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_BIG_ENDIAN
|
|
|
|
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
|
|
|
|
#define ENDIAN_CLEAR_EL2 0
|
|
|
|
#else
|
|
|
|
#define ENDIAN_SET_EL2 0
|
|
|
|
#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* SCTLR_EL2 value used for the hyp-stub */
|
2018-01-15 19:38:58 +00:00
|
|
|
#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
|
|
|
|
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
|
2018-06-15 10:37:34 +00:00
|
|
|
SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
|
2018-01-15 19:38:55 +00:00
|
|
|
|
arm64: move SCTLR_EL{1,2} assertions to <asm/sysreg.h>
Currently we assert that the SCTLR_EL{1,2}_{SET,CLEAR} bits are
self-consistent with an assertion in config_sctlr_el1(). This is a bit
unusual, since config_sctlr_el1() doesn't make use of these definitions,
and is far away from the definitions themselves.
We can use the CPP #error directive to have equivalent assertions in
<asm/sysreg.h>, next to the definitions of the set/clear bits, which is
a bit clearer and simpler.
At the same time, lets fill in the upper 32 bits for both registers in
their respective RES0 definitions. This could be a little nicer with
GENMASK_ULL(63, 32), but this currently lives in <linux/bitops.h>, which
cannot safely be included from assembly, as <asm/sysreg.h> can.
Note that when the preprocessor evaluates an expression for an #if
directive, all signed or unsigned values are treated as intmax_t or
uintmax_t respectively. To avoid ambiguity, we explicitly define
the mask of all 64 bits.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Martin <dave.martin@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2018-07-11 13:56:37 +00:00
|
|
|
#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
|
|
|
|
#error "Inconsistent SCTLR_EL2 set/clear bits"
|
|
|
|
#endif
|
2016-04-27 16:47:01 +00:00
|
|
|
|
|
|
|
/* SCTLR_EL1 specific flags. */
|
2016-06-28 17:07:32 +00:00
|
|
|
#define SCTLR_EL1_UCI (1 << 26)
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL1_E0E (1 << 24)
|
2016-04-27 16:47:01 +00:00
|
|
|
#define SCTLR_EL1_SPAN (1 << 23)
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL1_NTWE (1 << 18)
|
|
|
|
#define SCTLR_EL1_NTWI (1 << 16)
|
2016-09-09 13:07:16 +00:00
|
|
|
#define SCTLR_EL1_UCT (1 << 15)
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL1_DZE (1 << 14)
|
|
|
|
#define SCTLR_EL1_UMA (1 << 9)
|
2016-04-27 16:47:01 +00:00
|
|
|
#define SCTLR_EL1_SED (1 << 8)
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL1_ITD (1 << 7)
|
2016-04-27 16:47:01 +00:00
|
|
|
#define SCTLR_EL1_CP15BEN (1 << 5)
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL1_SA0 (1 << 4)
|
|
|
|
|
|
|
|
#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
|
|
|
|
(1 << 29))
|
|
|
|
#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
|
arm64: move SCTLR_EL{1,2} assertions to <asm/sysreg.h>
Currently we assert that the SCTLR_EL{1,2}_{SET,CLEAR} bits are
self-consistent with an assertion in config_sctlr_el1(). This is a bit
unusual, since config_sctlr_el1() doesn't make use of these definitions,
and is far away from the definitions themselves.
We can use the CPP #error directive to have equivalent assertions in
<asm/sysreg.h>, next to the definitions of the set/clear bits, which is
a bit clearer and simpler.
At the same time, lets fill in the upper 32 bits for both registers in
their respective RES0 definitions. This could be a little nicer with
GENMASK_ULL(63, 32), but this currently lives in <linux/bitops.h>, which
cannot safely be included from assembly, as <asm/sysreg.h> can.
Note that when the preprocessor evaluates an expression for an #if
directive, all signed or unsigned values are treated as intmax_t or
uintmax_t respectively. To avoid ambiguity, we explicitly define
the mask of all 64 bits.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Martin <dave.martin@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2018-07-11 13:56:37 +00:00
|
|
|
(1 << 27) | (1 << 30) | (1 << 31) | \
|
2018-06-15 10:37:34 +00:00
|
|
|
(0xffffefffUL << 32))
|
2018-01-15 19:38:55 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_BIG_ENDIAN
|
|
|
|
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
|
|
|
|
#define ENDIAN_CLEAR_EL1 0
|
|
|
|
#else
|
|
|
|
#define ENDIAN_SET_EL1 0
|
|
|
|
#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
|
|
|
|
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
|
|
|
|
SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_NTWI |\
|
2018-01-15 19:38:58 +00:00
|
|
|
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
|
|
|
|
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
|
2018-01-15 19:38:55 +00:00
|
|
|
#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
|
|
|
|
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
|
2018-06-15 10:37:34 +00:00
|
|
|
SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
|
2018-01-15 19:38:55 +00:00
|
|
|
|
arm64: move SCTLR_EL{1,2} assertions to <asm/sysreg.h>
Currently we assert that the SCTLR_EL{1,2}_{SET,CLEAR} bits are
self-consistent with an assertion in config_sctlr_el1(). This is a bit
unusual, since config_sctlr_el1() doesn't make use of these definitions,
and is far away from the definitions themselves.
We can use the CPP #error directive to have equivalent assertions in
<asm/sysreg.h>, next to the definitions of the set/clear bits, which is
a bit clearer and simpler.
At the same time, let's fill in the upper 32 bits for both registers in
their respective RES0 definitions. This could be a little nicer with
GENMASK_ULL(63, 32), but this currently lives in <linux/bitops.h>, which
cannot safely be included from assembly, as <asm/sysreg.h> can.
Note that when the preprocessor evaluates an expression for an #if
directive, all signed or unsigned values are treated as intmax_t or
uintmax_t respectively. To avoid ambiguity, we explicitly define
the mask of all 64 bits.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Martin <dave.martin@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2018-07-11 13:56:37 +00:00
|
|
|
#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
|
|
|
|
#error "Inconsistent SCTLR_EL1 set/clear bits"
|
|
|
|
#endif
|
2015-10-19 13:24:45 +00:00
|
|
|
|
|
|
|
/* id_aa64isar0 */
|
2018-03-12 10:04:14 +00:00
|
|
|
#define ID_AA64ISAR0_TS_SHIFT 52
|
2017-12-13 10:13:56 +00:00
|
|
|
#define ID_AA64ISAR0_FHM_SHIFT 48
|
2017-10-11 13:01:02 +00:00
|
|
|
#define ID_AA64ISAR0_DP_SHIFT 44
|
|
|
|
#define ID_AA64ISAR0_SM4_SHIFT 40
|
|
|
|
#define ID_AA64ISAR0_SM3_SHIFT 36
|
|
|
|
#define ID_AA64ISAR0_SHA3_SHIFT 32
|
2015-10-19 13:24:45 +00:00
|
|
|
#define ID_AA64ISAR0_RDM_SHIFT 28
|
|
|
|
#define ID_AA64ISAR0_ATOMICS_SHIFT 20
|
|
|
|
#define ID_AA64ISAR0_CRC32_SHIFT 16
|
|
|
|
#define ID_AA64ISAR0_SHA2_SHIFT 12
|
|
|
|
#define ID_AA64ISAR0_SHA1_SHIFT 8
|
|
|
|
#define ID_AA64ISAR0_AES_SHIFT 4
|
|
|
|
|
2017-03-14 18:13:25 +00:00
|
|
|
/* id_aa64isar1 */
|
2017-03-14 18:13:27 +00:00
|
|
|
#define ID_AA64ISAR1_LRCPC_SHIFT 20
|
2017-03-14 18:13:26 +00:00
|
|
|
#define ID_AA64ISAR1_FCMA_SHIFT 16
|
2017-03-14 18:13:25 +00:00
|
|
|
#define ID_AA64ISAR1_JSCVT_SHIFT 12
|
2017-07-25 10:55:40 +00:00
|
|
|
#define ID_AA64ISAR1_DPB_SHIFT 0
|
2017-03-14 18:13:25 +00:00
|
|
|
|
2015-10-19 13:24:45 +00:00
|
|
|
/* id_aa64pfr0 */
|
2017-11-27 18:29:30 +00:00
|
|
|
#define ID_AA64PFR0_CSV3_SHIFT 60
|
2018-01-03 11:17:58 +00:00
|
|
|
#define ID_AA64PFR0_CSV2_SHIFT 56
|
2018-03-12 10:04:14 +00:00
|
|
|
#define ID_AA64PFR0_DIT_SHIFT 48
|
2017-10-31 15:51:00 +00:00
|
|
|
#define ID_AA64PFR0_SVE_SHIFT 32
|
2018-01-15 19:38:56 +00:00
|
|
|
#define ID_AA64PFR0_RAS_SHIFT 28
|
2015-10-19 13:24:45 +00:00
|
|
|
#define ID_AA64PFR0_GIC_SHIFT 24
|
|
|
|
#define ID_AA64PFR0_ASIMD_SHIFT 20
|
|
|
|
#define ID_AA64PFR0_FP_SHIFT 16
|
|
|
|
#define ID_AA64PFR0_EL3_SHIFT 12
|
|
|
|
#define ID_AA64PFR0_EL2_SHIFT 8
|
|
|
|
#define ID_AA64PFR0_EL1_SHIFT 4
|
|
|
|
#define ID_AA64PFR0_EL0_SHIFT 0
|
|
|
|
|
2017-10-31 15:51:00 +00:00
|
|
|
#define ID_AA64PFR0_SVE 0x1
|
2018-01-15 19:38:56 +00:00
|
|
|
#define ID_AA64PFR0_RAS_V1 0x1
|
2015-10-19 13:24:45 +00:00
|
|
|
#define ID_AA64PFR0_FP_NI 0xf
|
|
|
|
#define ID_AA64PFR0_FP_SUPPORTED 0x0
|
|
|
|
#define ID_AA64PFR0_ASIMD_NI 0xf
|
|
|
|
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
|
|
|
|
#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
|
|
|
|
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
|
2016-04-18 09:28:34 +00:00
|
|
|
#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
|
2015-10-19 13:24:45 +00:00
|
|
|
|
2018-06-15 10:37:34 +00:00
|
|
|
/* id_aa64pfr1 */
|
|
|
|
#define ID_AA64PFR1_SSBS_SHIFT 4
|
|
|
|
|
|
|
|
#define ID_AA64PFR1_SSBS_PSTATE_NI 0
|
|
|
|
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
|
|
|
|
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
|
|
|
|
|
2015-10-19 13:24:45 +00:00
|
|
|
/* id_aa64mmfr0 */
|
|
|
|
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
|
|
|
|
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
|
|
|
|
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
|
2015-10-19 13:24:42 +00:00
|
|
|
#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
|
2015-10-19 13:24:45 +00:00
|
|
|
#define ID_AA64MMFR0_SNSMEM_SHIFT 12
|
2015-10-19 13:24:42 +00:00
|
|
|
#define ID_AA64MMFR0_BIGENDEL_SHIFT 8
|
2015-10-19 13:24:45 +00:00
|
|
|
#define ID_AA64MMFR0_ASID_SHIFT 4
|
|
|
|
#define ID_AA64MMFR0_PARANGE_SHIFT 0
|
|
|
|
|
|
|
|
#define ID_AA64MMFR0_TGRAN4_NI 0xf
|
|
|
|
#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0
|
|
|
|
#define ID_AA64MMFR0_TGRAN64_NI 0xf
|
|
|
|
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
|
|
|
|
#define ID_AA64MMFR0_TGRAN16_NI 0x0
|
|
|
|
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
|
2017-12-13 17:07:17 +00:00
|
|
|
#define ID_AA64MMFR0_PARANGE_48 0x5
|
|
|
|
#define ID_AA64MMFR0_PARANGE_52 0x6
|
|
|
|
|
|
|
|
#ifdef CONFIG_ARM64_PA_BITS_52
|
|
|
|
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
|
|
|
|
#else
|
|
|
|
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
|
|
|
|
#endif
|
2015-10-19 13:24:45 +00:00
|
|
|
|
|
|
|
/* id_aa64mmfr1 */
|
|
|
|
#define ID_AA64MMFR1_PAN_SHIFT 20
|
|
|
|
#define ID_AA64MMFR1_LOR_SHIFT 16
|
|
|
|
#define ID_AA64MMFR1_HPD_SHIFT 12
|
|
|
|
#define ID_AA64MMFR1_VHE_SHIFT 8
|
|
|
|
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
|
|
|
|
#define ID_AA64MMFR1_HADBS_SHIFT 0
|
|
|
|
|
2016-03-30 13:33:59 +00:00
|
|
|
#define ID_AA64MMFR1_VMIDBITS_8 0
|
|
|
|
#define ID_AA64MMFR1_VMIDBITS_16 2
|
|
|
|
|
2016-02-05 14:58:47 +00:00
|
|
|
/* id_aa64mmfr2 */
|
2018-04-06 11:27:28 +00:00
|
|
|
#define ID_AA64MMFR2_FWB_SHIFT 40
|
2018-03-12 10:04:14 +00:00
|
|
|
#define ID_AA64MMFR2_AT_SHIFT 32
|
2016-03-25 09:30:07 +00:00
|
|
|
#define ID_AA64MMFR2_LVA_SHIFT 16
|
|
|
|
#define ID_AA64MMFR2_IESB_SHIFT 12
|
|
|
|
#define ID_AA64MMFR2_LSM_SHIFT 8
|
2016-02-05 14:58:47 +00:00
|
|
|
#define ID_AA64MMFR2_UAO_SHIFT 4
|
2016-03-25 09:30:07 +00:00
|
|
|
#define ID_AA64MMFR2_CNP_SHIFT 0
|
2016-02-05 14:58:47 +00:00
|
|
|
|
2015-10-19 13:24:45 +00:00
|
|
|
/* id_aa64dfr0 */
|
2016-09-22 10:23:07 +00:00
|
|
|
#define ID_AA64DFR0_PMSVER_SHIFT 32
|
2015-10-19 13:24:45 +00:00
|
|
|
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
|
|
|
|
#define ID_AA64DFR0_WRPS_SHIFT 20
|
|
|
|
#define ID_AA64DFR0_BRPS_SHIFT 12
|
|
|
|
#define ID_AA64DFR0_PMUVER_SHIFT 8
|
|
|
|
#define ID_AA64DFR0_TRACEVER_SHIFT 4
|
|
|
|
#define ID_AA64DFR0_DEBUGVER_SHIFT 0
|
|
|
|
|
|
|
|
#define ID_ISAR5_RDM_SHIFT 24
|
|
|
|
#define ID_ISAR5_CRC32_SHIFT 16
|
|
|
|
#define ID_ISAR5_SHA2_SHIFT 12
|
|
|
|
#define ID_ISAR5_SHA1_SHIFT 8
|
|
|
|
#define ID_ISAR5_AES_SHIFT 4
|
|
|
|
#define ID_ISAR5_SEVL_SHIFT 0
|
|
|
|
|
|
|
|
#define MVFR0_FPROUND_SHIFT 28
|
|
|
|
#define MVFR0_FPSHVEC_SHIFT 24
|
|
|
|
#define MVFR0_FPSQRT_SHIFT 20
|
|
|
|
#define MVFR0_FPDIVIDE_SHIFT 16
|
|
|
|
#define MVFR0_FPTRAP_SHIFT 12
|
|
|
|
#define MVFR0_FPDP_SHIFT 8
|
|
|
|
#define MVFR0_FPSP_SHIFT 4
|
|
|
|
#define MVFR0_SIMD_SHIFT 0
|
|
|
|
|
|
|
|
#define MVFR1_SIMDFMAC_SHIFT 28
|
|
|
|
#define MVFR1_FPHP_SHIFT 24
|
|
|
|
#define MVFR1_SIMDHP_SHIFT 20
|
|
|
|
#define MVFR1_SIMDSP_SHIFT 16
|
|
|
|
#define MVFR1_SIMDINT_SHIFT 12
|
|
|
|
#define MVFR1_SIMDLS_SHIFT 8
|
|
|
|
#define MVFR1_FPDNAN_SHIFT 4
|
|
|
|
#define MVFR1_FPFTZ_SHIFT 0
|
|
|
|
|
2015-10-19 13:19:35 +00:00
|
|
|
|
|
|
|
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
|
|
|
|
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
|
|
|
|
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
|
|
|
|
|
|
|
|
#define ID_AA64MMFR0_TGRAN4_NI 0xf
|
|
|
|
#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0
|
|
|
|
#define ID_AA64MMFR0_TGRAN64_NI 0xf
|
|
|
|
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
|
|
|
|
#define ID_AA64MMFR0_TGRAN16_NI 0x0
|
|
|
|
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
|
|
|
|
|
|
|
|
#if defined(CONFIG_ARM64_4K_PAGES)
|
|
|
|
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
|
|
|
|
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
|
2015-10-19 13:19:37 +00:00
|
|
|
#elif defined(CONFIG_ARM64_16K_PAGES)
|
|
|
|
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
|
|
|
|
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
|
2015-10-19 13:19:35 +00:00
|
|
|
#elif defined(CONFIG_ARM64_64K_PAGES)
|
|
|
|
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
|
|
|
|
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
|
|
|
|
#endif
|
|
|
|
|
2017-01-09 17:28:31 +00:00
|
|
|
|
2017-10-31 15:51:00 +00:00
|
|
|
/*
|
|
|
|
* The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
|
|
|
|
* are reserved by the SVE architecture for future expansion of the LEN
|
|
|
|
* field, with compatible semantics.
|
|
|
|
*/
|
|
|
|
#define ZCR_ELx_LEN_SHIFT 0
|
|
|
|
#define ZCR_ELx_LEN_SIZE 9
|
|
|
|
#define ZCR_ELx_LEN_MASK 0x1ff
|
|
|
|
|
|
|
|
#define CPACR_EL1_ZEN_EL1EN (1 << 16) /* enable EL1 access */
|
|
|
|
#define CPACR_EL1_ZEN_EL0EN (1 << 17) /* enable EL0 access, if EL1EN set */
|
|
|
|
#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
|
|
|
|
|
|
|
|
|
2017-01-09 17:28:31 +00:00
|
|
|
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
|
|
|
|
#define SYS_MPIDR_SAFE_VAL (1UL << 31)
|
|
|
|
|
2014-07-24 13:14:42 +00:00
|
|
|
#ifdef __ASSEMBLY__
|
|
|
|
|
|
|
|
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
|
2016-02-15 08:51:49 +00:00
|
|
|
.equ .L__reg_num_x\num, \num
|
2014-07-24 13:14:42 +00:00
|
|
|
.endr
|
2016-02-15 08:51:49 +00:00
|
|
|
.equ .L__reg_num_xzr, 31
|
2014-07-24 13:14:42 +00:00
|
|
|
|
|
|
|
.macro mrs_s, rt, sreg
|
2016-12-06 15:27:45 +00:00
|
|
|
__emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
|
2014-07-24 13:14:42 +00:00
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro msr_s, sreg, rt
|
2016-12-06 15:27:45 +00:00
|
|
|
__emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
|
2014-07-24 13:14:42 +00:00
|
|
|
.endm
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
2018-01-15 19:38:55 +00:00
|
|
|
#include <linux/build_bug.h>
|
2015-11-05 15:09:17 +00:00
|
|
|
#include <linux/types.h>
|
|
|
|
|
2014-07-24 13:14:42 +00:00
|
|
|
asm(
|
|
|
|
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
|
2016-02-15 08:51:49 +00:00
|
|
|
" .equ .L__reg_num_x\\num, \\num\n"
|
2014-07-24 13:14:42 +00:00
|
|
|
" .endr\n"
|
2016-02-15 08:51:49 +00:00
|
|
|
" .equ .L__reg_num_xzr, 31\n"
|
2014-07-24 13:14:42 +00:00
|
|
|
"\n"
|
|
|
|
" .macro mrs_s, rt, sreg\n"
|
2016-12-06 15:27:45 +00:00
|
|
|
__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
|
2014-07-24 13:14:42 +00:00
|
|
|
" .endm\n"
|
|
|
|
"\n"
|
|
|
|
" .macro msr_s, sreg, rt\n"
|
2016-12-06 15:27:45 +00:00
|
|
|
__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
|
2014-07-24 13:14:42 +00:00
|
|
|
" .endm\n"
|
|
|
|
);
|
|
|
|
|
2015-11-05 15:09:17 +00:00
|
|
|
/*
|
|
|
|
* Unlike read_cpuid, calls to read_sysreg are never expected to be
|
|
|
|
* optimized away or replaced with synthetic values.
|
|
|
|
*/
|
|
|
|
#define read_sysreg(r) ({ \
|
|
|
|
u64 __val; \
|
|
|
|
asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
|
|
|
|
__val; \
|
|
|
|
})
|
|
|
|
|
2016-09-08 12:55:34 +00:00
|
|
|
/*
|
|
|
|
* The "Z" constraint normally means a zero immediate, but when combined with
|
|
|
|
* the "%x0" template means XZR.
|
|
|
|
*/
|
2015-11-05 15:09:17 +00:00
|
|
|
#define write_sysreg(v, r) do { \
|
2017-07-25 11:52:41 +00:00
|
|
|
u64 __val = (u64)(v); \
|
2016-09-08 12:55:34 +00:00
|
|
|
asm volatile("msr " __stringify(r) ", %x0" \
|
|
|
|
: : "rZ" (__val)); \
|
2015-11-05 15:09:17 +00:00
|
|
|
} while (0)
|
|
|
|
|
2016-09-06 13:04:45 +00:00
|
|
|
/*
|
|
|
|
* For registers without architectural names, or simply unsupported by
|
|
|
|
* GAS.
|
|
|
|
*/
|
|
|
|
#define read_sysreg_s(r) ({ \
|
|
|
|
u64 __val; \
|
|
|
|
asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \
|
|
|
|
__val; \
|
|
|
|
})
|
|
|
|
|
|
|
|
#define write_sysreg_s(v, r) do { \
|
2017-07-25 11:52:41 +00:00
|
|
|
u64 __val = (u64)(v); \
|
2016-10-17 12:38:14 +00:00
|
|
|
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
|
2016-09-06 13:04:45 +00:00
|
|
|
} while (0)
|
|
|
|
|
arm64: Introduce sysreg_clear_set()
Currently we have a couple of helpers to manipulate bits in particular
sysregs:
* config_sctlr_el1(u32 clear, u32 set)
* change_cpacr(u64 val, u64 mask)
The parameters of these differ in naming convention, order, and size,
which is unfortunate. They also differ slightly in behaviour, as
change_cpacr() skips the sysreg write if the bits are unchanged, which
is a useful optimization when sysreg writes are expensive.
Before we gain yet another sysreg manipulation function, let's
unify these with a common helper, providing a consistent order for
clear/set operands, and the write skipping behaviour from
change_cpacr(). Code will be migrated to the new helper in subsequent
patches.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Dave Martin <dave.martin@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
2018-06-15 15:47:23 +00:00
|
|
|
/*
|
|
|
|
* Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
|
|
|
|
* set mask are set. Other bits are left as-is.
|
|
|
|
*/
|
|
|
|
#define sysreg_clear_set(sysreg, clear, set) do { \
|
|
|
|
u64 __scs_val = read_sysreg(sysreg); \
|
|
|
|
u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
|
|
|
|
if (__scs_new != __scs_val) \
|
|
|
|
write_sysreg(__scs_new, sysreg); \
|
|
|
|
} while (0)
|
|
|
|
|
2014-07-24 13:14:42 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif /* __ASM_SYSREG_H */
|