From c56c599d9002d44f559be3852b371db46adac87c Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 30 Sep 2024 17:10:47 +0100 Subject: [PATCH 1/6] arm64: probes: Disable kprobes/uprobes on MOPS instructions FEAT_MOPS instructions require that all three instructions (prologue, main and epilogue) appear consecutively in memory. Placing a kprobe/uprobe on one of them doesn't work as only a single instruction gets executed out-of-line or simulated. So don't allow placing a probe on a MOPS instruction. Fixes: b7564127ffcb ("arm64: mops: detect and enable FEAT_MOPS") Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20240930161051.3777828-2-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/insn.h | 1 + arch/arm64/kernel/probes/decode-insn.c | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 8c0a36f72d6f..bc77869dbd43 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -353,6 +353,7 @@ __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000) __AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000) __AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000) __AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000) +__AARCH64_INSN_FUNCS(mops, 0x3B200C00, 0x19000400) __AARCH64_INSN_FUNCS(stp, 0x7FC00000, 0x29000000) __AARCH64_INSN_FUNCS(ldp, 0x7FC00000, 0x29400000) __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c index 968d5fffe233..77f3c8eb0916 100644 --- a/arch/arm64/kernel/probes/decode-insn.c +++ b/arch/arm64/kernel/probes/decode-insn.c @@ -58,10 +58,13 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn) * Instructions which load PC relative literals are not going to work * when executed from an XOL slot. Instructions doing an exclusive * load/store are not going to complete successfully when single-step - * exception handling happens in the middle of the sequence. + * exception handling happens in the middle of the sequence. Memory + * copy/set instructions require that all three instructions be placed + * consecutively in memory. */ if (aarch64_insn_uses_literal(insn) || - aarch64_insn_is_exclusive(insn)) + aarch64_insn_is_exclusive(insn) || + aarch64_insn_is_mops(insn)) return false; return true; From 13840229d6bd5c191a9ca68ceba0af0fa03d7645 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 30 Sep 2024 17:10:48 +0100 Subject: [PATCH 2/6] arm64: mops: Handle MOPS exceptions from EL1 We will soon be using MOPS instructions in the kernel, so wire up the exception handler to handle exceptions from EL1 caused by the copy/set operation being stopped and resumed on a different type of CPU. Add a helper for advancing the single step state machine, similarly to what the EL0 exception handler does. 
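For reference, the probe-side check added in the previous patch reduces to a
single mask/value test on the instruction word. A minimal C sketch of that
test (illustrative only; the constants are the ones from
__AARCH64_INSN_FUNCS(mops, ...) above, and the function name here is made up):

	#include <stdbool.h>
	#include <stdint.h>

	/* True for any FEAT_MOPS CPY or SET encoding: the prologue, main
	 * and epilogue variants all share these fixed opcode bits. */
	static bool insn_is_mops(uint32_t insn)
	{
		return (insn & 0x3B200C00u) == 0x19000400u;
	}
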
Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20240930161051.3777828-3-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/debug-monitors.h | 1 + arch/arm64/include/asm/exception.h | 1 + arch/arm64/kernel/debug-monitors.c | 5 +++++ arch/arm64/kernel/entry-common.c | 12 ++++++++++++ arch/arm64/kernel/traps.c | 7 +++++++ 5 files changed, 26 insertions(+) diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 13d437bcbf58..8f6ba31b8658 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -105,6 +105,7 @@ void kernel_enable_single_step(struct pt_regs *regs); void kernel_disable_single_step(void); int kernel_active_single_step(void); void kernel_rewind_single_step(struct pt_regs *regs); +void kernel_fastforward_single_step(struct pt_regs *regs); #ifdef CONFIG_HAVE_HW_BREAKPOINT int reinstall_suspended_bps(struct pt_regs *regs); diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index f296662590c7..8689b95f6b53 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -73,6 +73,7 @@ void do_el0_svc_compat(struct pt_regs *regs); void do_el0_fpac(struct pt_regs *regs, unsigned long esr); void do_el1_fpac(struct pt_regs *regs, unsigned long esr); void do_el0_mops(struct pt_regs *regs, unsigned long esr); +void do_el1_mops(struct pt_regs *regs, unsigned long esr); void do_serror(struct pt_regs *regs, unsigned long esr); void do_signal(struct pt_regs *regs); diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 024a7b245056..c60a4a90c6a5 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -441,6 +441,11 @@ void kernel_rewind_single_step(struct pt_regs *regs) set_regs_spsr_ss(regs); } +void kernel_fastforward_single_step(struct pt_regs *regs) +{ + clear_regs_spsr_ss(regs); +} + /* ptrace API */ void user_enable_single_step(struct task_struct *task) { diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 3fcd9d080bf2..9d174cd541ef 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -463,6 +463,15 @@ static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr) exit_to_kernel_mode(regs); } +static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr) +{ + enter_from_kernel_mode(regs); + local_daif_inherit(regs); + do_el1_mops(regs, esr); + local_daif_mask(); + exit_to_kernel_mode(regs); +} + static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr) { unsigned long far = read_sysreg(far_el1); @@ -505,6 +514,9 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) case ESR_ELx_EC_BTI: el1_bti(regs, esr); break; + case ESR_ELx_EC_MOPS: + el1_mops(regs, esr); + break; case ESR_ELx_EC_BREAKPT_CUR: case ESR_ELx_EC_SOFTSTP_CUR: case ESR_ELx_EC_WATCHPT_CUR: diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 563cbce11126..fc6d44e06b8d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -531,6 +531,13 @@ void do_el0_mops(struct pt_regs *regs, unsigned long esr) user_fastforward_single_step(current); } +void do_el1_mops(struct pt_regs *regs, unsigned long esr) +{ + arm64_mops_reset_regs(®s->user_regs, esr); + + kernel_fastforward_single_step(regs); +} + #define __user_cache_maint(insn, address, res) \ if (address >= TASK_SIZE_MAX) { \ res = -EFAULT; \ From 
b616058c6613e1fd3e2bc8d4c05b558c8854aab3 Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Mon, 30 Sep 2024 17:10:49 +0100
Subject: [PATCH 3/6] arm64: mops: Document booting requirement for HCRX_EL2.MCE2

Document that hypervisors must set HCRX_EL2.MCE2 and handle MOPS exceptions
when they migrate a vCPU to another type of CPU, as Linux may not be able to
handle the exception at all times. As one example, when running under nested
virtualization, KVM does not handle MOPS exceptions from the nVHE/hVHE EL2 hyp
as the hyp is never migrated, so the host hypervisor needs to handle them.
There may be other situations (now or in the future) where the kernel can't
handle an unexpected MOPS exception, so require that the hypervisor handles
them.

Signed-off-by: Kristina Martsenko
Link: https://lore.kernel.org/r/20240930161051.3777828-4-kristina.martsenko@arm.com
Signed-off-by: Catalin Marinas
---
 Documentation/arch/arm64/booting.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst
index b57776a68f15..db46af5b9f0f 100644
--- a/Documentation/arch/arm64/booting.rst
+++ b/Documentation/arch/arm64/booting.rst
@@ -385,6 +385,9 @@ Before jumping into the kernel, the following conditions must be met:

   - HCRX_EL2.MSCEn (bit 11) must be initialised to 0b1.

+  - HCRX_EL2.MCE2 (bit 10) must be initialised to 0b1. The exception
+    handler must set PSTATE.SS to 0b0.
+
  For CPUs with the Extended Translation Control Register feature (FEAT_TCR2):

  - If EL3 is present:

From 836ed3c4e473fef9e0814a2ba6dd40f9656c03f1 Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Mon, 30 Sep 2024 17:10:50 +0100
Subject: [PATCH 4/6] arm64: lib: Use MOPS for memcpy() routines

Make memcpy(), memmove() and memset() use the Armv8.8 FEAT_MOPS instructions
when implemented on the CPU.

The CPY*/SET* instructions copy or set a block of memory of arbitrary size and
alignment. They can be interrupted by the CPU and the copying resumed later.
Their performance is expected to be close to the best generic copy/set
sequence of loads/stores for a given CPU. Using them in the kernel's copy/set
routines therefore avoids the need to periodically rewrite the routines to
optimize for new microarchitectures. It could also lead to a performance
improvement for some CPUs and systems.

With this change the kernel will always use the instructions if they are
implemented on the CPU (and have not been disabled by the arm64.nomops command
line parameter). When not implemented the usual routines will be used (patched
via alternatives).

Note, we need to patch B/NOP instead of the whole sequence to avoid executing
a partially patched sequence in case the compiler generates a mem*() call
inside the alternatives patching code.

Note that MOPS instructions have relaxed behavior on Device memory, but it is
expected that these routines are not generally used on MMIO.
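To illustrate the B/NOP point, the patched entry is conceptually a single
branch that selects one of two complete bodies. A C sketch of the equivalent
control flow (names are hypothetical; the real selection is done by patching
the B instruction via alternatives in the assembly below, not by a runtime
flag):

	#include <stddef.h>

	void *memcpy_generic(void *dst, const void *src, size_t n); /* hypothetical */
	void *memcpy_mops(void *dst, const void *src, size_t n);    /* hypothetical */
	int cpu_has_mops(void);                                     /* hypothetical */

	/* Only the branch decision is ever rewritten at boot, so a
	 * concurrent caller sees either one fully-formed routine or the
	 * other, never a half-patched instruction sequence. */
	void *memcpy(void *dst, const void *src, size_t n)
	{
		if (!cpu_has_mops())
			return memcpy_generic(dst, src, n);
		return memcpy_mops(dst, src, n); /* CPYP/CPYM/CPYE */
	}
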
Note: For memcpy(), this uses the CPY* instructions instead of CPYF*, as CPY* allows overlaps between the source and destination buffers, and despite contradicting the C standard, compilers require that memcpy() work on exactly overlapping source and destination: https://gcc.gnu.org/onlinedocs/gcc/Standards.html#C-Language https://reviews.llvm.org/D86993 Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20240930161051.3777828-5-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 3 +++ arch/arm64/lib/memcpy.S | 19 ++++++++++++++++++- arch/arm64/lib/memset.S | 20 +++++++++++++++++++- 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3e29b44d2d7b..d0fe90ea704d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2155,6 +2155,9 @@ config ARM64_EPAN if the cpu does not implement the feature. endmenu # "ARMv8.7 architectural features" +config AS_HAS_MOPS + def_bool $(as-instr,.arch_extension mops) + menu "ARMv8.9 architectural features" config ARM64_POE diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 4ab48d49c451..9b99106fb95f 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -57,7 +57,7 @@ The loop tail is handled by always copying 64 bytes from the end. */ -SYM_FUNC_START(__pi_memcpy) +SYM_FUNC_START_LOCAL(__pi_memcpy_generic) add srcend, src, count add dstend, dstin, count cmp count, 128 @@ -238,7 +238,24 @@ L(copy64_from_start): stp B_l, B_h, [dstin, 16] stp C_l, C_h, [dstin] ret +SYM_FUNC_END(__pi_memcpy_generic) + +#ifdef CONFIG_AS_HAS_MOPS + .arch_extension mops +SYM_FUNC_START(__pi_memcpy) +alternative_if_not ARM64_HAS_MOPS + b __pi_memcpy_generic +alternative_else_nop_endif + + mov dst, dstin + cpyp [dst]!, [src]!, count! + cpym [dst]!, [src]!, count! + cpye [dst]!, [src]!, count! + ret SYM_FUNC_END(__pi_memcpy) +#else +SYM_FUNC_ALIAS(__pi_memcpy, __pi_memcpy_generic) +#endif SYM_FUNC_ALIAS(__memcpy, __pi_memcpy) EXPORT_SYMBOL(__memcpy) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index a5aebe82ad73..97157da65ec6 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -26,6 +26,7 @@ */ dstin .req x0 +val_x .req x1 val .req w1 count .req x2 tmp1 .req x3 @@ -42,7 +43,7 @@ dst .req x8 tmp3w .req w9 tmp3 .req x9 -SYM_FUNC_START(__pi_memset) +SYM_FUNC_START_LOCAL(__pi_memset_generic) mov dst, dstin /* Preserve return value. */ and A_lw, val, #255 orr A_lw, A_lw, A_lw, lsl #8 @@ -201,7 +202,24 @@ SYM_FUNC_START(__pi_memset) ands count, count, zva_bits_x b.ne .Ltail_maybe_long ret +SYM_FUNC_END(__pi_memset_generic) + +#ifdef CONFIG_AS_HAS_MOPS + .arch_extension mops +SYM_FUNC_START(__pi_memset) +alternative_if_not ARM64_HAS_MOPS + b __pi_memset_generic +alternative_else_nop_endif + + mov dst, dstin + setp [dst]!, count!, val_x + setm [dst]!, count!, val_x + sete [dst]!, count!, val_x + ret SYM_FUNC_END(__pi_memset) +#else +SYM_FUNC_ALIAS(__pi_memset, __pi_memset_generic) +#endif SYM_FUNC_ALIAS(__memset, __pi_memset) EXPORT_SYMBOL(__memset) From ce6b5ff5f16dd9267d62d09b3af3f0c7dc3c24f0 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 30 Sep 2024 17:10:51 +0100 Subject: [PATCH 5/6] arm64: lib: Use MOPS for copy_page() and clear_page() Similarly to what was done to the memcpy() routines, make copy_page() and clear_page() also use the Armv8.8 FEAT_MOPS instructions. Note: For copy_page() this uses the CPY* instructions instead of CPYF* as CPYF* doesn't allow src and dst to be equal. 
It's not clear if copy_page() needs to allow equal src and dst but it has worked so far with the current implementation and there is no documentation forbidding it. Note, the unoptimized version of copy_page() in assembler.h is left as it is. Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20240930161051.3777828-6-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/lib/clear_page.S | 13 +++++++++++++ arch/arm64/lib/copy_page.S | 13 +++++++++++++ 2 files changed, 26 insertions(+) diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S index ebde40e7fa2b..bd6f7d5eb6eb 100644 --- a/arch/arm64/lib/clear_page.S +++ b/arch/arm64/lib/clear_page.S @@ -15,6 +15,19 @@ * x0 - dest */ SYM_FUNC_START(__pi_clear_page) +#ifdef CONFIG_AS_HAS_MOPS + .arch_extension mops +alternative_if_not ARM64_HAS_MOPS + b .Lno_mops +alternative_else_nop_endif + + mov x1, #PAGE_SIZE + setpn [x0]!, x1!, xzr + setmn [x0]!, x1!, xzr + seten [x0]!, x1!, xzr + ret +.Lno_mops: +#endif mrs x1, dczid_el0 tbnz x1, #4, 2f /* Branch if DC ZVA is prohibited */ and w1, w1, #0xf diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index 6a56d7cf309d..e6374e7e5511 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -18,6 +18,19 @@ * x1 - src */ SYM_FUNC_START(__pi_copy_page) +#ifdef CONFIG_AS_HAS_MOPS + .arch_extension mops +alternative_if_not ARM64_HAS_MOPS + b .Lno_mops +alternative_else_nop_endif + + mov x2, #PAGE_SIZE + cpypwn [x0]!, [x1]!, x2! + cpymwn [x0]!, [x1]!, x2! + cpyewn [x0]!, [x1]!, x2! + ret +.Lno_mops: +#endif ldp x2, x3, [x1] ldp x4, x5, [x1, #16] ldp x6, x7, [x1, #32] From 2cfdb799dc7681a93844e5019f9bbff603c2c9ee Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 28 Oct 2024 18:57:21 +0000 Subject: [PATCH 6/6] arm64: mops: Document requirements for hypervisors Add a mops.rst document to clarify in more detail what hypervisors need to do to run a Linux guest on a system with FEAT_MOPS. Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20241028185721.52852-1-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- Documentation/arch/arm64/booting.rst | 4 +-- Documentation/arch/arm64/index.rst | 1 + Documentation/arch/arm64/mops.rst | 44 ++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 Documentation/arch/arm64/mops.rst diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst index db46af5b9f0f..dabd279dee5d 100644 --- a/Documentation/arch/arm64/booting.rst +++ b/Documentation/arch/arm64/booting.rst @@ -385,8 +385,8 @@ Before jumping into the kernel, the following conditions must be met: - HCRX_EL2.MSCEn (bit 11) must be initialised to 0b1. - - HCRX_EL2.MCE2 (bit 10) must be initialised to 0b1. The exception - handler must set PSTATE.SS to 0b0. + - HCRX_EL2.MCE2 (bit 10) must be initialised to 0b1 and the hypervisor + must handle MOPS exceptions as described in :ref:`arm64_mops_hyp`. 
 For CPUs with the Extended Translation Control Register feature (FEAT_TCR2):

diff --git a/Documentation/arch/arm64/index.rst b/Documentation/arch/arm64/index.rst
index 78544de0a8a9..463de5855e84 100644
--- a/Documentation/arch/arm64/index.rst
+++ b/Documentation/arch/arm64/index.rst
@@ -20,6 +20,7 @@ ARM64 Architecture
     legacy_instructions
     memory
     memory-tagging-extension
+    mops
     perf
     pointer-authentication
     ptdump
diff --git a/Documentation/arch/arm64/mops.rst b/Documentation/arch/arm64/mops.rst
new file mode 100644
index 000000000000..2ef5b147f8dc
--- /dev/null
+++ b/Documentation/arch/arm64/mops.rst
@@ -0,0 +1,44 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+Memory copy/set instructions (MOPS)
+===================================
+
+A MOPS memory copy/set operation consists of three consecutive CPY* or SET*
+instructions: a prologue, main and epilogue (for example: CPYP, CPYM, CPYE).
+
+A main or epilogue instruction can take a MOPS exception for various reasons,
+for example when a task is migrated to a CPU with a different MOPS
+implementation, or when the instruction's alignment and size requirements are
+not met. The software exception handler is then expected to reset the registers
+and restart execution from the prologue instruction. Normally this is handled
+by the kernel.
+
+For more details refer to "D1.3.5.7 Memory Copy and Memory Set exceptions" in
+the Arm Architecture Reference Manual DDI 0487K.a (Arm ARM).
+
+.. _arm64_mops_hyp:
+
+Hypervisor requirements
+-----------------------
+
+A hypervisor running a Linux guest must handle all MOPS exceptions from the
+guest kernel, as Linux may not be able to handle the exception at all times.
+For example, a MOPS exception can be taken when the hypervisor migrates a vCPU
+to another physical CPU with a different MOPS implementation.
+
+To do this, the hypervisor must:
+
+  - Set HCRX_EL2.MCE2 to 1 so that the exception is taken to the hypervisor.
+
+  - Have an exception handler that implements the algorithm from the Arm ARM
+    rules CNTMJ and MWFQH.
+
+  - Set the guest's PSTATE.SS to 0 in the exception handler, to handle a
+    potential step of the current instruction.
+
+  Note: Clearing PSTATE.SS is needed so that a single step exception is taken
+  on the next instruction (the prologue instruction). Otherwise the prologue
+  would be silently stepped over and the single step exception would be taken
+  on the main instruction. Note that if the guest instruction is not being
+  stepped then clearing PSTATE.SS has no effect.
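As a rough illustration of these requirements, the core of such a handler
could look like the C sketch below. All type, constant and function names are
assumptions made up for the sketch; the bit positions match ESR_ELx and
SPSR_EL2 in the Arm ARM, and the full register-fixup algorithm is the one the
Linux kernel implements in arm64_mops_reset_regs():

	#include <stdint.h>

	#define ESR_MOPS_FROM_EPILOGUE (UINT64_C(1) << 18) /* fault came from the epilogue insn */
	#define SPSR_SS                (UINT64_C(1) << 21) /* guest PSTATE.SS as saved in SPSR_EL2 */

	struct guest_regs {        /* illustrative stand-in for a vCPU register file */
		uint64_t x[31];
		uint64_t pc;
		uint64_t spsr;
	};

	static void handle_guest_mops_exception(struct guest_regs *regs, uint64_t esr)
	{
		/*
		 * Wind PC back to the prologue instruction (rules CNTMJ and
		 * MWFQH): back 8 bytes if the fault was on the epilogue, 4
		 * if on the main instruction. A complete handler must also
		 * reset the destination/source/size registers identified by
		 * the ESR ISS field; that part is elided here.
		 */
		regs->pc -= (esr & ESR_MOPS_FROM_EPILOGUE) ? 8 : 4;

		/*
		 * Clear PSTATE.SS so that, if the guest is being single
		 * stepped, the step exception is taken on the prologue
		 * rather than the prologue being silently stepped over.
		 */
		regs->spsr &= ~SPSR_SS;
	}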