From 9c7e2634f647630db4e0719391dd80cd81132a66 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Thu, 18 Nov 2021 19:58:03 -0800
Subject: [PATCH 1/2] x86/cpu: Don't write CSTAR MSR on Intel CPUs

Intel CPUs do not support SYSCALL in 32-bit mode, but the kernel
initializes MSR_CSTAR unconditionally. That MSR write is normally
ignored by the CPU, but in a TDX guest it raises a #VE trap.

Exclude Intel CPUs from the MSR_CSTAR initialization.

[ tglx: Fixed the subject line and removed the redundant comment. ]

Signed-off-by: Andi Kleen
Signed-off-by: Kuppuswamy Sathyanarayanan
Signed-off-by: Thomas Gleixner
Reviewed-by: Tony Luck
Link: https://lore.kernel.org/r/20211119035803.4012145-1-sathyanarayanan.kuppuswamy@linux.intel.com
---
 arch/x86/kernel/cpu/common.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0083464de5e3..0663642d6199 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1787,6 +1787,17 @@ EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
 DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
 
+static void wrmsrl_cstar(unsigned long val)
+{
+	/*
+	 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
+	 * is so far ignored by the CPU, but raises a #VE trap in a TDX
+	 * guest. Avoid the pointless write on all Intel CPUs.
+	 */
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		wrmsrl(MSR_CSTAR, val);
+}
+
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -1794,7 +1805,7 @@ void syscall_init(void)
 	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
-	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
+	wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
 	/*
 	 * This only works on Intel CPUs.
 	 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
@@ -1806,7 +1817,7 @@ void syscall_init(void)
 	       (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
-	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
+	wrmsrl_cstar((unsigned long)ignore_sysret);
 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);

From 244122b4d2e5221e6abd6e21d6a58170104db781 Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Thu, 16 Dec 2021 09:24:31 -0800
Subject: [PATCH 2/2] x86/lib: Add fast-short-rep-movs check to
 copy_user_enhanced_fast_string()

Commit f444a5ff95dc ("x86/cpufeatures: Add support for fast short REP;
MOVSB") fixed memmove() with an ALTERNATIVE that will use REP MOVSB for
all string lengths.

copy_user_enhanced_fast_string() has a similar run time check to avoid
using REP MOVSB for copies less than 64 bytes.

Add an ALTERNATIVE to patch out the short length check and always use
REP MOVSB on X86_FEATURE_FSRM CPUs.
Signed-off-by: Tony Luck
Signed-off-by: Borislav Petkov
Link: https://lore.kernel.org/r/20211216172431.1396371-1-tony.luck@intel.com
---
 arch/x86/lib/copy_user_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 2797e630b9b1..1c429f0489dd 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -200,8 +200,8 @@ EXPORT_SYMBOL(copy_user_generic_string)
  */
 SYM_FUNC_START(copy_user_enhanced_fast_string)
 	ASM_STAC
-	cmpl $64,%edx
-	jb .L_copy_short_string	/* less then 64 bytes, avoid the costly 'rep' */
+	/* CPUs without FSRM should avoid rep movsb for short copies */
+	ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
 	movl %edx,%ecx
 1:	rep movsb