64ad946152
misleading/wrong stacktraces and confuse RELIABLE_STACKTRACE and LIVEPATCH as the backtrace misses the function which is being fixed up. - Add Straight Light Speculation mitigation support which uses a new compiler switch -mharden-sls= which sticks an INT3 after a RET or an indirect branch in order to block speculation after them. Reportedly, CPUs do speculate behind such insns. - The usual set of cleanups and improvements -----BEGIN PGP SIGNATURE----- iQIyBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmHfKA0ACgkQEsHwGGHe VUqLJg/2I2X2xXr5filJVaK+sQgmvDzk67DKnbxRBW2xcPF+B5sSW5yhe3G5UPW7 SJVdhQ3gHcTiliGGlBf/VE7KXbqxFN0vO4/VFHZm78r43g7OrXTxz6WXXQRJ1n67 U3YwRH3b6cqXZNFMs+X4bJt6qsGJM1kdTTZ2as4aERnaFr5AOAfQvfKbyhxLe/XA 3SakfYISVKCBQ2RkTfpMpwmqlsatGFhTC5IrvuDQ83dDsM7O+Dx1J6Gu3fwjKmie iVzPOjCh+xTpZQp/SIZmt7MzoduZvpSym4YVyHvEnMiexQT4AmyaRthWqrhnEXY/ qOvj8/XIqxmix8EaooGqRIK0Y2ZegxkPckNFzaeC3lsWohwMIGIhNXwHNEeuhNyH yvNGAW9Cq6NeDRgz5MRUXcimYw4P4oQKYLObS1WqFZhNMqm4sNtoEAYpai/lPYfs zUDckgXF2AoPOsSqy3hFAVaGovAgzfDaJVzkt0Lk4kzzjX2WQiNLhmiior460w+K 0l2Iej58IajSp3MkWmFH368Jo8YfUVmkjbbpsmjsBppA08e1xamJB7RmswI/Ezj6 s5re6UioCD+UYdjWx41kgbvYdvIkkZ2RLrktoZd/hqHrOLWEIiwEbyFO2nRFJIAh YjvPkB1p7iNuAeYcP1x9Ft9GNYVIsUlJ+hK86wtFCqy+abV+zQ== =R52z -----END PGP SIGNATURE----- Merge tag 'x86_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull x86 core updates from Borislav Petkov: - Get rid of all the .fixup sections because this generates misleading/wrong stacktraces and confuse RELIABLE_STACKTRACE and LIVEPATCH as the backtrace misses the function which is being fixed up. - Add Straight Line Speculation mitigation support which uses a new compiler switch -mharden-sls= which sticks an INT3 after a RET or an indirect branch in order to block speculation after them. Reportedly, CPUs do speculate behind such insns. 
- The usual set of cleanups and improvements * tag 'x86_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits) x86/entry_32: Fix segment exceptions objtool: Remove .fixup handling x86: Remove .fixup section x86/word-at-a-time: Remove .fixup usage x86/usercopy: Remove .fixup usage x86/usercopy_32: Simplify __copy_user_intel_nocache() x86/sgx: Remove .fixup usage x86/checksum_32: Remove .fixup usage x86/vmx: Remove .fixup usage x86/kvm: Remove .fixup usage x86/segment: Remove .fixup usage x86/fpu: Remove .fixup usage x86/xen: Remove .fixup usage x86/uaccess: Remove .fixup usage x86/futex: Remove .fixup usage x86/msr: Remove .fixup usage x86/extable: Extend extable functionality x86/entry_32: Remove .fixup usage x86/entry_64: Remove .fixup usage x86/copy_mc_64: Remove .fixup usage ...
87 lines
2.0 KiB
ArmAsm
87 lines
2.0 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Copyright (C) 2014 Intel Corporation; author Matt Fleming
|
|
*
|
|
* Support for invoking 32-bit EFI runtime services from a 64-bit
|
|
* kernel.
|
|
*
|
|
* The below thunking functions are only used after ExitBootServices()
|
|
* has been called. This simplifies things considerably as compared with
|
|
* the early EFI thunking because we can leave all the kernel state
|
|
* intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
|
|
* services from __KERNEL32_CS. This means we can continue to service
|
|
* interrupts across an EFI mixed mode call.
|
|
*
|
|
* We do however, need to handle the fact that we're running in a full
|
|
* 64-bit virtual address space. Things like the stack and instruction
|
|
* addresses need to be accessible by the 32-bit firmware, so we rely on
|
|
* using the identity mappings in the EFI page table to access the stack
|
|
* and kernel text (see efi_setup_page_tables()).
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/page_types.h>
|
|
#include <asm/segment.h>
|
|
|
|
.text
|
|
.code64
|
|
/*
 * __efi64_thunk - invoke a 32-bit EFI runtime service from 64-bit code
 *
 * In:   %rdi = address of the 32-bit EFI runtime service to call
 *       %rsi, %rdx, %rcx, %r8d, %r9d = first five 32-bit arguments
 *       0x18/0x20/0x28(%rsp on entry) = further stack-passed arguments
 * Out:  %rax = EFI status returned by the service
 * Preserves the callee-saved %rbp/%rbx it uses as scratch.
 *
 * NOTE(review): caller-side contract (how many stack args are always
 * present) is not visible in this file — confirm against the C caller.
 */
SYM_CODE_START(__efi64_thunk)
	push %rbp			/* callee-saved, used for 1f phys addr */
	push %rbx			/* callee-saved, used for 2f phys addr */

	/*
	 * Switch to 1:1 mapped 32-bit stack pointer.
	 * Keep the old %rsp in %rax; it is pushed onto the new stack
	 * below so the epilogue can restore it.
	 */
	movq %rsp, %rax
	movq efi_mixed_mode_stack_pa(%rip), %rsp
	push %rax

	/*
	 * Copy args passed via the stack: read them from the old stack
	 * (via the saved %rax) and store them as 32-bit values in the
	 * outgoing frame on the mixed-mode stack.
	 */
	subq $0x24, %rsp		/* room for ret addr + 8 dword args */
	movq 0x18(%rax), %rbp
	movq 0x20(%rax), %rbx
	movq 0x28(%rax), %rax
	movl %ebp, 0x18(%rsp)
	movl %ebx, 0x1c(%rsp)
	movl %eax, 0x20(%rsp)

	/*
	 * Calculate the physical address of the kernel text:
	 * %rax = __START_KERNEL_map - phys_base is the virt-to-phys
	 * delta, subtracted from label addresses below so the 32-bit
	 * firmware can reach them through the identity mapping.
	 */
	movq $__START_KERNEL_map, %rax
	subq phys_base(%rip), %rax

	leaq 1f(%rip), %rbp		/* %rbp = phys addr of label 1 */
	leaq 2f(%rip), %rbx		/* %rbx = phys addr of label 2 */
	subq %rax, %rbp
	subq %rax, %rbx

	/* First frame slot: where the EFI service's 32-bit ret lands. */
	movl %ebx, 0x0(%rsp) /* return address */
	movl %esi, 0x4(%rsp)
	movl %edx, 0x8(%rsp)
	movl %ecx, 0xc(%rsp)
	movl %r8d, 0x10(%rsp)
	movl %r9d, 0x14(%rsp)

	/*
	 * Switch to 32-bit descriptor: far-return through
	 * __KERNEL32_CS:%rdi drops into compatibility mode directly at
	 * the EFI runtime service.
	 */
	pushq $__KERNEL32_CS
	pushq %rdi /* EFI runtime service address */
	lretq

	/*
	 * Re-entered here (64-bit mode, via 2f below). The saved
	 * original %rsp was stored at 0x24(%rsp), but the firmware's
	 * 32-bit ret popped 4 bytes, so it now sits at 0x20(%rsp).
	 */
1:	movq 0x20(%rsp), %rsp
	pop %rbx
	pop %rbp
	RET

	.code32
	/*
	 * The EFI service returns here in 32-bit mode. Far-return
	 * through __KERNEL_CS:%ebp (phys addr of 1f) to get back into
	 * 64-bit mode at label 1 above.
	 */
2:	pushl $__KERNEL_CS
	pushl %ebp
	lret
SYM_CODE_END(__efi64_thunk)
|
|
|
|
	.bss
	.balign 8
/*
 * Physical address of the 1:1-mapped stack that __efi64_thunk switches
 * to before calling into 32-bit firmware. Zero-initialized here;
 * presumably set up during EFI mixed-mode init — defined outside this
 * file's view, confirm against the setup code.
 */
SYM_DATA(efi_mixed_mode_stack_pa, .quad 0)
|