commit 816cd16883

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

include/net/sock.h
  310731e2f1 ("net: Fix data-races around sysctl_mem.")
  e70f3c7012 ("Revert "net: set SK_MEM_QUANTUM to 4096"")
  https://lore.kernel.org/all/20220711120211.7c8b7cba@canb.auug.org.au/

net/ipv4/fib_semantics.c
  747c143072 ("ip: fix dflt addr selection for connected nexthop")
  d62607c3fe ("net: rename reference+tracking helpers")

net/tls/tls.h
include/net/tls.h
  3d8c51b25a ("net/tls: Check for errors in tls_device_init")
  5879031423 ("tls: create an internal header")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

.mailmap | 3 +++
@@ -64,6 +64,9 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
+Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
+Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
+Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
 Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
 Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
Documentation/admin-guide/kernel-parameters.txt

@@ -5197,6 +5197,30 @@
 	retain_initrd	[RAM] Keep initrd memory after extraction
 
+	retbleed=	[X86] Control mitigation of RETBleed (Arbitrary
+			Speculative Code Execution with Return Instructions)
+			vulnerability.
+
+			off          - no mitigation
+			auto         - automatically select a mitigation
+			auto,nosmt   - automatically select a mitigation,
+			               disabling SMT if necessary for
+			               the full mitigation (only on Zen1
+			               and older without STIBP).
+			ibpb         - mitigate short speculation windows on
+			               basic block boundaries too. Safe, highest
+			               perf impact.
+			unret        - force enable untrained return thunks,
+			               only effective on AMD f15h-f17h
+			               based systems.
+			unret,nosmt  - like unret, will disable SMT when STIBP
+			               is not available.
+
+			Selecting 'auto' will choose a mitigation method at run
+			time according to the CPU.
+
+			Not specifying this option is equivalent to retbleed=auto.
+
 	rfkill.default_state=
 		0	"airplane mode".  All wifi, bluetooth, wimax, gps, fm,
 			etc. communication is blocked by default.

@@ -5568,6 +5592,7 @@
 			eibrs             - enhanced IBRS
 			eibrs,retpoline   - enhanced IBRS + Retpolines
 			eibrs,lfence      - enhanced IBRS + LFENCE
+			ibrs              - use IBRS to protect kernel
 
 			Not specifying this option is equivalent to
 			spectre_v2=auto.
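As a side note for readers of the retbleed= documentation above: the real parser lives in arch/x86/kernel/cpu/bugs.c; the sketch below is a simplified, hypothetical illustration of how a comma-separated option string such as "auto,nosmt" could map onto a mitigation mode plus an SMT flag. It is not the kernel's code.

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical mirror of the documented retbleed= choices. */
    enum retbleed_mode { RETBLEED_OFF, RETBLEED_AUTO, RETBLEED_IBPB, RETBLEED_UNRET };

    static void parse_retbleed(const char *arg, enum retbleed_mode *mode, int *nosmt)
    {
            char buf[32];
            char *tok, *save = NULL;

            snprintf(buf, sizeof(buf), "%s", arg);
            for (tok = strtok_r(buf, ",", &save); tok; tok = strtok_r(NULL, ",", &save)) {
                    if (!strcmp(tok, "off"))
                            *mode = RETBLEED_OFF;
                    else if (!strcmp(tok, "auto"))
                            *mode = RETBLEED_AUTO;
                    else if (!strcmp(tok, "ibpb"))
                            *mode = RETBLEED_IBPB;
                    else if (!strcmp(tok, "unret"))
                            *mode = RETBLEED_UNRET;
                    else if (!strcmp(tok, "nosmt"))
                            *nosmt = 1;
            }
    }

    int main(void)
    {
            enum retbleed_mode mode = RETBLEED_AUTO; /* default: retbleed=auto */
            int nosmt = 0;

            parse_retbleed("auto,nosmt", &mode, &nosmt);
            printf("mode=%d nosmt=%d\n", mode, nosmt); /* mode=1 nosmt=1 */
            return 0;
    }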
Documentation/kernel-hacking/hacking.rst

@@ -223,7 +223,7 @@ Module Loading
 Inter Module support
 --------------------
 
-Refer to the file kernel/module.c for more information.
+Refer to the files in kernel/module/ for more information.
 
 Hardware Interfaces
 ===================
Documentation/core-api/symbol-namespaces.rst

@@ -51,8 +51,8 @@ namespace ``USB_STORAGE``, use::
 The corresponding ksymtab entry struct ``kernel_symbol`` will have the member
 ``namespace`` set accordingly. A symbol that is exported without a namespace will
 refer to ``NULL``. There is no default namespace if none is defined. ``modpost``
-and kernel/module.c make use the namespace at build time or module load time,
-respectively.
+and kernel/module/main.c make use the namespace at build time or module load
+time, respectively.
 
 2.2 Using the DEFAULT_SYMBOL_NAMESPACE define
 =============================================
@@ -67,7 +67,7 @@ if:
   then:
     properties:
       clocks:
-        maxItems: 2
+        minItems: 2
 
     required:
       - clock-names
Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Atheros ath9k wireless devices Generic Binding
 
 maintainers:
-  - Kalle Valo <kvalo@codeaurora.org>
+  - Toke Høiland-Jørgensen <toke@toke.dk>
 
 description: |
   This node provides properties for configuring the ath9k wireless device.
Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml

@@ -9,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies ath11k wireless devices Generic Binding
 
 maintainers:
-  - Kalle Valo <kvalo@codeaurora.org>
+  - Kalle Valo <kvalo@kernel.org>
 
 description: |
   These are dt entries for Qualcomm Technologies, Inc. IEEE 802.11ax
Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml

@@ -25,12 +25,12 @@ properties:
       - qcom,sc7280-lpass-cpu
 
   reg:
-    minItems: 2
+    minItems: 1
     maxItems: 6
    description: LPAIF core registers
 
   reg-names:
-    minItems: 2
+    minItems: 1
     maxItems: 6
 
   clocks:

@@ -42,12 +42,12 @@ properties:
     maxItems: 10
 
   interrupts:
-    minItems: 2
+    minItems: 1
     maxItems: 4
    description: LPAIF DMA buffer interrupt
 
   interrupt-names:
-    minItems: 2
+    minItems: 1
     maxItems: 4
 
   qcom,adsp:
|
@ -466,6 +466,10 @@ overlay filesystem and the value of st_ino for filesystem objects may not be
|
||||
persistent and could change even while the overlay filesystem is mounted, as
|
||||
summarized in the `Inode properties`_ table above.
|
||||
|
||||
4) "idmapped mounts"
|
||||
When the upper or lower layers are idmapped mounts overlayfs will be mounted
|
||||
without support for POSIX Access Control Lists (ACLs). This limitation will
|
||||
eventually be lifted.
|
||||
|
||||
Changes to underlying filesystems
|
||||
---------------------------------
|
||||
|
Documentation/livepatch/module-elf-format.rst

@@ -210,11 +210,11 @@ module->symtab.
 =====================================
 Normally, a stripped down copy of a module's symbol table (containing only
 "core" symbols) is made available through module->symtab (See layout_symtab()
-in kernel/module.c). For livepatch modules, the symbol table copied into memory
-on module load must be exactly the same as the symbol table produced when the
-patch module was compiled. This is because the relocations in each livepatch
-relocation section refer to their respective symbols with their symbol indices,
-and the original symbol indices (and thus the symtab ordering) must be
+in kernel/module/kallsyms.c). For livepatch modules, the symbol table copied
+into memory on module load must be exactly the same as the symbol table produced
+when the patch module was compiled. This is because the relocations in each
+livepatch relocation section refer to their respective symbols with their symbol
+indices, and the original symbol indices (and thus the symtab ordering) must be
 preserved in order for apply_relocate_add() to find the right symbol.
 
 For example, take this particular rela from a livepatch module::
Documentation/networking/ip-sysctl.rst

@@ -1091,7 +1091,7 @@ cipso_cache_enable - BOOLEAN
 cipso_cache_bucket_size - INTEGER
 	The CIPSO label cache consists of a fixed size hash table with each
 	hash bucket containing a number of cache entries.  This variable limits
-	the number of entries in each hash bucket; the larger the value the
+	the number of entries in each hash bucket; the larger the value is, the
 	more CIPSO label mappings that can be cached.  When the number of
 	entries in a given hash bucket reaches this limit adding new entries
 	causes the oldest entry in the bucket to be removed to make room.

@@ -1185,7 +1185,7 @@ ip_autobind_reuse - BOOLEAN
 	option should only be set by experts.
 	Default: 0
 
-ip_dynaddr - BOOLEAN
+ip_dynaddr - INTEGER
 	If set non-zero, enables support for dynamic addresses.
 	If set to a non-zero value larger than 1, a kernel log
 	message will be printed when dynamic address rewriting
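Since ip_dynaddr is now documented as an INTEGER rather than a BOOLEAN, scripts and tools that inspect it should parse a numeric value. A hedged, minimal C example that reads the current setting from procfs (error handling kept deliberately small):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/ip_dynaddr", "r");
            int val = 0;

            if (!f) {
                    perror("open ip_dynaddr");
                    return 1;
            }
            if (fscanf(f, "%d", &val) != 1)
                    val = -1;
            fclose(f);

            /* 0 = off; 1 = on; values > 1 additionally log address rewrites. */
            printf("ip_dynaddr = %d\n", val);
            return 0;
    }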
Documentation/sound/soc/dai.rst

@@ -10,7 +10,7 @@ AC97
 ====
 
 AC97 is a five wire interface commonly found on many PC sound cards. It is
-now also popular in many portable devices. This DAI has a reset line and time
+now also popular in many portable devices. This DAI has a RESET line and time
 multiplexes its data on its SDATA_OUT (playback) and SDATA_IN (capture) lines.
 The bit clock (BCLK) is always driven by the CODEC (usually 12.288MHz) and the
 frame (FRAME) (usually 48kHz) is always driven by the controller. Each AC97
Documentation/translations/it_IT/core-api/symbol-namespaces.rst

@@ -50,9 +50,9 @@ Di conseguenza, nella tabella dei simboli del kernel ci sarà una voce
 rappresentata dalla struttura ``kernel_symbol`` che avrà il campo
 ``namespace`` (spazio dei nomi) impostato. Un simbolo esportato senza uno spazio
 dei nomi avrà questo campo impostato a ``NULL``. Non esiste uno spazio dei nomi
-di base. Il programma ``modpost`` e il codice in kernel/module.c usano lo spazio
-dei nomi, rispettivamente, durante la compilazione e durante il caricamento
-di un modulo.
+di base. Il programma ``modpost`` e il codice in kernel/module/main.c usano lo
+spazio dei nomi, rispettivamente, durante la compilazione e durante il
+caricamento di un modulo.
 
 2.2 Usare il simbolo di preprocessore DEFAULT_SYMBOL_NAMESPACE
 ==============================================================
Documentation/translations/zh_CN/kernel-hacking/hacking.rst

@@ -224,7 +224,7 @@ kernel/kmod.c
 模块接口支持
 ------------
 
-更多信息请参考文件kernel/module.c。
+更多信息请参阅kernel/module/目录下的文件。
 
 硬件接口
 ========
Documentation/translations/zh_CN/core-api/symbol-namespaces.rst

@@ -52,7 +52,7 @@
 
 相应的 ksymtab 条目结构体 ``kernel_symbol`` 将有相应的成员 ``命名空间`` 集。
 导出时未指明命名空间的符号将指向 ``NULL`` 。如果没有定义命名空间,则默认没有。
-``modpost`` 和kernel/module.c分别在构建时或模块加载时使用名称空间。
+``modpost`` 和kernel/module/main.c分别在构建时或模块加载时使用名称空间。
 
 2.2 使用DEFAULT_SYMBOL_NAMESPACE定义
 ====================================
MAINTAINERS | 14 ++------------
@@ -425,7 +425,6 @@ F: drivers/acpi/*thermal*
 ACPI VIOT DRIVER
 M:	Jean-Philippe Brucker <jean-philippe@linaro.org>
 L:	linux-acpi@vger.kernel.org
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Maintained
 F:	drivers/acpi/viot.c

@@ -959,7 +958,6 @@ F: drivers/video/fbdev/geode/
 AMD IOMMU (AMD-VI)
 M:	Joerg Roedel <joro@8bytes.org>
 R:	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git

@@ -1039,6 +1037,7 @@ F: arch/arm64/boot/dts/amd/
 
 AMD XGBE DRIVER
 M:	Tom Lendacky <thomas.lendacky@amd.com>
+M:	"Shyam Sundar S K" <Shyam-sundar.S-k@amd.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi

@@ -5102,7 +5101,7 @@ COMPUTE EXPRESS LINK (CXL)
 M:	Alison Schofield <alison.schofield@intel.com>
 M:	Vishal Verma <vishal.l.verma@intel.com>
 M:	Ira Weiny <ira.weiny@intel.com>
-M:	Ben Widawsky <ben.widawsky@intel.com>
+M:	Ben Widawsky <bwidawsk@kernel.org>
 M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-cxl@vger.kernel.org
 S:	Maintained

@@ -6054,7 +6053,6 @@ DMA MAPPING HELPERS
 M:	Christoph Hellwig <hch@lst.de>
 M:	Marek Szyprowski <m.szyprowski@samsung.com>
 R:	Robin Murphy <robin.murphy@arm.com>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Supported
 W:	http://git.infradead.org/users/hch/dma-mapping.git

@@ -6067,7 +6065,6 @@ F: kernel/dma/
 
 DMA MAPPING BENCHMARK
 M:	Xiang Chen <chenxiang66@hisilicon.com>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 F:	kernel/dma/map_benchmark.c
 F:	tools/testing/selftests/dma/

@@ -7666,7 +7663,6 @@ F: drivers/gpu/drm/exynos/exynos_dp*
 
 EXYNOS SYSMMU (IOMMU) driver
 M:	Marek Szyprowski <m.szyprowski@samsung.com>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Maintained
 F:	drivers/iommu/exynos-iommu.c

@@ -10091,7 +10087,6 @@ F: drivers/hid/intel-ish-hid/
 INTEL IOMMU (VT-d)
 M:	David Woodhouse <dwmw2@infradead.org>
 M:	Lu Baolu <baolu.lu@linux.intel.com>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git

@@ -10471,7 +10466,6 @@ F: include/linux/iomap.h
 IOMMU DRIVERS
 M:	Joerg Roedel <joro@8bytes.org>
 M:	Will Deacon <will@kernel.org>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git

@@ -12631,7 +12625,6 @@ F: drivers/i2c/busses/i2c-mt65xx.c
 
 MEDIATEK IOMMU DRIVER
 M:	Yong Wu <yong.wu@mediatek.com>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 S:	Supported

@@ -16683,7 +16676,6 @@ F: drivers/i2c/busses/i2c-qcom-cci.c
 
 QUALCOMM IOMMU
 M:	Rob Clark <robdclark@gmail.com>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 L:	linux-arm-msm@vger.kernel.org
 S:	Maintained

@@ -19326,7 +19318,6 @@ F: arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Supported
 W:	http://git.infradead.org/users/hch/dma-mapping.git

@@ -22008,7 +21999,6 @@ XEN SWIOTLB SUBSYSTEM
 M:	Juergen Gross <jgross@suse.com>
 M:	Stefano Stabellini <sstabellini@kernel.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
-L:	iommu@lists.linux-foundation.org
 L:	iommu@lists.linux.dev
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
Makefile | 2 +-

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Superb Owl
 
 # *DOCUMENTATION*
arch/arm/include/asm/domain.h

@@ -112,19 +112,6 @@ static __always_inline void set_domain(unsigned int val)
 }
 #endif
 
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define modify_domain(dom,type)					\
-	do {							\
-		unsigned int domain = get_domain();		\
-		domain &= ~domain_mask(dom);			\
-		domain = domain | domain_val(dom, type);	\
-		set_domain(domain);				\
-	} while (0)
-
-#else
-static inline void modify_domain(unsigned dom, unsigned type) { }
-#endif
-
 /*
  * Generate the T (user) versions of the LDR/STR and related
  * instructions (inline assembly)
arch/arm/include/asm/mach/map.h

@@ -27,6 +27,7 @@ enum {
	MT_HIGH_VECTORS,
	MT_MEMORY_RWX,
	MT_MEMORY_RW,
+	MT_MEMORY_RO,
	MT_ROM,
	MT_MEMORY_RWX_NONCACHED,
	MT_MEMORY_RW_DTCM,
arch/arm/include/asm/ptrace.h

@@ -163,5 +163,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
	((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;	\
 })
 
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ *	ITSTATE<1:0> are in CPSR<26:25>
+ *	ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+{
+	if ((cpsr & 0x06000400) == 0) {
+		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+		cpsr &= ~PSR_IT_MASK;
+	} else {
+		/* We need to shift left ITSTATE<4:0> */
+		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
+		unsigned long it = cpsr & mask;
+		it <<= 1;
+		it |= it >> (27 - 10);	/* Carry ITSTATE<2> to correct place */
+		it &= mask;
+		cpsr &= ~mask;
+		cpsr |= it;
+	}
+	return cpsr;
+}
+
 #endif /* __ASSEMBLY__ */
 #endif
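For readers following the ITSTATE bit-twiddling above, here is a standalone userspace C harness (illustration only, not kernel code) that packs an 8-bit ITSTATE value into the split CPSR layout and runs it through a copy of it_advance(). The sample ITSTATE values are hypothetical.

    #include <stdio.h>

    #define PSR_IT_MASK 0x0600fc00UL	/* all 8 ITSTATE bits in CPSR */

    /* Userspace copy of the helper above, for illustration only. */
    static unsigned long it_advance(unsigned long cpsr)
    {
            if ((cpsr & 0x06000400) == 0) {
                    cpsr &= ~PSR_IT_MASK;		/* end of IT block */
            } else {
                    const unsigned long mask = 0x06001c00;	/* ITSTATE<4:0> */
                    unsigned long it = cpsr & mask;
                    it <<= 1;
                    it |= it >> (27 - 10);		/* carry ITSTATE<2> down */
                    it &= mask;
                    cpsr = (cpsr & ~mask) | it;
            }
            return cpsr;
    }

    /* ITSTATE<1:0> live in CPSR<26:25>, ITSTATE<7:2> in CPSR<15:10>. */
    static unsigned long itstate_to_cpsr(unsigned long it)
    {
            return ((it & 0x03) << 25) | ((it & 0xfc) << 8);
    }

    static unsigned long cpsr_to_itstate(unsigned long cpsr)
    {
            return ((cpsr >> 25) & 0x03) | ((cpsr >> 8) & 0xfc);
    }

    int main(void)
    {
            /* Hypothetical mid-block state: ITSTATE<4:0> = 0b01010. */
            unsigned long cpsr = itstate_to_cpsr(0x0a);

            cpsr = it_advance(cpsr);
            printf("ITSTATE after advance: 0x%02lx\n", cpsr_to_itstate(cpsr)); /* 0x14 */

            /* ITSTATE<2:0> == 0 means the block is finished: state clears. */
            cpsr = it_advance(itstate_to_cpsr(0x08));
            printf("ITSTATE at block end:  0x%02lx\n", cpsr_to_itstate(cpsr)); /* 0x00 */
            return 0;
    }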
arch/arm/kernel/entry-common.S

@@ -302,6 +302,7 @@ local_restart:
	b	ret_fast_syscall
 #endif
 ENDPROC(vector_swi)
+	.ltorg
 
 /*
  * This is the really slow path.  We're going to be doing
arch/arm/mm/Kconfig

@@ -631,7 +631,11 @@ config CPU_USE_DOMAINS
	bool
	help
	  This option enables or disables the use of domain switching
-	  via the set_fs() function.
+	  using the DACR (domain access control register) to protect memory
+	  domains from each other. In Linux we use three domains: kernel, user
+	  and IO. The domains are used to protect userspace from kernelspace
+	  and to handle IO-space as a special type of memory by assigning
+	  manager or client roles to running code (such as a process).
 
 config CPU_V7M_NUM_IRQ
	int "Number of external interrupts connected to the NVIC"
arch/arm/mm/alignment.c

@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
	if (type == TYPE_LDST)
		do_alignment_finish_ldst(addr, instr, regs, offset);
 
+	if (thumb_mode(regs))
+		regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+
	return 0;
 
 bad_or_fault:
arch/arm/mm/mmu.c

@@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
+	[MT_MEMORY_RO] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			     L_PTE_XN | L_PTE_RDONLY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT,
+		.domain    = DOMAIN_KERNEL,
+	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,

@@ -489,6 +496,7 @@ static void __init build_mem_type_table(void)
 
			/* Also setup NX memory mapping */
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*

@@ -568,6 +576,7 @@ static void __init build_mem_type_table(void)
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+		mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
		/*

@@ -587,6 +596,8 @@ static void __init build_mem_type_table(void)
			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;

@@ -647,6 +658,8 @@ static void __init build_mem_type_table(void)
	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

@@ -1360,7 +1373,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
		map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
		map.virtual = FDT_FIXED_BASE;
		map.length = FDT_FIXED_SIZE;
-		map.type = MT_ROM;
+		map.type = MT_MEMORY_RO;
		create_mapping(&map);
	}
arch/arm/mm/proc-v7-bugs.c

@@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method)
 #else
 static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
-	pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
-		smp_processor_id());
+	pr_info_once("Spectre V2: workarounds disabled by configuration\n");
 
	return SPECTRE_VULNERABLE;
 }

@@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method)
			return SPECTRE_VULNERABLE;
 
		spectre_bhb_method = method;
-	}
-
-	pr_info("CPU%u: Spectre BHB: using %s workaround\n",
-		smp_processor_id(), spectre_bhb_method_name(method));
+
+		pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
+			smp_processor_id(), spectre_bhb_method_name(method));
+	}
 
	return SPECTRE_MITIGATED;
 }
arch/arm/probes/decode.h

@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <asm/probes.h>
+#include <asm/ptrace.h>
 #include <asm/kprobes.h>
 
 void __init arm_probes_decode_init(void);

@@ -35,31 +36,6 @@ void __init find_str_pc_offset(void);
 #endif
 
 
-/*
- * Update ITSTATE after normal execution of an IT block instruction.
- *
- * The 8 IT state bits are split into two parts in CPSR:
- *	ITSTATE<1:0> are in CPSR<26:25>
- *	ITSTATE<7:2> are in CPSR<15:10>
- */
-static inline unsigned long it_advance(unsigned long cpsr)
-{
-	if ((cpsr & 0x06000400) == 0) {
-		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
-		cpsr &= ~PSR_IT_MASK;
-	} else {
-		/* We need to shift left ITSTATE<4:0> */
-		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
-		unsigned long it = cpsr & mask;
-		it <<= 1;
-		it |= it >> (27 - 10);	/* Carry ITSTATE<2> to correct place */
-		it &= mask;
-		cpsr &= ~mask;
-		cpsr |= it;
-	}
-	return cpsr;
-}
-
 static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
 {
	long cpsr = regs->ARM_cpsr;
arch/loongarch/Kconfig

@@ -54,7 +54,6 @@ config LOONGARCH
	select GENERIC_CMOS_UPDATE
	select GENERIC_CPU_AUTOPROBE
	select GENERIC_ENTRY
-	select GENERIC_FIND_FIRST_BIT
	select GENERIC_GETTIMEOFDAY
	select GENERIC_IRQ_MULTI_HANDLER
	select GENERIC_IRQ_PROBE

@@ -77,7 +76,6 @@ config LOONGARCH
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select HAVE_ASM_MODVERSIONS
	select HAVE_CONTEXT_TRACKING
-	select HAVE_COPY_THREAD_TLS
	select HAVE_DEBUG_STACKOVERFLOW
	select HAVE_DMA_CONTIGUOUS
	select HAVE_EXIT_THREAD

@@ -86,8 +84,6 @@ config LOONGARCH
	select HAVE_IOREMAP_PROT
	select HAVE_IRQ_EXIT_ON_IRQ_STACK
	select HAVE_IRQ_TIME_ACCOUNTING
-	select HAVE_MEMBLOCK
-	select HAVE_MEMBLOCK_NODE_MAP
	select HAVE_MOD_ARCH_SPECIFIC
	select HAVE_NMI
	select HAVE_PERF_EVENTS
arch/loongarch/include/asm/fpregdef.h

@@ -48,6 +48,5 @@
 #define fcsr1	$r1
 #define fcsr2	$r2
 #define fcsr3	$r3
-#define vcsr16	$r16
 
 #endif /* _ASM_FPREGDEF_H */
arch/loongarch/include/asm/page.h

@@ -6,6 +6,7 @@
 #define _ASM_PAGE_H
 
 #include <linux/const.h>
+#include <asm/addrspace.h>
 
 /*
  * PAGE_SHIFT determines the page size
arch/loongarch/include/asm/processor.h

@@ -80,7 +80,6 @@ BUILD_FPR_ACCESS(64)
 
 struct loongarch_fpu {
	unsigned int	fcsr;
-	unsigned int	vcsr;
	uint64_t	fcc;	/* 8x8 */
	union fpureg	fpr[NUM_FPU_REGS];
 };

@@ -161,7 +160,6 @@ struct thread_struct {
	 */							\
	.fpu			= {				\
		.fcsr		= 0,				\
-		.vcsr		= 0,				\
		.fcc		= 0,				\
		.fpr		= {{{0,},},},			\
	},							\
arch/loongarch/kernel/asm-offsets.c

@@ -166,7 +166,6 @@ void output_thread_fpu_defines(void)
 
	OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
	OFFSET(THREAD_FCC, loongarch_fpu, fcc);
-	OFFSET(THREAD_VCSR, loongarch_fpu, vcsr);
	BLANK();
 }
arch/loongarch/kernel/fpu.S

@@ -146,16 +146,6 @@
	movgr2fcsr	fcsr0, \tmp0
	.endm
 
-	.macro	sc_save_vcsr base, tmp0
-	movfcsr2gr	\tmp0, vcsr16
-	EX	st.w	\tmp0, \base, 0
-	.endm
-
-	.macro	sc_restore_vcsr base, tmp0
-	EX	ld.w	\tmp0, \base, 0
-	movgr2fcsr	vcsr16, \tmp0
-	.endm
-
 /*
  * Save a thread's fp context.
  */
arch/loongarch/kernel/numa.c

@@ -429,7 +429,6 @@ int __init init_numa_memory(void)
	return 0;
 }
 
-EXPORT_SYMBOL(init_numa_memory);
 #endif
 
 void __init paging_init(void)
arch/loongarch/vdso/Makefile

@@ -21,6 +21,7 @@ ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
 endif
 
 cflags-vdso := $(ccflags-vdso) \
+	-isystem $(shell $(CC) -print-file-name=include) \
	$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
	-O2 -g -fno-strict-aliasing -fno-common -fno-builtin -G0 \
	-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
arch/powerpc/platforms/powernv/rng.c

@@ -176,12 +176,8 @@ static int __init pnv_get_random_long_early(unsigned long *v)
		    NULL) != pnv_get_random_long_early)
		return 0;
 
-	for_each_compatible_node(dn, NULL, "ibm,power-rng") {
-		if (rng_create(dn))
-			continue;
-		/* Create devices for hwrng driver */
-		of_platform_device_create(dn, NULL, NULL);
-	}
+	for_each_compatible_node(dn, NULL, "ibm,power-rng")
+		rng_create(dn);
 
	if (!ppc_md.get_random_seed)
		return 0;

@@ -205,10 +201,18 @@ void __init pnv_rng_init(void)
 
 static int __init pnv_rng_late_init(void)
 {
+	struct device_node *dn;
	unsigned long v;
 
	/* In case it wasn't called during init for some other reason. */
	if (ppc_md.get_random_seed == pnv_get_random_long_early)
		pnv_get_random_long_early(&v);
+
+	if (ppc_md.get_random_seed == powernv_get_random_long) {
+		for_each_compatible_node(dn, NULL, "ibm,power-rng")
+			of_platform_device_create(dn, NULL, NULL);
+	}
+
	return 0;
 }
 machine_subsys_initcall(powernv, pnv_rng_late_init);
arch/riscv/Kconfig

@@ -38,7 +38,7 @@ config RISCV
	select ARCH_SUPPORTS_ATOMIC_RMW
	select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
	select ARCH_SUPPORTS_HUGETLBFS if MMU
-	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+	select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
	select ARCH_USE_MEMTEST
	select ARCH_USE_QUEUED_RWLOCKS
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
arch/sh/include/asm/io.h

@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
 #endif /* CONFIG_HAVE_IOREMAP_PROT */
 
 #else /* CONFIG_MMU */
-#define iounmap(addr)		do { } while (0)
-#define ioremap(offset, size)	((void __iomem *)(unsigned long)(offset))
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+	return (void __iomem *)(unsigned long)offset;
+}
+
+static inline void iounmap(volatile void __iomem *addr) { }
 #endif /* CONFIG_MMU */
 
 #define ioremap_uc	ioremap
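The macro-to-inline conversion above addresses a classic C pitfall: an empty do-while macro never evaluates or type-checks its argument, so a pointer used only by iounmap() can trip -Wunused-variable, while a static inline consumes and type-checks it. A hedged, standalone sketch of the difference (not the kernel's header):

    /* Macro version: the argument vanishes at preprocessing time. */
    #define iounmap_macro(addr) do { } while (0)

    /* Inline version: the argument is a real, type-checked parameter. */
    static inline void iounmap_inline(volatile void *addr) { }

    void macro_user(void)
    {
            void *p = (void *)0x1000;
            iounmap_macro(p);   /* -Wunused-variable: 'p' is never evaluated */
    }

    void inline_user(void)
    {
            void *p = (void *)0x1000;
            iounmap_inline(p);  /* 'p' is consumed; no warning, types checked */
    }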
arch/um/include/asm/page.h

@@ -102,8 +102,8 @@ extern unsigned long uml_physmem;
  * casting is the right thing, but 32-bit UML can't have 64-bit virtual
  * addresses
  */
-#define __pa(virt) to_phys((void *) (unsigned long) (virt))
-#define __va(phys) to_virt((unsigned long) (phys))
+#define __pa(virt) uml_to_phys((void *) (unsigned long) (virt))
+#define __va(phys) uml_to_virt((unsigned long) (phys))
 
 #define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
 #define pfn_to_phys(pfn) PFN_PHYS(pfn)
arch/um/include/shared/mem.h

@@ -9,12 +9,12 @@
 extern int phys_mapping(unsigned long phys, unsigned long long *offset_out);
 
 extern unsigned long uml_physmem;
-static inline unsigned long to_phys(void *virt)
+static inline unsigned long uml_to_phys(void *virt)
 {
	return(((unsigned long) virt) - uml_physmem);
 }
 
-static inline void *to_virt(unsigned long phys)
+static inline void *uml_to_virt(unsigned long phys)
 {
	return((void *) uml_physmem + phys);
 }
arch/um/os-Linux/skas/process.c

@@ -251,7 +251,7 @@ static int userspace_tramp(void *stack)
	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
 
-	fd = phys_mapping(to_phys(__syscall_stub_start), &offset);
+	fd = phys_mapping(uml_to_phys(__syscall_stub_start), &offset);
	addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
		      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
	if (addr == MAP_FAILED) {

@@ -261,7 +261,7 @@ static int userspace_tramp(void *stack)
	}
 
	if (stack != NULL) {
-		fd = phys_mapping(to_phys(stack), &offset);
+		fd = phys_mapping(uml_to_phys(stack), &offset);
		addr = mmap((void *) STUB_DATA,
			    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
			    MAP_FIXED | MAP_SHARED, fd, offset);

@@ -534,7 +534,7 @@ int copy_context_skas0(unsigned long new_stack, int pid)
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
-	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);
+	int new_fd = phys_mapping(uml_to_phys((void *)new_stack), &new_offset);
 
	/*
	 * prepare offset and fd of child's stack as argument for parent's
arch/x86/Kconfig | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
@@ -462,29 +462,6 @@ config GOLDFISH
	def_bool y
	depends on X86_GOLDFISH
 
-config RETPOLINE
-	bool "Avoid speculative indirect branches in kernel"
-	select OBJTOOL if HAVE_OBJTOOL
-	default y
-	help
-	  Compile kernel with the retpoline compiler options to guard against
-	  kernel-to-user data leaks by avoiding speculative indirect
-	  branches. Requires a compiler with -mindirect-branch=thunk-extern
-	  support for full protection. The kernel may run slower.
-
-config CC_HAS_SLS
-	def_bool $(cc-option,-mharden-sls=all)
-
-config SLS
-	bool "Mitigate Straight-Line-Speculation"
-	depends on CC_HAS_SLS && X86_64
-	select OBJTOOL if HAVE_OBJTOOL
-	default n
-	help
-	  Compile the kernel with straight-line-speculation options to guard
-	  against straight line speculation. The kernel image might be slightly
-	  larger.
-
 config X86_CPU_RESCTRL
	bool "x86 CPU resource control support"
	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)

@@ -2453,6 +2430,91 @@ source "kernel/livepatch/Kconfig"
 
 endmenu
 
+config CC_HAS_SLS
+	def_bool $(cc-option,-mharden-sls=all)
+
+config CC_HAS_RETURN_THUNK
+	def_bool $(cc-option,-mfunction-return=thunk-extern)
+
+menuconfig SPECULATION_MITIGATIONS
+	bool "Mitigations for speculative execution vulnerabilities"
+	default y
+	help
+	  Say Y here to enable options which enable mitigations for
+	  speculative execution hardware vulnerabilities.
+
+	  If you say N, all mitigations will be disabled. You really
+	  should know what you are doing to say so.
+
+if SPECULATION_MITIGATIONS
+
+config PAGE_TABLE_ISOLATION
+	bool "Remove the kernel mapping in user mode"
+	default y
+	depends on (X86_64 || X86_PAE)
+	help
+	  This feature reduces the number of hardware side channels by
+	  ensuring that the majority of kernel addresses are not mapped
+	  into userspace.
+
+	  See Documentation/x86/pti.rst for more details.
+
+config RETPOLINE
+	bool "Avoid speculative indirect branches in kernel"
+	select OBJTOOL if HAVE_OBJTOOL
+	default y
+	help
+	  Compile kernel with the retpoline compiler options to guard against
+	  kernel-to-user data leaks by avoiding speculative indirect
+	  branches. Requires a compiler with -mindirect-branch=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+config RETHUNK
+	bool "Enable return-thunks"
+	depends on RETPOLINE && CC_HAS_RETURN_THUNK
+	select OBJTOOL if HAVE_OBJTOOL
+	default y
+	help
+	  Compile the kernel with the return-thunks compiler option to guard
+	  against kernel-to-user data leaks by avoiding return speculation.
+	  Requires a compiler with -mfunction-return=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+config CPU_UNRET_ENTRY
+	bool "Enable UNRET on kernel entry"
+	depends on CPU_SUP_AMD && RETHUNK
+	default y
+	help
+	  Compile the kernel with support for the retbleed=unret mitigation.
+
+config CPU_IBPB_ENTRY
+	bool "Enable IBPB on kernel entry"
+	depends on CPU_SUP_AMD
+	default y
+	help
+	  Compile the kernel with support for the retbleed=ibpb mitigation.
+
+config CPU_IBRS_ENTRY
+	bool "Enable IBRS on kernel entry"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Compile the kernel with support for the spectre_v2=ibrs mitigation.
+	  This mitigates both spectre_v2 and retbleed at great cost to
+	  performance.
+
+config SLS
+	bool "Mitigate Straight-Line-Speculation"
+	depends on CC_HAS_SLS && X86_64
+	select OBJTOOL if HAVE_OBJTOOL
+	default n
+	help
+	  Compile the kernel with straight-line-speculation options to guard
+	  against straight line speculation. The kernel image might be slightly
+	  larger.
+
+endif
+
 config ARCH_HAS_ADD_PAGES
	def_bool y
	depends on ARCH_ENABLE_MEMORY_HOTPLUG
arch/x86/Makefile

@@ -21,6 +21,12 @@ ifdef CONFIG_CC_IS_CLANG
 RETPOLINE_CFLAGS	:= -mretpoline-external-thunk
 RETPOLINE_VDSO_CFLAGS	:= -mretpoline
 endif
+
+ifdef CONFIG_RETHUNK
+RETHUNK_CFLAGS		:= -mfunction-return=thunk-extern
+RETPOLINE_CFLAGS	+= $(RETHUNK_CFLAGS)
+endif
+
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
arch/x86/boot/compressed/ident_map_64.c

@@ -110,6 +110,7 @@ void kernel_add_identity_map(unsigned long start, unsigned long end)
 void initialize_identity_maps(void *rmode)
 {
	unsigned long cmdline;
+	struct setup_data *sd;
 
	/* Exclude the encryption mask from __PHYSICAL_MASK */
	physical_mask &= ~sme_me_mask;

@@ -163,6 +164,18 @@ void initialize_identity_maps(void *rmode)
	cmdline = get_cmd_line_ptr();
	kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
 
+	/*
+	 * Also map the setup_data entries passed via boot_params in case they
+	 * need to be accessed by uncompressed kernel via the identity mapping.
+	 */
+	sd = (struct setup_data *)boot_params->hdr.setup_data;
+	while (sd) {
+		unsigned long sd_addr = (unsigned long)sd;
+
+		kernel_add_identity_map(sd_addr, sd_addr + sizeof(*sd) + sd->len);
+		sd = (struct setup_data *)sd->next;
+	}
+
	sev_prep_identity_maps(top_level_pgt);
 
	/* Load the new page-table. */
arch/x86/entry/Makefile

@@ -11,7 +11,7 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
 
 CFLAGS_common.o		+= -fno-stack-protector
 
-obj-y				:= entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+obj-y				:= entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y				+= common.o
 
 obj-y				+= vdso/
arch/x86/entry/calling.h

@@ -7,6 +7,8 @@
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
 #include <asm/ptrace-abi.h>
+#include <asm/msr.h>
+#include <asm/nospec-branch.h>
 
 /*

@@ -282,6 +284,66 @@ For 32-bit we have the following conventions - kernel is built with
 
 #endif
 
+/*
+ * IBRS kernel mitigation for Spectre_v2.
+ *
+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
+ * the regs it uses (AX, CX, DX). Must be called before the first RET
+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
+ *
+ * The optional argument is used to save/restore the current value,
+ * which is used on the paranoid paths.
+ *
+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
+ */
+.macro IBRS_ENTER save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	rdmsr
+	shl	$32, %rdx
+	or	%rdx, %rax
+	mov	%rax, \save_reg
+	test	$SPEC_CTRL_IBRS, %eax
+	jz	.Ldo_wrmsr_\@
+	lfence
+	jmp	.Lend_\@
+.Ldo_wrmsr_\@:
+.endif
+
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+#endif
+.endm
+
+/*
+ * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
+ * regs. Must be called after the last RET.
+ */
+.macro IBRS_EXIT save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	mov	\save_reg, %rdx
+.else
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	andl	$(~SPEC_CTRL_IBRS), %edx
+.endif
+
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+#endif
+.endm
+
 /*
  * Mitigate Spectre v1 for conditional swapgs code paths.
  *
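The rdmsr/wrmsr choreography in IBRS_ENTER/IBRS_EXIT is all about a 64-bit MSR value split across EDX:EAX — rdmsr returns the high half in EDX and the low half in EAX, which the macro reassembles with shl/or, and wrmsr expects the same split, produced with shr. A minimal C model of that packing (no real MSR access; illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of what 'shl $32, %rdx; or %rdx, %rax' does after rdmsr. */
    static uint64_t msr_from_edx_eax(uint32_t edx, uint32_t eax)
    {
            return ((uint64_t)edx << 32) | eax;
    }

    /* Model of the 'movl %edx, %eax; shr $32, %rdx' split before wrmsr. */
    static void msr_to_edx_eax(uint64_t val, uint32_t *edx, uint32_t *eax)
    {
            *eax = (uint32_t)val;
            *edx = (uint32_t)(val >> 32);
    }

    int main(void)
    {
            uint32_t edx, eax;
            uint64_t spec_ctrl = 0x1;  /* SPEC_CTRL_IBRS is bit 0 */

            msr_to_edx_eax(spec_ctrl, &edx, &eax);
            printf("wrmsr gets edx=%#x eax=%#x\n", edx, eax);
            printf("round trip: %#llx\n",
                   (unsigned long long)msr_from_edx_eax(edx, eax));
            return 0;
    }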
arch/x86/entry/entry.S (new file) | 22 ++++++++++++++++++++++
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common place for both 32- and 64-bit entry routines.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+#include <asm/msr-index.h>
+
+.pushsection .noinstr.text, "ax"
+
+SYM_FUNC_START(entry_ibpb)
+	movl	$MSR_IA32_PRED_CMD, %ecx
+	movl	$PRED_CMD_IBPB, %eax
+	xorl	%edx, %edx
+	wrmsr
+	RET
+SYM_FUNC_END(entry_ibpb)
+/* For KVM */
+EXPORT_SYMBOL_GPL(entry_ibpb);
+
+.popsection
arch/x86/entry/entry_32.S

@@ -698,7 +698,6 @@ SYM_CODE_START(__switch_to_asm)
	movl	%ebx, PER_CPU_VAR(__stack_chk_guard)
 #endif
 
-#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated

@@ -707,7 +706,6 @@ SYM_CODE_START(__switch_to_asm)
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
 
	/* Restore flags or the incoming task to restore AC state. */
	popfl
arch/x86/entry/entry_64.S

@@ -85,7 +85,7 @@
 */
 
 SYM_CODE_START(entry_SYSCALL_64)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
	ENDBR
 
	swapgs

@@ -112,6 +112,11 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	movq	%rsp, %rdi
	/* Sign extend the lower 32bit as syscall numbers are treated as int */
	movslq	%eax, %rsi
+
+	/* clobbers %rax, make sure it is after saving the syscall nr */
+	IBRS_ENTER
+	UNTRAIN_RET
+
	call	do_syscall_64		/* returns with IRQs disabled */
 
	/*

@@ -191,6 +196,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	 * perf profiles. Nothing jumps here.
	 */
 syscall_return_via_sysret:
+	IBRS_EXIT
	POP_REGS pop_rdi=0
 
	/*

@@ -249,7 +255,6 @@ SYM_FUNC_START(__switch_to_asm)
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
 #endif
 
-#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated

@@ -258,7 +263,6 @@ SYM_FUNC_START(__switch_to_asm)
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
 
	/* restore callee-saved registers */
	popq	%r15

@@ -322,13 +326,13 @@ SYM_CODE_END(ret_from_fork)
 #endif
 .endm
 
-/* Save all registers in pt_regs */
-SYM_CODE_START_LOCAL(push_and_clear_regs)
+SYM_CODE_START_LOCAL(xen_error_entry)
	UNWIND_HINT_FUNC
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
+	UNTRAIN_RET
	RET
-SYM_CODE_END(push_and_clear_regs)
+SYM_CODE_END(xen_error_entry)
 
 /**
  * idtentry_body - Macro to emit code calling the C function

@@ -337,9 +341,6 @@ SYM_CODE_END(xen_error_entry)
  */
 .macro idtentry_body cfunc has_error_code:req
 
-	call push_and_clear_regs
-	UNWIND_HINT_REGS
-
	/*
	 * Call error_entry() and switch to the task stack if from userspace.
	 *

@@ -349,7 +350,7 @@ SYM_CODE_END(xen_error_entry)
	 * switch the CR3.  So it can skip invoking error_entry().
	 */
	ALTERNATIVE "call error_entry; movq %rax, %rsp", \
-		"", X86_FEATURE_XENPV
+		"call xen_error_entry", X86_FEATURE_XENPV
 
	ENCODE_FRAME_POINTER
	UNWIND_HINT_REGS

@@ -612,6 +613,7 @@ __irqentry_text_end:
 
 SYM_CODE_START_LOCAL(common_interrupt_return)
 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
+	IBRS_EXIT
 #ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)

@@ -725,6 +727,7 @@ native_irq_return_ldt:
	pushq	%rdi				/* Stash user RDI */
	swapgs					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */
+	UNTRAIN_RET
 
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */

@@ -897,6 +900,9 @@ SYM_CODE_END(xen_failsafe_callback)
 *		1 -> no SWAPGS on exit
 *
 *     Y        GSBASE value at entry, must be restored in paranoid_exit
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
 */
 SYM_CODE_START_LOCAL(paranoid_entry)
	UNWIND_HINT_FUNC

@@ -940,7 +946,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
	 * is needed here.
	 */
	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
-	RET
+	jmp	.Lparanoid_gsbase_done
 
 .Lparanoid_entry_checkgs:
	/* EBX = 1 -> kernel GSBASE active, no restore required */

@@ -959,8 +965,16 @@ SYM_CODE_START_LOCAL(paranoid_entry)
	xorl	%ebx, %ebx
	swapgs
 .Lparanoid_kernel_gsbase:
 
	FENCE_SWAPGS_KERNEL_ENTRY
+.Lparanoid_gsbase_done:
+
+	/*
+	 * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
+	 * CR3 above, keep the old value in a callee saved register.
+	 */
+	IBRS_ENTER save_reg=%r15
+	UNTRAIN_RET
+
	RET
 SYM_CODE_END(paranoid_entry)
 

@@ -982,9 +996,19 @@ SYM_CODE_END(paranoid_entry)
 *		1 -> no SWAPGS on exit
 *
 *     Y        User space GSBASE, must be restored unconditionally
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
 */
 SYM_CODE_START_LOCAL(paranoid_exit)
	UNWIND_HINT_REGS
+
+	/*
+	 * Must restore IBRS state before both CR3 and %GS since we need access
+	 * to the per-CPU x86_spec_ctrl_shadow variable.
+	 */
+	IBRS_EXIT save_reg=%r15
+
	/*
	 * The order of operations is important. RESTORE_CR3 requires
	 * kernel GSBASE.

@@ -1017,6 +1041,10 @@ SYM_CODE_END(paranoid_exit)
 */
 SYM_CODE_START_LOCAL(error_entry)
	UNWIND_HINT_FUNC
+
+	PUSH_AND_CLEAR_REGS save_ret=1
+	ENCODE_FRAME_POINTER 8
+
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace
 

@@ -1028,9 +1056,12 @@ SYM_CODE_START_LOCAL(error_entry)
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	IBRS_ENTER
+	UNTRAIN_RET
 
	leaq	8(%rsp), %rdi			/* arg0 = pt_regs pointer */
 .Lerror_entry_from_usermode_after_swapgs:
+
	/* Put us onto the real thread stack. */
	call	sync_regs
	RET

@@ -1065,6 +1096,7 @@ SYM_CODE_START_LOCAL(error_entry)
 .Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
	leaq	8(%rsp), %rax			/* return pt_regs pointer */
+	ANNOTATE_UNRET_END
	RET
 
 .Lbstep_iret:

@@ -1080,6 +1112,8 @@ SYM_CODE_START_LOCAL(error_entry)
	swapgs
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	IBRS_ENTER
+	UNTRAIN_RET
 
	/*
	 * Pretend that the exception came from user mode: set up pt_regs

@@ -1185,6 +1219,9 @@ SYM_CODE_START(asm_exc_nmi)
	PUSH_AND_CLEAR_REGS rdx=(%rdx)
	ENCODE_FRAME_POINTER
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're

@@ -1409,6 +1446,9 @@ end_repeat_nmi:
	movq	$-1, %rsi
	call	exc_nmi
 
+	/* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
+	IBRS_EXIT save_reg=%r15
+
	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
arch/x86/entry/entry_64_compat.S

@@ -4,7 +4,6 @@
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
-#include "calling.h"
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
 #include <asm/errno.h>

@@ -14,9 +13,12 @@
 #include <asm/irqflags.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
 
+#include "calling.h"
+
 .section .entry.text, "ax"
 
 /*

@@ -47,7 +49,7 @@
 * 0(%ebp) arg6
 */
 SYM_CODE_START(entry_SYSENTER_compat)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
	ENDBR
	/* Interrupts are off on entry. */
	swapgs

@@ -88,6 +90,9 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
 
	cld
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
	 * ourselves.  To save a few cycles, we can check whether

@@ -174,7 +179,7 @@ SYM_CODE_END(entry_SYSENTER_compat)
 * 0(%esp) arg6
 */
 SYM_CODE_START(entry_SYSCALL_compat)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
	ENDBR
	/* Interrupts are off on entry. */
	swapgs

@@ -203,6 +208,9 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
	PUSH_AND_CLEAR_REGS rcx=%rbp rax=$-ENOSYS
	UNWIND_HINT_REGS
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */

@@ -217,6 +225,8 @@ sysret32_from_system_call:
	 */
	STACKLEAK_ERASE
 
+	IBRS_EXIT
+
	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */

@@ -295,7 +305,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
 * ebp  arg6
 */
 SYM_CODE_START(entry_INT80_compat)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
	ENDBR
	/*
	 * Interrupts are off on entry.

@@ -337,6 +347,9 @@ SYM_CODE_START(entry_INT80_compat)
 
	cld
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
	movq	%rsp, %rdi
	call	do_int80_syscall_32
	jmp	swapgs_restore_regs_and_return_to_usermode
arch/x86/entry/vdso/Makefile

@@ -92,6 +92,7 @@ endif
 endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
arch/x86/entry/vsyscall/vsyscall_emu_64.S

@@ -19,17 +19,20 @@ __vsyscall_page:
 
	mov $__NR_gettimeofday, %rax
	syscall
-	RET
+	ret
+	int3
 
	.balign 1024, 0xcc
	mov $__NR_time, %rax
	syscall
-	RET
+	ret
+	int3
 
	.balign 1024, 0xcc
	mov $__NR_getcpu, %rax
	syscall
-	RET
+	ret
+	int3
 
	.balign 4096, 0xcc
arch/x86/include/asm/alternative.h

@@ -76,6 +76,7 @@ extern int alternatives_patched;
 extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 extern void apply_retpolines(s32 *start, s32 *end);
+extern void apply_returns(s32 *start, s32 *end);
 extern void apply_ibt_endbr(s32 *start, s32 *end);
 
 struct module;
arch/x86/include/asm/cpufeatures.h

@@ -203,8 +203,8 @@
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED		( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE		( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE	( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS		( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT		( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */

@@ -296,6 +296,12 @@
 #define X86_FEATURE_PER_THREAD_MBA	(11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
 #define X86_FEATURE_SGX1		(11*32+ 8) /* "" Basic SGX */
 #define X86_FEATURE_SGX2		(11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB		(11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL		(11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE		(11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */

@@ -316,6 +322,7 @@
 #define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO		(13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC		(13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_BTC_NO		(13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS			(13*32+31) /* Branch Sampling available */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */

@@ -447,5 +454,6 @@
 #define X86_BUG_ITLB_MULTIHIT		X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS			X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA		X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED		X86_BUG(26) /* CPU is affected by RETBleed */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
arch/x86/include/asm/disabled-features.h

@@ -50,6 +50,25 @@
 # define DISABLE_PTI		(1 << (X86_FEATURE_PTI & 31))
 #endif
 
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE	0
+#else
+# define DISABLE_RETPOLINE	((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK	0
+#else
+# define DISABLE_RETHUNK	(1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET		0
+#else
+# define DISABLE_UNRET		(1 << (X86_FEATURE_UNRET & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD		0
 #else

@@ -82,7 +101,7 @@
 #define DISABLED_MASK8	(DISABLE_TDX_GUEST)
 #define DISABLED_MASK9	(DISABLE_SGX)
 #define DISABLED_MASK10	0
-#define DISABLED_MASK11	0
+#define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
 #define DISABLED_MASK12	0
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
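The DISABLE_* masks rely on the cpufeatures numbering scheme, where a feature id encodes (word * 32 + bit); masking with & 31 recovers the bit position within its 32-bit word. A small, hedged demonstration using the definitions visible in the cpufeatures.h hunk above (standalone, not kernel code):

    #include <stdio.h>

    /* From the cpufeatures.h hunk above: word 11, bits 12..15. */
    #define X86_FEATURE_RETPOLINE		(11*32 + 12)
    #define X86_FEATURE_RETPOLINE_LFENCE	(11*32 + 13)
    #define X86_FEATURE_RETHUNK			(11*32 + 14)
    #define X86_FEATURE_UNRET			(11*32 + 15)

    #define DISABLE_RETPOLINE	((1 << (X86_FEATURE_RETPOLINE & 31)) | \
				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
    #define DISABLE_RETHUNK	(1 << (X86_FEATURE_RETHUNK & 31))
    #define DISABLE_UNRET	(1 << (X86_FEATURE_UNRET & 31))

    int main(void)
    {
            /* Word index and in-word bit for one feature. */
            printf("RETPOLINE: word %d, bit %d\n",
                   X86_FEATURE_RETPOLINE / 32, X86_FEATURE_RETPOLINE & 31);

            /* Combined DISABLED_MASK11 when all three are compiled out: 0xf000. */
            printf("mask11 = %#x\n",
                   DISABLE_RETPOLINE | DISABLE_RETHUNK | DISABLE_UNRET);
            return 0;
    }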
arch/x86/include/asm/linkage.h

@@ -19,19 +19,27 @@
 #define __ALIGN_STR	__stringify(__ALIGN)
 #endif
 
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define RET	jmp __x86_return_thunk
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define RET	ret; int3
 #else
 #define RET	ret
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #else /* __ASSEMBLY__ */
 
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define ASM_RET	"jmp __x86_return_thunk\n\t"
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define ASM_RET	"ret; int3\n\t"
 #else
 #define ASM_RET	"ret\n\t"
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #endif /* __ASSEMBLY__ */
arch/x86/include/asm/msr-index.h

@@ -51,6 +51,8 @@
 #define SPEC_CTRL_STIBP			BIT(SPEC_CTRL_STIBP_SHIFT)	/* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
 #define SPEC_CTRL_SSBD			BIT(SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */
+#define SPEC_CTRL_RRSBA_DIS_S_SHIFT	6	   /* Disable RRSBA behavior */
+#define SPEC_CTRL_RRSBA_DIS_S		BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */

@@ -93,6 +95,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		BIT(0)	/* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		BIT(1)	/* Enhanced IBRS support */
+#define ARCH_CAP_RSBA			BIT(2)	/* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	BIT(3)	/* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO			BIT(4)	/*
						 * Not susceptible to Speculative Store Bypass

@@ -140,6 +143,13 @@
						 * bit available to control VERW
						 * behavior.
						 */
+#define ARCH_CAP_RRSBA			BIT(19)	/*
+						 * Indicates RET may use predictors
+						 * other than the RSB. With eIBRS
+						 * enabled predictions in kernel mode
+						 * are restricted to targets in
+						 * kernel.
+						 */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH			BIT(0)	/*

@@ -567,6 +577,9 @@
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9
 
+#define MSR_ZEN2_SPECTRAL_CHICKEN	0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT	BIT_ULL(1)
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL		0xc0010230
 #define MSR_F16H_L2I_PERF_CTR		0xc0010231
@@ -11,6 +11,7 @@
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>

#define RETPOLINE_THUNK_SIZE	32

@@ -75,6 +76,23 @@
	.popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro ANNOTATE_UNRET_END
#ifdef CONFIG_DEBUG_ENTRY
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
@@ -105,10 +123,34 @@
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
.endm

#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET	""
#endif

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While zen_untrain_ret() doesn't clobber anything, it does require a stack;
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
	ANNOTATE_UNRET_END
	ALTERNATIVE_2 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
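A rough C model of the ALTERNATIVE_2 selection in UNTRAIN_RET (hypothetical helper, not kernel code). In ALTERNATIVE_2 the later alternative wins when both features are set, so the ENTRY_IBPB test comes first here.

	#include <stdbool.h>

	enum untrain_action { UNTRAIN_NOPS, UNTRAIN_ZEN_RET, UNTRAIN_ENTRY_IBPB };

	static enum untrain_action pick_untrain(bool feat_unret, bool feat_entry_ibpb)
	{
		if (feat_entry_ibpb)
			return UNTRAIN_ENTRY_IBPB;	/* "call entry_ibpb" */
		if (feat_unret)
			return UNTRAIN_ZEN_RET;		/* "call zen_untrain_ret" */
		return UNTRAIN_NOPS;			/* original padding stays */
	}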
@@ -120,17 +162,20 @@
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

extern retpoline_thunk_t __x86_indirect_thunk_array[];

#ifdef CONFIG_X86_64

/*
@@ -193,6 +238,7 @@ enum spectre_v2_mitigation {
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
@@ -235,6 +281,9 @@ static inline void indirect_branch_prediction_barrier(void)

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void write_spec_ctrl_current(u64 val, bool force);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
@@ -244,18 +293,16 @@ extern u64 x86_spec_ctrl_base;
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
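A usage sketch for the bracketing macros above; the firmware function pointer and its signature are made up for illustration. IBRS is set in SPEC_CTRL for the duration of the call and the current task value is restored afterwards.

	/* Hypothetical call site; only the two macros come from this header. */
	static unsigned long fw_call_with_ibrs(unsigned long (*fw_fn)(void))
	{
		unsigned long ret;

		firmware_restrict_branch_speculation_start();	/* sets SPEC_CTRL_IBRS, disables preemption */
		ret = fw_fn();
		firmware_restrict_branch_speculation_end();	/* restores spec_ctrl_current(), re-enables preemption */
		return ret;
	}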
@@ -120,6 +120,9 @@ void *extend_brk(size_t size, size_t align);
	static char __brk_##name[size]

extern void probe_roms(void);

void clear_bss(void);

#ifdef __i386__

asmlinkage void __init i386_start_kernel(void);
@@ -21,6 +21,16 @@
 * relative displacement across sections.
 */

/*
 * The trampoline is 8 bytes and of the general form:
 *
 *   jmp.d32 \func
 *   ud1 %esp, %ecx
 *
 * That trailing #UD provides both a speculation stop and serves as a unique
 * 3 byte signature identifying static call trampolines. Also see tramp_ud[]
 * and __static_call_fixup().
 */
#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns)			\
	asm(".pushsection .static_call.text, \"ax\"		\n"	\
	    ".align 4						\n"	\
@@ -28,7 +38,7 @@
	    STATIC_CALL_TRAMP_STR(name) ":			\n"	\
	    ANNOTATE_NOENDBR					\
	    insns "						\n"	\
	    ".byte 0x53, 0x43, 0x54				\n"	\
	    ".byte 0x0f, 0xb9, 0xcc				\n"	\
	    ".type " STATIC_CALL_TRAMP_STR(name) ", @function	\n"	\
	    ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n"	\
	    ".popsection					\n")

@@ -36,8 +46,13 @@
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)			\
	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")

#ifdef CONFIG_RETHUNK
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk")
#else
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
#endif

#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)			\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
@@ -48,4 +63,6 @@
	    ".long " STATIC_CALL_KEY_STR(name) " - .		\n"	\
	    ".popsection					\n")

extern bool __static_call_fixup(void *tramp, u8 op, void *dest);

#endif /* _ASM_STATIC_CALL_H */
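A small sketch of the 8-byte trampoline layout described above, built as raw bytes (user-space style, pointer arithmetic is illustrative only): a JMP rel32 followed by the 3-byte ud1 signature, where rel32 is measured from the byte after the 5-byte JMP.

	#include <stdint.h>
	#include <string.h>

	static void build_tramp(uint8_t tramp[8], const uint8_t *func)
	{
		/* rel32 is relative to the end of the 5-byte JMP */
		int32_t rel = (int32_t)(func - (tramp + 5));

		tramp[0] = 0xe9;		/* jmp.d32 */
		memcpy(&tramp[1], &rel, sizeof(rel));
		tramp[5] = 0x0f;		/* ud1 %esp, %ecx */
		tramp[6] = 0xb9;
		tramp[7] = 0xcc;
	}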
@@ -8,7 +8,11 @@
#ifdef __ASSEMBLY__

.macro UNWIND_HINT_EMPTY
	UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1
	UNWIND_HINT type=UNWIND_HINT_TYPE_CALL end=1
.endm

.macro UNWIND_HINT_ENTRY
	UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY end=1
.endm

.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0
@@ -52,6 +56,14 @@
	UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC
.endm

.macro UNWIND_HINT_SAVE
	UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE
.endm

.macro UNWIND_HINT_RESTORE
	UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE
.endm

#else

#define UNWIND_HINT_FUNC \
@@ -15,7 +15,7 @@
#define SETUP_INDIRECT			(1<<31)

/* SETUP_INDIRECT | max(SETUP_*) */
#define SETUP_TYPE_MAX			(SETUP_INDIRECT | SETUP_JAILHOUSE)
#define SETUP_TYPE_MAX			(SETUP_INDIRECT | SETUP_CC_BLOB)

/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK	0x07FF
@@ -11,6 +11,16 @@

/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */

bool cpc_supported_by_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		return boot_cpu_has(X86_FEATURE_CPPC);
	}
	return false;
}

bool cpc_ffh_supported(void)
{
	return true;
@@ -115,6 +115,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
}

extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
@@ -507,9 +508,76 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
	}
}

#ifdef CONFIG_RETHUNK
/*
 * Rewrite the compiler generated return thunk tail-calls.
 *
 * For example, convert:
 *
 *   JMP __x86_return_thunk
 *
 * into:
 *
 *   RET
 */
static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
	int i = 0;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		return -1;

	bytes[i++] = RET_INSN_OPCODE;

	for (; i < insn->length;)
		bytes[i++] = INT3_INSN_OPCODE;

	return i;
}

void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *dest = NULL, *addr = (void *)s + *s;
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op;

		ret = insn_decode_kernel(&insn, addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op = insn.opcode.bytes[0];
		if (op == JMP32_INSN_OPCODE)
			dest = addr + insn.length + insn.immediate.value;

		if (__static_call_fixup(addr, op, dest) ||
		    WARN_ON_ONCE(dest != &__x86_return_thunk))
			continue;

		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_return(addr, &insn, bytes);
		if (len == insn.length) {
			DUMP_BYTES(((u8*)addr),  len, "%px: orig: ", addr);
			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
		}
	}
}
#else
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
#endif /* CONFIG_RETHUNK */

#else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */

void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }

#endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
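A sketch of the displacement math apply_returns() relies on (plain C, illustrative): the target of a 5-byte JMP rel32 is the address of the next instruction plus the signed 32-bit immediate.

	#include <stdint.h>
	#include <string.h>

	static void *jmp32_dest(const uint8_t *addr)
	{
		int32_t imm;

		if (addr[0] != 0xe9)		/* JMP32_INSN_OPCODE */
			return NULL;
		memcpy(&imm, addr + 1, sizeof(imm));
		return (void *)(addr + 5 + imm);	/* next insn + displacement */
	}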
@@ -860,6 +928,7 @@ void __init alternative_instructions(void)
	 * those can rewrite the retpoline thunks.
	 */
	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
	apply_returns(__return_sites, __return_sites_end);

	/*
	 * Then patch alternatives, such that those paravirt calls that are in
@@ -19,6 +19,7 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include <asm/tdx.h>
#include "../kvm/vmx/vmx.h"

#ifdef CONFIG_XEN
#include <xen/interface/xen.h>
@@ -107,4 +108,9 @@ static void __used common(void)
	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);

	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
		BLANK();
		OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
	}
}
@@ -862,6 +862,28 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
	clear_rdrand_cpuid_bit(c);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 *
	 * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);
@@ -870,12 +892,21 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
	node_reclaim_distance = 32;
#endif

	/*
	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
	 * Always set it, except when running under a hypervisor.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
		set_cpu_cap(c, X86_FEATURE_CPB);
	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);

		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd(struct cpuinfo_x86 *c)
@@ -907,7 +938,8 @@ static void init_amd(struct cpuinfo_x86 *c)
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: fallthrough;
	case 0x17: init_spectral_chicken(c);
		   fallthrough;
	case 0x19: init_amd_zn(c); break;
	}
@@ -38,6 +38,8 @@

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
@@ -48,16 +50,40 @@ static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
void write_spec_ctrl_current(u64 val, bool force)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);
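A compact model of the update policy in write_spec_ctrl_current() (stand-alone sketch, not kernel code): unforced updates only refresh the per-CPU shadow value when KERNEL_IBRS is active, because the MSR is rewritten on return-to-user anyway.

	#include <stdbool.h>
	#include <stdint.h>

	static uint64_t shadow_spec_ctrl;	/* models this CPU's x86_spec_ctrl_current */

	static bool spec_ctrl_needs_wrmsr(uint64_t val, bool force, bool kernel_ibrs)
	{
		if (shadow_spec_ctrl == val)
			return false;		/* no change, skip the MSR write */
		shadow_spec_ctrl = val;
		return force || !kernel_ibrs;	/* otherwise deferred to exit-to-user */
	}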
/*
 * AMD specific MSR info for Speculative Store Bypass control.
@@ -114,13 +140,21 @@ void __init check_bugs(void)
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();
	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
@@ -161,31 +195,17 @@ void __init check_bugs(void)
#endif
}

/*
 * NOTE: This function is *only* called for SVM. VMX spec_ctrl handling is
 * done in vmenter.S.
 */
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

	/* SSBD controlled in MSR_SPEC_CTRL */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD))
		hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

	/* Conditional STIBP enabled? */
	if (static_branch_unlikely(&switch_to_cond_stibp))
		hostval |= stibp_tif_to_spec_ctrl(ti->flags);

	if (hostval != guestval) {
		msrval = setguest ? guestval : hostval;
		wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -752,12 +772,180 @@ static int __init nospectre_v1_cmdline(char *str)
}
early_param("nospectre_v1", nospectre_v1_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt)     "RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
};

const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_cmd = RETBLEED_CMD_IBPB;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	bool mitigate_smt = false;

	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			goto do_cmd_auto;
		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

do_cmd_auto:
	case RETBLEED_CMD_AUTO:
	default:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		}

		/*
		 * The Intel mitigation (IBRS or eIBRS) was already selected in
		 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
		 * be set accordingly below.
		 */

		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		mitigate_smt = true;
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
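Condensed into plain C, the RETBLEED_CMD_AUTO policy above (an illustrative sketch with hypothetical names; config options and feature bits are modelled as booleans). On AMD/Hygon the untrained return thunk is preferred, with IBPB-at-entry as the fallback; on Intel the decision is deferred to the spectre_v2 IBRS/eIBRS selection.

	#include <stdbool.h>

	enum rb_pick { RB_NONE, RB_UNRET, RB_IBPB };

	static enum rb_pick pick_retbleed_auto(bool amd_or_hygon,
					       bool cfg_unret_entry,
					       bool cfg_ibpb_entry,
					       bool has_ibpb)
	{
		if (!amd_or_hygon)
			return RB_NONE;		/* resolved later from spectre_v2_enabled */
		if (cfg_unret_entry)
			return RB_UNRET;
		if (cfg_ibpb_entry && has_ibpb)
			return RB_IBPB;
		return RB_NONE;			/* stays "Vulnerable" */
	}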
#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
@@ -828,6 +1016,7 @@ enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
@@ -868,13 +1057,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	switch (spectre_v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
@@ -900,15 +1091,16 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return (mode == SPECTRE_V2_EIBRS ||
		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
		mode == SPECTRE_V2_EIBRS_LFENCE);
	return mode == SPECTRE_V2_IBRS ||
	       mode == SPECTRE_V2_EIBRS ||
	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	       mode == SPECTRE_V2_EIBRS_LFENCE;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
@@ -921,7 +1113,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
@@ -969,12 +1161,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
	}

	/*
	 * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
	 * required.
	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
	 * STIBP is not required.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
		return;

	/*
@@ -986,6 +1178,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
@@ -999,6 +1198,7 @@ static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
};

static const struct {
@@ -1016,6 +1216,7 @@ static const struct {
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
@@ -1078,6 +1279,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
@@ -1093,6 +1318,22 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
	return SPECTRE_V2_RETPOLINE;
}

/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
		return;

	ia32_cap = x86_read_arch_cap_msr();

	if (ia32_cap & ARCH_CAP_RRSBA) {
		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
		write_spec_ctrl_current(x86_spec_ctrl_base, true);
	}
}
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1117,6 +1358,15 @@ static void __init spectre_v2_select_mitigation(void)
			break;
		}

		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
		    retbleed_cmd != RETBLEED_CMD_OFF &&
		    boot_cpu_has(X86_FEATURE_IBRS) &&
		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
			mode = SPECTRE_V2_IBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

@@ -1133,6 +1383,10 @@ static void __init spectre_v2_select_mitigation(void)
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_IBRS:
		mode = SPECTRE_V2_IBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;
@@ -1149,10 +1403,9 @@ static void __init spectre_v2_select_mitigation(void)
	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_eibrs_mode(mode)) {
		/* Force it so VMEXIT will restore correctly */
	if (spectre_v2_in_ibrs_mode(mode)) {
		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		write_spec_ctrl_current(x86_spec_ctrl_base, true);
	}

	switch (mode) {
@@ -1160,6 +1413,10 @@ static void __init spectre_v2_select_mitigation(void)
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
@@ -1171,43 +1428,107 @@ static void __init spectre_v2_select_mitigation(void)
		break;
	}

	/*
	 * Disable alternate RSB predictions in kernel when indirect CALLs and
	 * JMPs get protection against BHI and Intramode-BTI, but RET
	 * prediction from a non-RSB predictor is still a risk.
	 */
	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	    mode == SPECTRE_V2_RETPOLINE)
		spec_ctrl_disable_kernel_rrsba();

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 * If Spectre v2 protection has been enabled, fill the RSB during a
	 * context switch. In general there are two types of RSB attacks
	 * across context switches, for which the CALLs/RETs may be unbalanced.
	 *
	 * - RSB underflow (and switch to BTB) on Skylake+
	 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 * 1) RSB underflow
	 *
	 * Some Intel parts have "bottomless RSB". When the RSB is empty,
	 * speculated return targets may come from the branch predictor,
	 * which could have a user-poisoned BTB or BHB entry.
	 *
	 * AMD has it even worse: *all* returns are speculated from the BTB,
	 * regardless of the state of the RSB.
	 *
	 * When IBRS or eIBRS is enabled, the "user -> kernel" attack
	 * scenario is mitigated by the IBRS branch prediction isolation
	 * properties, so the RSB buffer filling wouldn't be necessary to
	 * protect against this type of attack.
	 *
	 * The "user -> user" attack scenario is mitigated by RSB filling.
	 *
	 * 2) Poisoned RSB entry
	 *
	 * If the 'next' in-kernel return stack is shorter than 'prev',
	 * 'next' could be tricked into speculating with a user-poisoned RSB
	 * entry.
	 *
	 * The "user -> kernel" attack scenario is mitigated by SMEP and
	 * eIBRS.
	 *
	 * The "user -> user" scenario, also known as SpectreBHB, requires
	 * RSB clearing.
	 *
	 * So to mitigate all cases, unconditionally fill RSB on context
	 * switches.
	 *
	 * FIXME: Is this pointless for retbleed-affected AMD?
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 * Similar to context switches, there are two types of RSB attacks
	 * after vmexit:
	 *
	 * 1) RSB underflow
	 *
	 * 2) Poisoned RSB entry
	 *
	 * When retpoline is enabled, both are mitigated by filling/clearing
	 * the RSB.
	 *
	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
	 * prediction isolation protections, RSB still needs to be cleared
	 * because of #2. Note that SMEP provides no protection here, unlike
	 * user-space-poisoned RSB entries.
	 *
	 * eIBRS, on the other hand, has RSB-poisoning protections, so it
	 * doesn't need RSB clearing after vmexit.
	 */
	if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
	    boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);

	/*
	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
	 * enabled.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
	spectre_v2_cmd = cmd;
}

static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
	write_spec_ctrl_current(val, true);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1423,16 +1744,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
@@ -1450,7 +1761,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			write_spec_ctrl_current(x86_spec_ctrl_base, true);
		}
	}

@@ -1701,7 +2012,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		write_spec_ctrl_current(x86_spec_ctrl_base, true);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
@@ -1938,7 +2249,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)

static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
		return "";

	switch (spectre_v2_user_stibp) {
@@ -1994,6 +2305,24 @@ static ssize_t srbds_show_state(char *buf)
	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n");

		return sprintf(buf, "%s; SMT %s\n",
			       retbleed_strings[retbleed_mitigation],
			       !sched_smt_active() ? "disabled" :
			       spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
			       spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
			       "enabled with STIBP protection" : "vulnerable");
	}

	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
@@ -2039,6 +2368,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	default:
		break;
	}
@@ -2095,4 +2427,9 @@ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *at
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}
#endif
@@ -1205,48 +1205,60 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	{}
};

#define VULNBL(vendor, family, model, blacklist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)

#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
					    INTEL_FAM6_##model, steppings, \
					    X86_FEATURE_ANY, issues)

#define VULNBL_AMD(family, blacklist)		\
	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)

#define VULNBL_HYGON(family, blacklist)		\
	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)

#define SRBDS		BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO		BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS	BIT(2)
/* CPU is affected by RETbleed, speculating where you would not expect it */
#define RETBLEED	BIT(3)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_X,	BIT(2) | BIT(4),		MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPINGS(0x3, 0x5),	MMIO),
	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	BIT(3) | BIT(4) | BIT(6) |
						BIT(7) | BIT(0xB),		MMIO),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x9, 0xC),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x9, 0xD),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPINGS(0x5, 0x5),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPINGS(0x1, 0x1),	MMIO),
	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPINGS(0x4, 0x6),	MMIO),
	VULNBL_INTEL_STEPPINGS(COMETLAKE,	BIT(2) | BIT(3) | BIT(5),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO),
	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPINGS(0x1, 0x1),	MMIO),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),

	VULNBL_AMD(0x15, RETBLEED),
	VULNBL_AMD(0x16, RETBLEED),
	VULNBL_AMD(0x17, RETBLEED),
	VULNBL_HYGON(0x18, RETBLEED),
	{}
};

@@ -1348,6 +1360,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	    !arch_cap_mmio_immune(ia32_cap))
		setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);

	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
			setup_force_cpu_bug(X86_BUG_RETBLEED);
	}

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;
@@ -61,6 +61,8 @@ static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */

extern void init_spectral_chicken(struct cpuinfo_x86 *c);

extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
@@ -302,6 +302,12 @@ static void init_hygon(struct cpuinfo_x86 *c)
	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/*
	 * XXX someone from Hygon needs to confirm this DTRT
	 *
	init_spectral_chicken(c);
	 */

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);
@@ -27,6 +27,7 @@ static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_APERFMPERF,	CPUID_ECX,  0, 0x00000006, 0 },
	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
	{ X86_FEATURE_INTEL_PPIN,	CPUID_EBX,  0, 0x00000007, 1 },
	{ X86_FEATURE_RRSBA_CTRL,	CPUID_EDX,  2, 0x00000007, 2 },
	{ X86_FEATURE_CQM_LLC,		CPUID_EDX,  1, 0x0000000f, 0 },
	{ X86_FEATURE_CQM_OCCUP_LLC,	CPUID_EDX,  0, 0x0000000f, 1 },
	{ X86_FEATURE_CQM_MBM_TOTAL,	CPUID_EDX,  1, 0x0000000f, 1 },
@@ -301,7 +301,7 @@ union ftrace_op_code_union {
	} __attribute__((packed));
};

#define RET_SIZE		1 + IS_ENABLED(CONFIG_SLS)
#define RET_SIZE		(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
@@ -357,7 +357,10 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
		goto fail;

	ip = trampoline + size;
	memcpy(ip, retq, RET_SIZE);
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
@@ -426,10 +426,12 @@ void __init do_early_exception(struct pt_regs *regs, int trapnr)

/* Don't add a printk in there. printk relies on the PDA which is not initialized
   yet. */
static void __init clear_bss(void)
void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
	memset(__brk_base, 0,
	       (unsigned long) __brk_limit - (unsigned long) __brk_base);
}

static unsigned long get_cmd_line_ptr(void)
@@ -389,6 +389,8 @@ SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

@@ -448,6 +450,7 @@ SYM_CODE_END(early_idt_handler_array)

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	ANNOTATE_UNRET_END
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
@@ -497,6 +500,8 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS
@@ -253,7 +253,7 @@ int module_finalize(const Elf_Ehdr *hdr,
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *ibt_endbr = NULL;
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -271,6 +271,8 @@ int module_finalize(const Elf_Ehdr *hdr,
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}
@@ -287,6 +289,10 @@ int module_finalize(const Elf_Ehdr *hdr,
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
@@ -600,7 +600,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
		write_spec_ctrl_current(msr, false);
}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
@@ -7,10 +7,12 @@
#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/processor-flags.h>

/*
 * Must be relocatable PIC code callable as a C function
 * Must be relocatable PIC code callable as a C function; in particular,
 * there must be a plain RET and not a jump to the return thunk.
 */

#define PTR(x) (x << 2)
@@ -91,7 +93,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
	movl	%edi, %eax
	addl	$(identity_mapped - relocate_kernel), %eax
	pushl	%eax
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(relocate_kernel)

SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -159,12 +163,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	xorl	%edx, %edx
	xorl	%esi, %esi
	xorl	%ebp, %ebp
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
1:
	popl	%edx
	movl	CP_PA_SWAP_PAGE(%edi), %esp
	addl	$PAGE_SIZE, %esp
2:
	ANNOTATE_RETPOLINE_SAFE
	call	*%edx

	/* get the re-entry point of the peer system */
@@ -190,7 +197,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	movl	%edi, %eax
	addl	$(virtual_mapped - relocate_kernel), %eax
	pushl	%eax
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(identity_mapped)

SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -208,7 +217,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
	popl	%edi
	popl	%esi
	popl	%ebx
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(virtual_mapped)

	/* Do the copies */
@@ -271,7 +282,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
	popl	%edi
	popl	%ebx
	popl	%ebp
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(swap_pages)

	.globl kexec_control_code_size
@@ -13,7 +13,8 @@
#include <asm/unwind_hints.h>

/*
 * Must be relocatable PIC code callable as a C function
 * Must be relocatable PIC code callable as a C function; in particular,
 * there must be a plain RET and not a jump to the return thunk.
 */

#define PTR(x) (x << 3)
@@ -105,7 +106,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
	/* jump to identity mapped page */
	addq	$(identity_mapped - relocate_kernel), %r8
	pushq	%r8
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(relocate_kernel)

SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -200,7 +203,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	xorl	%r14d, %r14d
	xorl	%r15d, %r15d

	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3

1:
	popq	%rdx
@@ -219,7 +224,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	call	swap_pages
	movq	$virtual_mapped, %rax
	pushq	%rax
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(identity_mapped)

SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -241,7 +248,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
	popq	%r12
	popq	%rbp
	popq	%rbx
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(virtual_mapped)

	/* Do the copies */
@@ -298,7 +307,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
	lea	PAGE_SIZE(%rax), %rsi
	jmp	0b
3:
	RET
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(swap_pages)

	.globl kexec_control_code_size
@@ -11,6 +11,13 @@ enum insn_type {
	RET = 3,  /* tramp / site cond-tail-call */
};

/*
 * ud1 %esp, %ecx - a 3 byte #UD that is unique to trampolines, chosen such
 * that there is no false-positive trampoline identification while also being a
 * speculation stop.
 */
static const u8 tramp_ud[] = { 0x0f, 0xb9, 0xcc };

/*
 * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
 */
@@ -18,7 +25,8 @@ static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };

static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };

static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
static void __ref __static_call_transform(void *insn, enum insn_type type,
					  void *func, bool modinit)
{
	const void *emulate = NULL;
	int size = CALL_INSN_SIZE;
@@ -43,14 +51,17 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
		break;

	case RET:
		code = &retinsn;
		if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
			code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk);
		else
			code = &retinsn;
		break;
	}

	if (memcmp(insn, code, size) == 0)
		return;

	if (unlikely(system_state == SYSTEM_BOOTING))
	if (system_state == SYSTEM_BOOTING || modinit)
		return text_poke_early(insn, code, size);

	text_poke_bp(insn, code, size, emulate);
@@ -60,7 +71,7 @@ static void __static_call_validate(void *insn, bool tail, bool tramp)
{
	u8 opcode = *(u8 *)insn;

	if (tramp && memcmp(insn+5, "SCT", 3)) {
	if (tramp && memcmp(insn+5, tramp_ud, 3)) {
		pr_err("trampoline signature fail");
		BUG();
	}
@@ -104,14 +115,42 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)

	if (tramp) {
		__static_call_validate(tramp, true, true);
		__static_call_transform(tramp, __sc_insn(!func, true), func);
		__static_call_transform(tramp, __sc_insn(!func, true), func, false);
	}

	if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
		__static_call_validate(site, tail, false);
		__static_call_transform(site, __sc_insn(!func, tail), func);
		__static_call_transform(site, __sc_insn(!func, tail), func, false);
	}

	mutex_unlock(&text_mutex);
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);

#ifdef CONFIG_RETHUNK
/*
 * This is called by apply_returns() to fix up static call trampolines,
 * specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as
 * having a return trampoline.
 *
 * The problem is that static_call() is available before determining
 * X86_FEATURE_RETHUNK and, by implication, running alternatives.
 *
 * This means that __static_call_transform() above can have overwritten the
 * return trampoline and we now need to fix things up to be consistent.
 */
bool __static_call_fixup(void *tramp, u8 op, void *dest)
{
	if (memcmp(tramp+5, tramp_ud, 3)) {
		/* Not a trampoline site, not our problem. */
		return false;
	}

	mutex_lock(&text_mutex);
	if (op == RET_INSN_OPCODE || dest == &__x86_return_thunk)
		__static_call_transform(tramp, RET, NULL, true);
	mutex_unlock(&text_mutex);

	return true;
}
#endif
||||
|
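With CONFIG_RETHUNK, the RET case above no longer patches in a literal ret; it generates a 5-byte jmp to __x86_return_thunk via text_gen_insn(). As a rough sketch of the rel32 arithmetic behind such a JMP32_INSN_OPCODE encoding (stand-alone illustration; gen_jmp32() is hypothetical, not the kernel helper):

#include <stdint.h>
#include <string.h>

/* A 5-byte near JMP is opcode 0xe9 followed by a little-endian rel32
 * measured from the end of the instruction (addr + 5) to the target.
 */
static void gen_jmp32(uint8_t buf[5], uint64_t addr, uint64_t target)
{
	int32_t rel = (int32_t)(target - (addr + 5));

	buf[0] = 0xe9;			/* JMP rel32 */
	memcpy(&buf[1], &rel, sizeof(rel));
}

Both encodings fit in the same 5 patched bytes, which is why the transform can rewrite a call site in place either way.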
arch/x86/kernel/vmlinux.lds.S
@@ -141,7 +141,7 @@ SECTIONS

 #ifdef CONFIG_RETPOLINE
 		__indirect_thunk_start = .;
-		*(.text.__x86.indirect_thunk)
+		*(.text.__x86.*)
 		__indirect_thunk_end = .;
 #endif
 	} :text =0xcccc
@@ -283,6 +283,13 @@ SECTIONS
 		*(.retpoline_sites)
 		__retpoline_sites_end = .;
 	}
+
+	. = ALIGN(8);
+	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
+		__return_sites = .;
+		*(.return_sites)
+		__return_sites_end = .;
+	}
 #endif

 #ifdef CONFIG_X86_KERNEL_IBT
@@ -385,7 +392,7 @@ SECTIONS
 	__end_of_kernel_reserve = .;

 	. = ALIGN(PAGE_SIZE);
-	.brk (NOLOAD) : AT(ADDR(.brk) - LOAD_OFFSET) {
+	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
 		__brk_base = .;
 		. += 64 * 1024;		/* 64k alignment slop space */
 		*(.bss..brk)		/* areas brk users have reserved */
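Like .retpoline_sites just above it, the new .return_sites section is an array of 32-bit self-relative entries (emitted by objtool) bracketed by __return_sites/__return_sites_end, which apply_returns() walks at boot to rewrite each compiled-in RET. A minimal sketch of walking such a self-relative table (simplified; walk_return_sites() is illustrative, not the kernel's loop):

#include <stdint.h>

/* Each int32_t entry stores the return site's address as an offset from
 * the entry's own location, so the table needs no load-time relocations.
 */
static void walk_return_sites(int32_t *start, int32_t *end)
{
	for (int32_t *s = start; s < end; s++) {
		void *addr = (char *)s + *s;	/* decode self-relative entry */

		/* the kernel would patch the RET found at 'addr' here */
		(void)addr;
	}
}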
arch/x86/kvm/emulate.c
@@ -325,13 +325,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 #define FOP_RET(name) \
 	__FOP_RET(#name)

-#define FOP_START(op) \
+#define __FOP_START(op, align) \
 	extern void em_##op(struct fastop *fake); \
 	asm(".pushsection .text, \"ax\" \n\t" \
 	    ".global em_" #op " \n\t" \
-	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
+	    ".align " __stringify(align) " \n\t" \
 	    "em_" #op ":\n\t"

+#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
+
 #define FOP_END \
 	    ".popsection")

@@ -435,16 +437,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 /*
  * Depending on .config the SETcc functions look like:
  *
- * ENDBR		[4 bytes; CONFIG_X86_KERNEL_IBT]
- * SETcc %al		[3 bytes]
- * RET			[1 byte]
- * INT3			[1 byte; CONFIG_SLS]
- *
- * Which gives possible sizes 4, 5, 8 or 9.  When rounded up to the
- * next power-of-two alignment they become 4, 8 or 16 resp.
+ * ENDBR			[4 bytes; CONFIG_X86_KERNEL_IBT]
+ * SETcc %al			[3 bytes]
+ * RET | JMP __x86_return_thunk	[1,5 bytes; CONFIG_RETHUNK]
+ * INT3				[1 byte; CONFIG_SLS]
  */
-#define SETCC_LENGTH	(ENDBR_INSN_SIZE + 4 + IS_ENABLED(CONFIG_SLS))
-#define SETCC_ALIGN	(4 << IS_ENABLED(CONFIG_SLS) << HAS_KERNEL_IBT)
+#define RET_LENGTH	(1 + (4 * IS_ENABLED(CONFIG_RETHUNK)) + \
+			 IS_ENABLED(CONFIG_SLS))
+#define SETCC_LENGTH	(ENDBR_INSN_SIZE + 3 + RET_LENGTH)
+#define SETCC_ALIGN	(4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))
 static_assert(SETCC_LENGTH <= SETCC_ALIGN);

 #define FOP_SETCC(op) \
@@ -453,9 +454,10 @@ static_assert(SETCC_LENGTH <= SETCC_ALIGN);
 	#op ": \n\t" \
 	ASM_ENDBR \
 	#op " %al \n\t" \
-	__FOP_RET(#op)
+	__FOP_RET(#op) \
+	".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"

-FOP_START(setcc)
+__FOP_START(setcc, SETCC_ALIGN)
 FOP_SETCC(seto)
 FOP_SETCC(setno)
 FOP_SETCC(setc)
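The rewritten macros size the SETcc stubs from the actual RET form instead of a hard-coded 4 bytes, since a thunked return is a 5-byte JMP. The arithmetic can be checked stand-alone; here it is evaluated for one assumed .config (IBT off, CONFIG_RETHUNK=y, CONFIG_SLS=n):

#include <assert.h>
#include <stdio.h>

#define ENDBR_INSN_SIZE	0	/* assumed: CONFIG_X86_KERNEL_IBT=n */
#define RETHUNK		1	/* assumed: IS_ENABLED(CONFIG_RETHUNK) */
#define SLS		0	/* assumed: IS_ENABLED(CONFIG_SLS) */

#define RET_LENGTH	(1 + (4 * RETHUNK) + SLS)		/* 5 bytes */
#define SETCC_LENGTH	(ENDBR_INSN_SIZE + 3 + RET_LENGTH)	/* 8 bytes */
#define SETCC_ALIGN	(4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))

int main(void)
{
	/* 3-byte SETcc + 5-byte JMP to the return thunk = 8, aligned to 8 */
	printf("len=%d align=%d\n", SETCC_LENGTH, SETCC_ALIGN);
	assert(SETCC_LENGTH <= SETCC_ALIGN);
	return 0;
}

With IBT and SLS both enabled the length becomes 13 and the alignment 16, which the .skip directive pads with 0xcc.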
arch/x86/kvm/svm/vmenter.S
@@ -110,6 +110,15 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif

+	/*
+	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
+	 * untrained as soon as we exit the VM and are back to the
+	 * kernel. This should be done before re-enabling interrupts
+	 * because interrupt handlers won't sanitize 'ret' if the return is
+	 * from the kernel.
+	 */
+	UNTRAIN_RET
+
 	/*
 	 * Clear all general purpose registers except RSP and RAX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
@@ -190,6 +199,15 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif

+	/*
+	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
+	 * untrained as soon as we exit the VM and are back to the
+	 * kernel. This should be done before re-enabling interrupts
+	 * because interrupt handlers won't sanitize RET if the return is
+	 * from the kernel.
+	 */
+	UNTRAIN_RET
+
 	pop %_ASM_BX

 #ifdef CONFIG_X86_64
arch/x86/kvm/vmx/capabilities.h
@@ -4,8 +4,8 @@

 #include <asm/vmx.h>

-#include "lapic.h"
-#include "x86.h"
+#include "../lapic.h"
+#include "../x86.h"

 extern bool __read_mostly enable_vpid;
 extern bool __read_mostly flexpriority_enabled;
arch/x86/kvm/vmx/nested.c
@@ -3087,7 +3087,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	}

 	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
-				 vmx->loaded_vmcs->launched);
+				 __vmx_vcpu_run_flags(vmx));

 	if (vmx->msr_autoload.host.nr)
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
arch/x86/kvm/vmx/run_flags.h (new file, 8 lines)
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_RUN_FLAGS_H
+#define __KVM_X86_VMX_RUN_FLAGS_H
+
+#define VMX_RUN_VMRESUME	(1 << 0)
+#define VMX_RUN_SAVE_SPEC_CTRL	(1 << 1)
+
+#endif /* __KVM_X86_VMX_RUN_FLAGS_H */
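The two bits are built up in __vmx_vcpu_run_flags() (added to vmx.c below) and consumed by the assembly with instructions like `testb $VMX_RUN_VMRESUME, %bl`. A minimal C rendering of the same compose-and-test pattern (struct and field names hypothetical):

#define VMX_RUN_VMRESUME	(1 << 0)
#define VMX_RUN_SAVE_SPEC_CTRL	(1 << 1)

struct vcpu_state {				/* stand-in for struct vcpu_vmx */
	_Bool launched;
	_Bool spec_ctrl_not_intercepted;
};

static unsigned int vcpu_run_flags(const struct vcpu_state *v)
{
	unsigned int flags = 0;

	if (v->launched)			/* VMRESUME instead of VMLAUNCH */
		flags |= VMX_RUN_VMRESUME;
	if (v->spec_ctrl_not_intercepted)	/* re-read SPEC_CTRL after exit */
		flags |= VMX_RUN_SAVE_SPEC_CTRL;

	return flags;
}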
arch/x86/kvm/vmx/vmenter.S
@@ -1,10 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
+#include <asm/percpu.h>
 #include <asm/segment.h>
+#include "run_flags.h"

 #define WORD_SIZE (BITS_PER_LONG / 8)

@@ -30,73 +33,12 @@

 .section .noinstr.text, "ax"

-/**
- * vmx_vmenter - VM-Enter the current loaded VMCS
- *
- * %RFLAGS.ZF:	!VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
- *
- * Returns:
- *	%RFLAGS.CF is set on VM-Fail Invalid
- *	%RFLAGS.ZF is set on VM-Fail Valid
- *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * Note that VMRESUME/VMLAUNCH fall-through and return directly if
- * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
- * to vmx_vmexit.
- */
-SYM_FUNC_START_LOCAL(vmx_vmenter)
-	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
-	je 2f
-
-1:	vmresume
-	RET
-
-2:	vmlaunch
-	RET
-
-3:	cmpb $0, kvm_rebooting
-	je 4f
-	RET
-4:	ud2
-
-	_ASM_EXTABLE(1b, 3b)
-	_ASM_EXTABLE(2b, 3b)
-
-SYM_FUNC_END(vmx_vmenter)
-
-/**
- * vmx_vmexit - Handle a VMX VM-Exit
- *
- * Returns:
- *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
- * here after hardware loads the host's state, i.e. this is the destination
- * referred to by VMCS.HOST_RIP.
- */
-SYM_FUNC_START(vmx_vmexit)
-#ifdef CONFIG_RETPOLINE
-	ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
-	/* Preserve guest's RAX, it's used to stuff the RSB. */
-	push %_ASM_AX
-
-	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-
-	/* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
-	or $1, %_ASM_AX
-
-	pop %_ASM_AX
-.Lvmexit_skip_rsb:
-#endif
-	RET
-SYM_FUNC_END(vmx_vmexit)
-
 /**
  * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
- * @vmx:	struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
+ * @vmx:	struct vcpu_vmx *
  * @regs:	unsigned long * (to guest registers)
- * @launched:	%true if the VMCS has been launched
+ * @flags:	VMX_RUN_VMRESUME:	use VMRESUME instead of VMLAUNCH
+ *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
  *
  * Returns:
  *	0 on VM-Exit, 1 on VM-Fail
@@ -115,24 +57,56 @@ SYM_FUNC_START(__vmx_vcpu_run)
 #endif
 	push %_ASM_BX

+	/* Save @vmx for SPEC_CTRL handling */
+	push %_ASM_ARG1
+
+	/* Save @flags for SPEC_CTRL handling */
+	push %_ASM_ARG3
+
 	/*
 	 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
 	 * @regs is needed after VM-Exit to save the guest's register values.
 	 */
 	push %_ASM_ARG2

-	/* Copy @launched to BL, _ASM_ARG3 is volatile. */
+	/* Copy @flags to BL, _ASM_ARG3 is volatile. */
 	mov %_ASM_ARG3B, %bl

-	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
-	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
+	lea (%_ASM_SP), %_ASM_ARG2
 	call vmx_update_host_rsp

+	ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL
+
+	/*
+	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
+	 * host's, write the MSR.
+	 *
+	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
+	 * there must not be any returns or indirect branches between this code
+	 * and vmentry.
+	 */
+	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
+	movl VMX_spec_ctrl(%_ASM_DI), %edi
+	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
+	cmp %edi, %esi
+	je .Lspec_ctrl_done
+	mov $MSR_IA32_SPEC_CTRL, %ecx
+	xor %edx, %edx
+	mov %edi, %eax
+	wrmsr
+
+.Lspec_ctrl_done:
+
+	/*
+	 * Since vmentry is serializing on affected CPUs, there's no need for
+	 * an LFENCE to stop speculation from skipping the wrmsr.
+	 */
+
 	/* Load @regs to RAX. */
 	mov (%_ASM_SP), %_ASM_AX

 	/* Check if vmlaunch or vmresume is needed */
-	testb %bl, %bl
+	testb $VMX_RUN_VMRESUME, %bl

 	/* Load guest registers.  Don't clobber flags. */
 	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -154,11 +128,37 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Load guest RAX.  This kills the @regs pointer! */
 	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

-	/* Enter guest mode */
-	call vmx_vmenter
+	/* Check EFLAGS.ZF from 'testb' above */
+	jz .Lvmlaunch

-	/* Jump on VM-Fail. */
-	jbe 2f
+	/*
+	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
+	 * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
+	 * So this isn't a typical function and objtool needs to be told to
+	 * save the unwind state here and restore it below.
+	 */
+	UNWIND_HINT_SAVE
+
+	/*
+	 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
+	 * the 'vmx_vmexit' label below.
+	 */
+.Lvmresume:
+	vmresume
+	jmp .Lvmfail
+
+.Lvmlaunch:
+	vmlaunch
+	jmp .Lvmfail
+
+	_ASM_EXTABLE(.Lvmresume, .Lfixup)
+	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)
+
+SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
+
+	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
+	UNWIND_HINT_RESTORE
+	ENDBR

 	/* Temporarily save guest's RAX. */
 	push %_ASM_AX
@@ -185,21 +185,23 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif

-	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
-	xor %eax, %eax
+	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
+	xor %ebx, %ebx

+.Lclear_regs:
 	/*
-	 * Clear all general purpose registers except RSP and RAX to prevent
+	 * Clear all general purpose registers except RSP and RBX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
 	 * via the stack.  In theory, an L1 cache miss when restoring registers
 	 * could lead to speculative execution with the guest's values.
 	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
-	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
-	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
+	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
+	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
+	 * value.
 	 */
-1:	xor %ecx, %ecx
+	xor %eax, %eax
+	xor %ecx, %ecx
 	xor %edx, %edx
-	xor %ebx, %ebx
 	xor %ebp, %ebp
 	xor %esi, %esi
 	xor %edi, %edi
@@ -216,8 +218,30 @@ SYM_FUNC_START(__vmx_vcpu_run)

 	/* "POP" @regs. */
-	add $WORD_SIZE, %_ASM_SP
+	pop %_ASM_BX

+	/*
+	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+	 * the first unbalanced RET after vmexit!
+	 *
+	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
+	 * entries and (in some cases) RSB underflow.
+	 *
+	 * eIBRS has its own protection against poisoned RSB, so it doesn't
+	 * need the RSB filling sequence.  But it does need to be enabled
+	 * before the first unbalanced RET.
+	 */
+
+	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
+
+	pop %_ASM_ARG2	/* @flags */
+	pop %_ASM_ARG1	/* @vmx */
+
+	call vmx_spec_ctrl_restore_host
+
+	/* Put return value in AX */
+	mov %_ASM_BX, %_ASM_AX
+
 	pop %_ASM_BX
 #ifdef CONFIG_X86_64
 	pop %r12
 	pop %r13
@@ -230,9 +254,15 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	pop %_ASM_BP
 	RET

-	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
-2:	mov $1, %eax
-	jmp 1b
+.Lfixup:
+	cmpb $0, kvm_rebooting
+	jne .Lvmfail
+	ud2
+.Lvmfail:
+	/* VM-Fail: set return value to 1 */
+	mov $1, %_ASM_BX
+	jmp .Lclear_regs

 SYM_FUNC_END(__vmx_vcpu_run)
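Taken together, the flattened __vmx_vcpu_run() enforces a strict ordering around the world switch: write the guest SPEC_CTRL (if it differs) with no returns or indirect branches before vmentry, then fill the RSB and restore the host SPEC_CTRL before the first unbalanced RET after vmexit. A pseudo-C paraphrase of that flow (every name below is a made-up stub; the real sequence is the assembly above plus vmx_spec_ctrl_restore_host() in vmx.c):

/* Illustrative stubs only - none of these are kernel APIs. */
struct vcpu { unsigned long fail; };
extern unsigned long guest_spec_ctrl(struct vcpu *v);
extern unsigned long host_spec_ctrl(void);
extern void wrmsr_spec_ctrl(unsigned long val);
extern void vmenter(int vmresume);
extern void fill_return_buffer(void);
extern void spec_ctrl_restore_host(struct vcpu *v, unsigned int flags);

#define VMX_RUN_VMRESUME (1 << 0)

unsigned long run_vcpu(struct vcpu *v, unsigned int flags)
{
	/* 1. Switch SPEC_CTRL to the guest value if needed; nothing between
	 *    here and vmentry may return or branch indirectly.
	 */
	if (guest_spec_ctrl(v) != host_spec_ctrl())
		wrmsr_spec_ctrl(guest_spec_ctrl(v));

	/* 2. VMLAUNCH or VMRESUME, chosen by VMX_RUN_VMRESUME. */
	vmenter(flags & VMX_RUN_VMRESUME);

	/* 3. Back in the host: stuff the RSB, then restore the host
	 *    SPEC_CTRL, all before the first unbalanced RET.
	 */
	fill_return_buffer();
	spec_ctrl_restore_host(v, flags);

	return v->fail;		/* 0 on VM-Exit, 1 on VM-Fail */
}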
arch/x86/kvm/vmx/vmx.c
@@ -383,9 +383,9 @@ static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
 	if (!vmx->disable_fb_clear)
 		return;

-	rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
+	msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
 	msr |= FB_CLEAR_DIS;
-	wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
+	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
 	/* Cache the MSR value to avoid reading it later */
 	vmx->msr_ia32_mcu_opt_ctrl = msr;
 }
@@ -396,7 +396,7 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
 		return;

 	vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
-	wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
+	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
 }

 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
@@ -839,6 +839,24 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
 					 MSR_IA32_SPEC_CTRL);
 }

+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
+{
+	unsigned int flags = 0;
+
+	if (vmx->loaded_vmcs->launched)
+		flags |= VMX_RUN_VMRESUME;
+
+	/*
+	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
+	 * to change it directly without causing a vmexit.  In that case read
+	 * it after vmexit and store it in vmx->spec_ctrl.
+	 */
+	if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
+		flags |= VMX_RUN_SAVE_SPEC_CTRL;
+
+	return flags;
+}
+
 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 		unsigned long entry, unsigned long exit)
 {
@@ -6813,6 +6831,31 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }

+void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
+					unsigned int flags)
+{
+	u64 hostval = this_cpu_read(x86_spec_ctrl_current);
+
+	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
+		return;
+
+	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
+		vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+
+	/*
+	 * If the guest/host SPEC_CTRL values differ, restore the host value.
+	 *
+	 * For legacy IBRS, the IBRS bit always needs to be written after
+	 * transitioning from a less privileged predictor mode, regardless of
+	 * whether the guest/host values differ.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
+	    vmx->spec_ctrl != hostval)
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+
+	barrier_nospec();
+}
+
 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
 	switch (to_vmx(vcpu)->exit_reason.basic) {
@@ -6826,7 +6869,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 }

 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
-					struct vcpu_vmx *vmx)
+					struct vcpu_vmx *vmx,
+					unsigned long flags)
 {
 	guest_state_enter_irqoff();

@@ -6845,7 +6889,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 		native_write_cr2(vcpu->arch.cr2);

 	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
-				   vmx->loaded_vmcs->launched);
+				   flags);

 	vcpu->arch.cr2 = native_read_cr2();

@@ -6944,36 +6988,8 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)

 	kvm_wait_lapic_expire(vcpu);

-	/*
-	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
-	 * is no need to worry about the conditional branch over the wrmsr
-	 * being speculatively taken.
-	 */
-	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
 	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
-	vmx_vcpu_enter_exit(vcpu, vmx);
-
-	/*
-	 * We do not use IBRS in the kernel. If this vCPU has used the
-	 * SPEC_CTRL MSR it may have left it on; save the value and
-	 * turn it off. This is much more efficient than blindly adding
-	 * it to the atomic save/restore list. Especially as the former
-	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
-	 *
-	 * For non-nested case:
-	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
-	 * save it.
-	 *
-	 * For nested case:
-	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
-	 * save it.
-	 */
-	if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
-		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
-	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+	vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));

 	/* All fields are clean at this point */
 	if (static_branch_unlikely(&enable_evmcs)) {
arch/x86/kvm/vmx/vmx.h
@@ -8,11 +8,12 @@
 #include <asm/intel_pt.h>

 #include "capabilities.h"
-#include "kvm_cache_regs.h"
+#include "../kvm_cache_regs.h"
 #include "posted_intr.h"
 #include "vmcs.h"
 #include "vmx_ops.h"
-#include "cpuid.h"
+#include "../cpuid.h"
+#include "run_flags.h"

 #define MSR_TYPE_R	1
 #define MSR_TYPE_W	2
@@ -404,7 +405,10 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
-bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
+void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
+		    unsigned int flags);
 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
arch/x86/kvm/vmx/vmx_ops.h
@@ -8,7 +8,7 @@

 #include "evmcs.h"
 #include "vmcs.h"
-#include "x86.h"
+#include "../x86.h"

 asmlinkage void vmread_error(unsigned long field, bool fault);
 __attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
arch/x86/kvm/x86.c
@@ -12631,9 +12631,9 @@ void kvm_arch_end_assignment(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

-bool kvm_arch_has_assigned_device(struct kvm *kvm)
+bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
 {
-	return atomic_read(&kvm->arch.assigned_device_count);
+	return arch_atomic_read(&kvm->arch.assigned_device_count);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
arch/x86/lib/memmove_64.S
@@ -39,7 +39,7 @@ SYM_FUNC_START(__memmove)
 	/* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
 	ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
-	ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS
+	ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS

 	/*
 	 * movsq instruction have many startup latency
@@ -205,6 +205,11 @@ SYM_FUNC_START(__memmove)
 	movb %r11b, (%rdi)
 13:
 	RET
+
+.Lmemmove_erms:
+	movq %rdx, %rcx
+	rep movsb
+	RET
 SYM_FUNC_END(__memmove)
 EXPORT_SYMBOL(__memmove)
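The rep-movsb path moves from an inline ALTERNATIVE string to a real label, plausibly so that its RET is visible to objtool's .return_sites collection and can be rewritten into a return thunk (a RET hidden inside alternative replacement text is never patched). For reference, a user-space rendering of what the new .Lmemmove_erms tail does (x86-64 GCC/Clang inline asm; forward copy only, so unlike memmove it does not handle overlapping buffers):

#include <stddef.h>

static void *erms_copy(void *dst, const void *src, size_t len)
{
	void *ret = dst;

	/* rep movsb: copy RCX bytes from [RSI] to [RDI], as at .Lmemmove_erms */
	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (len)
		     :
		     : "memory");
	return ret;
}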
arch/x86/lib/retpoline.S
@@ -33,9 +33,9 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 	UNWIND_HINT_EMPTY
 	ANNOTATE_NOENDBR

-	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
-		      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
-		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE
+	ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
+		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
+		      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)

 .endm

@@ -67,3 +67,76 @@ SYM_CODE_END(__x86_indirect_thunk_array)
 #define GEN(reg) EXPORT_THUNK(reg)
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
+
+/*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+ */
+#ifdef CONFIG_RETHUNK
+
+	.section .text.__x86.return_thunk
+
+/*
+ * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+ * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
+ *    alignment within the BTB.
+ * 2) The instruction at zen_untrain_ret must contain, and not
+ *    end with, the 0xc3 byte of the RET.
+ * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
+ *    from re-poisioning the BTB prediction.
+ */
+	.align 64
+	.skip 63, 0xcc
+SYM_FUNC_START_NOALIGN(zen_untrain_ret);
+
+	/*
+	 * As executed from zen_untrain_ret, this is:
+	 *
+	 *   TEST $0xcc, %bl
+	 *   LFENCE
+	 *   JMP __x86_return_thunk
+	 *
+	 * Executing the TEST instruction has a side effect of evicting any BTB
+	 * prediction (potentially attacker controlled) attached to the RET, as
+	 * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
+	 */
+	.byte	0xf6
+
+	/*
+	 * As executed from __x86_return_thunk, this is a plain RET.
+	 *
+	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+	 *
+	 * We subsequently jump backwards and architecturally execute the RET.
+	 * This creates a correct BTB prediction (type=ret), but in the
+	 * meantime we suffer Straight Line Speculation (because the type was
+	 * no branch) which is halted by the INT3.
+	 *
+	 * With SMT enabled and STIBP active, a sibling thread cannot poison
+	 * RET's prediction to a type of its choice, but can evict the
+	 * prediction due to competitive sharing. If the prediction is
+	 * evicted, __x86_return_thunk will suffer Straight Line Speculation
+	 * which will be contained safely by the INT3.
+	 */
+SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL)
+	ret
+	int3
+SYM_CODE_END(__x86_return_thunk)
+
+	/*
+	 * Ensure the TEST decoding / BTB invalidation is complete.
+	 */
+	lfence
+
+	/*
+	 * Jump back and execute the RET in the middle of the TEST instruction.
+	 * INT3 is for SLS protection.
+	 */
+	jmp __x86_return_thunk
+	int3
+SYM_FUNC_END(zen_untrain_ret)
+__EXPORT_THUNK(zen_untrain_ret)
+
+EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */
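The whole construction hinges on one byte sequence decoding two ways: entered at zen_untrain_ret, the bytes f6 c3 cc form a single `test $0xcc, %bl`; entered one byte later at __x86_return_thunk, the same bytes are `ret; int3`. A small stand-alone check of that overlap (decodings hard-coded as strings for clarity):

#include <stdio.h>

int main(void)
{
	/* Bytes shared by zen_untrain_ret and __x86_return_thunk */
	const unsigned char code[] = { 0xf6, 0xc3, 0xcc };

	/* From offset 0: F6 /0 ib, ModRM 0xc3 selects %bl => test $0xcc, %bl */
	printf("+0: test $0x%02x, %%bl\n", code[2]);

	/* From offset 1: 0xc3 is RET, 0xcc is INT3 */
	printf("+1: ret (0x%02x); int3 (0x%02x)\n", code[1], code[2]);
	return 0;
}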
arch/x86/mm/mem_encrypt_boot.S
@@ -65,7 +65,10 @@ SYM_FUNC_START(sme_encrypt_execute)
 	movq	%rbp, %rsp		/* Restore original stack pointer */
 	pop	%rbp

-	RET
+	/* Offset to __x86_return_thunk would be wrong here */
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_FUNC_END(sme_encrypt_execute)

 SYM_FUNC_START(__enc_copy)
@@ -151,6 +154,9 @@ SYM_FUNC_START(__enc_copy)
 	pop	%r12
 	pop	%r15

-	RET
+	/* Offset to __x86_return_thunk would be wrong here */
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 .L__enc_copy_end:
 SYM_FUNC_END(__enc_copy)
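The comment is about position dependence: this code runs from a copy at a different address, so a `jmp __x86_return_thunk` whose rel32 was resolved at link time would miss the thunk entirely; hence the annotated bare ret. A toy calculation showing how relocation breaks a rel32 branch (all addresses invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t link_addr = 0xffffffff81000100ULL; /* where the JMP was linked */
	uint64_t thunk     = 0xffffffff81e00000ULL; /* pretend return thunk */
	int32_t  rel32     = (int32_t)(thunk - (link_addr + 5));

	/* Execute the same 5 bytes from a copied location... */
	uint64_t run_addr  = 0x100000ULL;
	uint64_t resolved  = run_addr + 5 + (int64_t)rel32;

	/* ...and the branch target is nowhere near the thunk. */
	printf("resolved %#llx vs thunk %#llx\n",
	       (unsigned long long)resolved, (unsigned long long)thunk);
	return 0;
}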
arch/x86/net/bpf_jit_comp.c
@@ -412,16 +412,30 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 {
 	u8 *prog = *pprog;

-#ifdef CONFIG_RETPOLINE
 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 		EMIT_LFENCE();
 		EMIT2(0xFF, 0xE0 + reg);
 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
 		OPTIMIZER_HIDE_VAR(reg);
 		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
-	} else
-#endif
-		EMIT2(0xFF, 0xE0 + reg);
+	} else {
+		EMIT2(0xFF, 0xE0 + reg);
+	}

 	*pprog = prog;
 }

+static void emit_return(u8 **pprog, u8 *ip)
+{
+	u8 *prog = *pprog;
+
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+		emit_jump(&prog, &__x86_return_thunk, ip);
+	} else {
+		EMIT1(0xC3);		/* ret */
+		if (IS_ENABLED(CONFIG_SLS))
+			EMIT1(0xCC);	/* int3 */
+	}
+
+	*pprog = prog;
+}
+
@@ -1686,7 +1700,7 @@ emit_jmp:
 			ctx->cleanup_addr = proglen;
 			pop_callee_regs(&prog, callee_regs_used);
 			EMIT1(0xC9);         /* leave */
-			EMIT1(0xC3);         /* ret */
+			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
 			break;

 		default:
@@ -2197,7 +2211,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
 		/* skip our return address and return to parent */
 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
-	EMIT1(0xC3);	/* ret */
+	emit_return(&prog, prog);
 	/* Make sure the trampoline generation logic doesn't overflow */
 	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
 		ret = -EFAULT;
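emit_return() gives JITed programs the same two return shapes as compiled kernel code: a bare 0xC3 (optionally followed by 0xCC under CONFIG_SLS) or a 5-byte jump into the shared return thunk. A hedged stand-alone sketch of the two encodings (emit_ret() is illustrative, not the kernel function):

#include <stdint.h>
#include <string.h>

/* Returns the number of bytes written at p; ip is where they will live. */
static int emit_ret(uint8_t *p, uint64_t ip, uint64_t ret_thunk,
		    int rethunk, int sls)
{
	if (rethunk) {
		int32_t rel = (int32_t)(ret_thunk - (ip + 5));

		p[0] = 0xe9;		/* jmp rel32 -> return thunk */
		memcpy(p + 1, &rel, 4);
		return 5;
	}
	p[0] = 0xc3;			/* ret */
	if (sls)
		p[1] = 0xcc;		/* int3 stops straight-line speculation */
	return 1 + (sls ? 1 : 0);
}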
arch/x86/xen/enlighten_pv.c
@@ -1183,15 +1183,19 @@ static void __init xen_domu_set_legacy_features(void)
 extern void early_xen_iret_patch(void);

 /* First C function to be called on Xen boot */
-asmlinkage __visible void __init xen_start_kernel(void)
+asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
 {
 	struct physdev_set_iopl set_iopl;
 	unsigned long initrd_start = 0;
 	int rc;

-	if (!xen_start_info)
+	if (!si)
 		return;

+	clear_bss();
+
+	xen_start_info = si;
+
 	__text_gen_insn(&early_xen_iret_patch,
 			JMP32_INSN_OPCODE, &early_xen_iret_patch, &xen_iret,
 			JMP32_INSN_SIZE);
arch/x86/xen/setup.c
@@ -918,7 +918,7 @@ void xen_enable_sysenter(void)
 	if (!boot_cpu_has(sysenter_feature))
 		return;

-	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
+	ret = register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat);
 	if(ret != 0)
 		setup_clear_cpu_cap(sysenter_feature);
 }
@@ -927,7 +927,7 @@
 {
 	int ret;

-	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
+	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
 	if (ret != 0) {
 		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
 		/* Pretty fatal; 64-bit userspace has no other
@@ -936,7 +936,7 @@

 	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
 		ret = register_callback(CALLBACKTYPE_syscall32,
-					xen_syscall32_target);
+					xen_entry_SYSCALL_compat);
 		if (ret != 0)
 			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 	}
arch/x86/xen/xen-asm.S
@@ -121,7 +121,7 @@ SYM_FUNC_END(xen_read_cr2_direct);

 .macro xen_pv_trap name
 SYM_CODE_START(xen_\name)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
 	ENDBR
 	pop %rcx
 	pop %r11
@@ -234,8 +234,8 @@ SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
 */

 /* Normal 64-bit system call target */
-SYM_CODE_START(xen_syscall_target)
-	UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSCALL_64)
+	UNWIND_HINT_ENTRY
 	ENDBR
 	popq %rcx
 	popq %r11
@@ -249,13 +249,13 @@ SYM_CODE_START(xen_syscall_target)
 	movq $__USER_CS, 1*8(%rsp)

 	jmp entry_SYSCALL_64_after_hwframe
-SYM_CODE_END(xen_syscall_target)
+SYM_CODE_END(xen_entry_SYSCALL_64)

 #ifdef CONFIG_IA32_EMULATION

 /* 32-bit compat syscall target */
-SYM_CODE_START(xen_syscall32_target)
-	UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSCALL_compat)
+	UNWIND_HINT_ENTRY
 	ENDBR
 	popq %rcx
 	popq %r11
@@ -269,11 +269,11 @@ SYM_CODE_START(xen_syscall32_target)
 	movq $__USER32_CS, 1*8(%rsp)

 	jmp entry_SYSCALL_compat_after_hwframe
-SYM_CODE_END(xen_syscall32_target)
+SYM_CODE_END(xen_entry_SYSCALL_compat)

 /* 32-bit compat sysenter target */
-SYM_CODE_START(xen_sysenter_target)
-	UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSENTER_compat)
+	UNWIND_HINT_ENTRY
 	ENDBR
 	/*
 	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
@@ -291,19 +291,19 @@ SYM_CODE_START(xen_sysenter_target)
 	movq $__USER32_CS, 1*8(%rsp)

 	jmp entry_SYSENTER_compat_after_hwframe
-SYM_CODE_END(xen_sysenter_target)
+SYM_CODE_END(xen_entry_SYSENTER_compat)

 #else /* !CONFIG_IA32_EMULATION */

-SYM_CODE_START(xen_syscall32_target)
-SYM_CODE_START(xen_sysenter_target)
-	UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSCALL_compat)
+SYM_CODE_START(xen_entry_SYSENTER_compat)
+	UNWIND_HINT_ENTRY
 	ENDBR
 	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
 	mov $-ENOSYS, %rax
 	pushq $0
 	jmp hypercall_iret
-SYM_CODE_END(xen_sysenter_target)
-SYM_CODE_END(xen_syscall32_target)
+SYM_CODE_END(xen_entry_SYSENTER_compat)
+SYM_CODE_END(xen_entry_SYSCALL_compat)

 #endif /* CONFIG_IA32_EMULATION */
arch/x86/xen/xen-head.S
@@ -26,6 +26,7 @@ SYM_CODE_START(hypercall_page)
 	.rept (PAGE_SIZE / 32)
 		UNWIND_HINT_FUNC
 		ANNOTATE_NOENDBR
+		ANNOTATE_UNRET_SAFE
 		ret
 		/*
 		 * Xen will write the hypercall page, and sort out ENDBR.
@@ -48,15 +49,6 @@ SYM_CODE_START(startup_xen)
 	ANNOTATE_NOENDBR
 	cld

-	/* Clear .bss */
-	xor %eax,%eax
-	mov $__bss_start, %rdi
-	mov $__bss_stop, %rcx
-	sub %rdi, %rcx
-	shr $3, %rcx
-	rep stosq
-
-	mov %rsi, xen_start_info
 	mov initial_stack(%rip), %rsp

 	/* Set up %gs.
@@ -71,6 +63,7 @@
 	cdq
 	wrmsr

+	mov %rsi, %rdi
 	call xen_start_kernel
 SYM_CODE_END(startup_xen)
 	__FINIT
arch/x86/xen/xen-ops.h
@@ -10,10 +10,10 @@
 /* These are code, but not functions.  Defined in entry.S */
 extern const char xen_failsafe_callback[];

-void xen_sysenter_target(void);
+void xen_entry_SYSENTER_compat(void);
 #ifdef CONFIG_X86_64
-void xen_syscall_target(void);
-void xen_syscall32_target(void);
+void xen_entry_SYSCALL_64(void);
+void xen_entry_SYSCALL_compat(void);
 #endif

 extern void *xen_initial_gdt;
drivers/acpi/bus.c
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed);
 bool osc_sb_native_usb4_support_confirmed;
 EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);

-bool osc_sb_cppc_not_supported;
+bool osc_sb_cppc2_support_acked;

 static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
 static void acpi_bus_osc_negotiate_platform_control(void)
@@ -358,11 +358,6 @@ static void acpi_bus_osc_negotiate_platform_control(void)
 		return;
 	}

-#ifdef CONFIG_ACPI_CPPC_LIB
-	osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
-			(OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
-#endif
-
 	/*
 	 * Now run _OSC again with query flag clear and with the caps
 	 * supported by both the OS and the platform.
@@ -376,6 +371,10 @@ static void acpi_bus_osc_negotiate_platform_control(void)

 	capbuf_ret = context.ret.pointer;
 	if (context.ret.length > OSC_SUPPORT_DWORD) {
+#ifdef CONFIG_ACPI_CPPC_LIB
+		osc_sb_cppc2_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPCV2_SUPPORT;
+#endif
+
 		osc_sb_apei_support_acked =
 			capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
 		osc_pc_lpi_support_confirmed =
drivers/acpi/cppc_acpi.c
@@ -577,6 +577,19 @@ bool __weak cpc_ffh_supported(void)
 	return false;
 }

+/**
+ * cpc_supported_by_cpu() - check if CPPC is supported by CPU
+ *
+ * Check if the architectural support for CPPC is present even
+ * if the _OSC hasn't prescribed it
+ *
+ * Return: true for supported, false for not supported
+ */
+bool __weak cpc_supported_by_cpu(void)
+{
+	return false;
+}
+
 /**
  * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
  *
@@ -684,8 +697,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	acpi_status status;
 	int ret = -ENODATA;

-	if (osc_sb_cppc_not_supported)
-		return -ENODEV;
+	if (!osc_sb_cppc2_support_acked) {
+		pr_debug("CPPC v2 _OSC not acked\n");
+		if (!cpc_supported_by_cpu())
+			return -ENODEV;
+	}

 	/* Parse the ACPI _CPC table for this CPU. */
 	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
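Since cpc_supported_by_cpu() is __weak, an architecture can override it to claim CPPC support even when firmware's _OSC handshake did not ack CPPC v2. A hedged sketch of such an override (the feature probe is a placeholder, not a real kernel API):

#include <stdbool.h>

extern bool cpu_advertises_cppc(void);	/* hypothetical feature probe */

/* Arch-side strong definition overriding the __weak default above. */
bool cpc_supported_by_cpu(void)
{
	return cpu_advertises_cppc();	/* e.g. a CPUID-derived flag on x86 */
}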