Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

drivers/net/ethernet/intel/igc/igc_main.c
  06b412589e ("igc: Add lock to safeguard global Qbv variables")
  d3750076d4 ("igc: Add TransmissionOverrun counter")

drivers/net/ethernet/microsoft/mana/mana_en.c
  a7dfeda6fd ("net: mana: Fix MANA VF unload when hardware is unresponsive")
  a9ca9f9cef ("page_pool: split types and declarations from page_pool.h")
  92272ec410 ("eth: add missing xdp.h includes in drivers")

net/mptcp/protocol.h
  511b90e392 ("mptcp: fix disconnect vs accept race")
  b8dc6d6ce9 ("mptcp: fix rcv buffer auto-tuning")

tools/testing/selftests/net/mptcp/mptcp_join.sh
  c8c101ae39 ("selftests: mptcp: join: fix 'implicit EP' test")
  03668c65d1 ("selftests: mptcp: join: rework detailed report")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 4d016ae42e
@@ -82,7 +82,12 @@ Description:
 		whether it resides in persistent capacity, volatile capacity,
 		or the LSA, is made permanently unavailable by whatever means
 		is appropriate for the media type. This functionality requires
-		the device to be not be actively decoding any HPA ranges.
+		the device to be disabled, that is, not actively decoding any
+		HPA ranges. This permits avoiding explicit global CPU cache
+		management, relying instead for it to be done when a region
+		transitions between software programmed and hardware committed
+		states. If this file is not present, then there is no hardware
+		support for the operation.


 What:		/sys/bus/cxl/devices/memX/security/erase
@@ -92,7 +97,13 @@ Contact:	linux-cxl@vger.kernel.org
 Description:
 		(WO) Write a boolean 'true' string value to this attribute to
 		secure erase user data by changing the media encryption keys for
-		all user data areas of the device.
+		all user data areas of the device. This functionality requires
+		the device to be disabled, that is, not actively decoding any
+		HPA ranges. This permits avoiding explicit global CPU cache
+		management, relying instead for it to be done when a region
+		transitions between software programmed and hardware committed
+		states. If this file is not present, then there is no hardware
+		support for the operation.


 What:		/sys/bus/cxl/devices/memX/firmware/
@@ -513,17 +513,18 @@ Description:	information about CPUs heterogeneity.
 		cpu_capacity: capacity of cpuX.

 What:		/sys/devices/system/cpu/vulnerabilities
-		/sys/devices/system/cpu/vulnerabilities/meltdown
-		/sys/devices/system/cpu/vulnerabilities/spectre_v1
-		/sys/devices/system/cpu/vulnerabilities/spectre_v2
-		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+		/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
 		/sys/devices/system/cpu/vulnerabilities/l1tf
 		/sys/devices/system/cpu/vulnerabilities/mds
-		/sys/devices/system/cpu/vulnerabilities/srbds
-		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
-		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+		/sys/devices/system/cpu/vulnerabilities/meltdown
 		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
 		/sys/devices/system/cpu/vulnerabilities/retbleed
+		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+		/sys/devices/system/cpu/vulnerabilities/srbds
+		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
@@ -2,7 +2,7 @@ What:		/sys/devices/platform/hidma-*/chid
 		/sys/devices/platform/QCOM8061:*/chid
 Date:		Dec 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Contains the ID of the channel within the HIDMA instance.
 		It is used to associate a given HIDMA channel with the
@@ -2,7 +2,7 @@ What:		/sys/devices/platform/hidma-mgmt*/chanops/chan*/priority
 		/sys/devices/platform/QCOM8060:*/chanops/chan*/priority
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Contains either 0 or 1 and indicates if the DMA channel is a
 		low priority (0) or high priority (1) channel.
@@ -11,7 +11,7 @@ What:		/sys/devices/platform/hidma-mgmt*/chanops/chan*/weight
 		/sys/devices/platform/QCOM8060:*/chanops/chan*/weight
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Contains 0..15 and indicates the weight of the channel among
 		equal priority channels during round robin scheduling.
@@ -20,7 +20,7 @@ What:		/sys/devices/platform/hidma-mgmt*/chreset_timeout_cycles
 		/sys/devices/platform/QCOM8060:*/chreset_timeout_cycles
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Contains the platform specific cycle value to wait after a
 		reset command is issued. If the value is chosen too short,
@@ -32,7 +32,7 @@ What:		/sys/devices/platform/hidma-mgmt*/dma_channels
 		/sys/devices/platform/QCOM8060:*/dma_channels
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Contains the number of dma channels supported by one instance
 		of HIDMA hardware. The value may change from chip to chip.
@@ -41,7 +41,7 @@ What:		/sys/devices/platform/hidma-mgmt*/hw_version_major
 		/sys/devices/platform/QCOM8060:*/hw_version_major
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Version number major for the hardware.

@@ -49,7 +49,7 @@ What:		/sys/devices/platform/hidma-mgmt*/hw_version_minor
 		/sys/devices/platform/QCOM8060:*/hw_version_minor
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Version number minor for the hardware.

@@ -57,7 +57,7 @@ What:		/sys/devices/platform/hidma-mgmt*/max_rd_xactions
 		/sys/devices/platform/QCOM8060:*/max_rd_xactions
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Contains a value between 0 and 31. Maximum number of
 		read transactions that can be issued back to back.
@@ -69,7 +69,7 @@ What:		/sys/devices/platform/hidma-mgmt*/max_read_request
 		/sys/devices/platform/QCOM8060:*/max_read_request
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Size of each read request. The value needs to be a power
 		of two and can be between 128 and 1024.
@@ -78,7 +78,7 @@ What:		/sys/devices/platform/hidma-mgmt*/max_wr_xactions
 		/sys/devices/platform/QCOM8060:*/max_wr_xactions
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Contains a value between 0 and 31. Maximum number of
 		write transactions that can be issued back to back.
@@ -91,7 +91,7 @@ What:		/sys/devices/platform/hidma-mgmt*/max_write_request
 		/sys/devices/platform/QCOM8060:*/max_write_request
 Date:		Nov 2015
 KernelVersion:	4.4
-Contact:	"Sinan Kaya <okaya@codeaurora.org>"
+Contact:	"Sinan Kaya <okaya@kernel.org>"
 Description:
 		Size of each write request. The value needs to be a power
 		of two and can be between 128 and 1024.
109	Documentation/admin-guide/hw-vuln/gather_data_sampling.rst (new file)
@@ -0,0 +1,109 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+GDS - Gather Data Sampling
+==========================
+
+Gather Data Sampling is a hardware vulnerability which allows unprivileged
+speculative access to data which was previously stored in vector registers.
+
+Problem
+-------
+When a gather instruction performs loads from memory, different data elements
+are merged into the destination vector register. However, when a gather
+instruction that is transiently executed encounters a fault, stale data from
+architectural or internal vector registers may get transiently forwarded to the
+destination vector register instead. This will allow a malicious attacker to
+infer stale data using typical side channel techniques like cache timing
+attacks. GDS is a purely sampling-based attack.
+
+The attacker uses gather instructions to infer the stale vector register data.
+The victim does not need to do anything special other than use the vector
+registers. The victim does not need to use gather instructions to be
+vulnerable.
+
+Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks
+are possible.
+
+Attack scenarios
+----------------
+Without mitigation, GDS can infer stale data across virtually all
+permission boundaries:
+
+	Non-enclaves can infer SGX enclave data
+	Userspace can infer kernel data
+	Guests can infer data from hosts
+	Guest can infer guest from other guests
+	Users can infer data from other users
+
+Because of this, it is important to ensure that the mitigation stays enabled in
+lower-privilege contexts like guests and when running outside SGX enclaves.
+
+The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure
+that guests are not allowed to disable the GDS mitigation. If a host erred and
+allowed this, a guest could theoretically disable GDS mitigation, mount an
+attack, and re-enable it.
+
+Mitigation mechanism
+--------------------
+This issue is mitigated in microcode. The microcode defines the following new
+bits:
+
+ ================================   ===   ============================
+ IA32_ARCH_CAPABILITIES[GDS_CTRL]   R/O   Enumerates GDS vulnerability
+                                          and mitigation support.
+ IA32_ARCH_CAPABILITIES[GDS_NO]     R/O   Processor is not vulnerable.
+ IA32_MCU_OPT_CTRL[GDS_MITG_DIS]    R/W   Disables the mitigation
+                                          0 by default.
+ IA32_MCU_OPT_CTRL[GDS_MITG_LOCK]   R/W   Locks GDS_MITG_DIS=0. Writes
+                                          to GDS_MITG_DIS are ignored
+                                          Can't be cleared once set.
+ ================================   ===   ============================
+
+GDS can also be mitigated on systems that don't have updated microcode by
+disabling AVX. This can be done by setting gather_data_sampling="force" or
+"clearcpuid=avx" on the kernel command-line.
+
+If used, these options will disable AVX use by turning off XSAVE YMM support.
+However, the processor will still enumerate AVX support.  Userspace that
+does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM
+support will break.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The mitigation can be disabled by setting "gather_data_sampling=off" or
+"mitigations=off" on the kernel command line. Not specifying either will default
+to the mitigation being enabled. Specifying "gather_data_sampling=force" will
+use the microcode mitigation when available or disable AVX on affected systems
+where the microcode hasn't been updated to include the mitigation.
+
+GDS System Information
+------------------------
+The kernel provides vulnerability status information through sysfs. For
+GDS this can be accessed by the following sysfs file:
+
+/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+
+The possible values contained in this file are:
+
+ ============================== =============================================
+ Not affected                   Processor not vulnerable.
+ Vulnerable                     Processor vulnerable and mitigation disabled.
+ Vulnerable: No microcode       Processor vulnerable and microcode is missing
+                                mitigation.
+ Mitigation: AVX disabled,
+ no microcode                   Processor is vulnerable and microcode is missing
+                                mitigation. AVX disabled as mitigation.
+ Mitigation: Microcode          Processor is vulnerable and mitigation is in
+                                effect.
+ Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in
+                                effect and cannot be disabled.
+ Unknown: Dependent on
+ hypervisor status              Running on a virtual guest processor that is
+                                affected but with no way to know if host
+                                processor is mitigated or vulnerable.
+ ============================== =============================================
+
+GDS Default mitigation
+----------------------
+The updated microcode will enable the mitigation by default. The kernel's
+default action is to leave the mitigation enabled.
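As a concrete illustration of the interface documented above, a user-space check might look like the following C sketch (not part of this commit; only the sysfs path comes from the documentation, and a missing file simply means the running kernel does not report GDS)::

    /* Illustrative only: read the GDS status file documented above. */
    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/system/cpu/vulnerabilities/gather_data_sampling";
            char line[128];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);  /* absent: kernel has no GDS reporting */
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    printf("GDS status: %s", line);
            fclose(f);
            return 0;
    }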
@@ -19,3 +19,5 @@ are configurable at compile, boot or run time.
    l1d_flush.rst
    processor_mmio_stale_data.rst
    cross-thread-rsb.rst
+   srso
+   gather_data_sampling.rst
133	Documentation/admin-guide/hw-vuln/srso.rst (new file)
@@ -0,0 +1,133 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Speculative Return Stack Overflow (SRSO)
+========================================
+
+This is a mitigation for the speculative return stack overflow (SRSO)
+vulnerability found on AMD processors. The mechanism is by now the well
+known scenario of poisoning CPU functional units - the Branch Target
+Buffer (BTB) and Return Address Predictor (RAP) in this case - and then
+tricking the elevated privilege domain (the kernel) into leaking
+sensitive data.
+
+AMD CPUs predict RET instructions using a Return Address Predictor (aka
+Return Address Stack/Return Stack Buffer). In some cases, a non-architectural
+CALL instruction (i.e., an instruction predicted to be a CALL but is
+not actually a CALL) can create an entry in the RAP which may be used
+to predict the target of a subsequent RET instruction.
+
+The specific circumstances that lead to this varies by microarchitecture
+but the concern is that an attacker can mis-train the CPU BTB to predict
+non-architectural CALL instructions in kernel space and use this to
+control the speculative target of a subsequent kernel RET, potentially
+leading to information disclosure via a speculative side-channel.
+
+The issue is tracked under CVE-2023-20569.
+
+Affected processors
+-------------------
+
+AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older
+processors have not been investigated.
+
+System information and options
+------------------------------
+
+First of all, it is required that the latest microcode be loaded for
+mitigations to be effective.
+
+The sysfs file showing SRSO mitigation status is:
+
+  /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow
+
+The possible values in this file are:
+
+ - 'Not affected'               The processor is not vulnerable
+
+ - 'Vulnerable: no microcode'   The processor is vulnerable, no
+                                microcode extending IBPB functionality
+                                to address the vulnerability has been
+                                applied.
+
+ - 'Mitigation: microcode'      Extended IBPB functionality microcode
+                                patch has been applied. It does not
+                                address User->Kernel and Guest->Host
+                                transitions protection but it does
+                                address User->User and VM->VM attack
+                                vectors.
+
+                                (spec_rstack_overflow=microcode)
+
+ - 'Mitigation: safe RET'       Software-only mitigation. It complements
+                                the extended IBPB microcode patch
+                                functionality by addressing User->Kernel
+                                and Guest->Host transitions protection.
+
+                                Selected by default or by
+                                spec_rstack_overflow=safe-ret
+
+ - 'Mitigation: IBPB'           Similar protection as "safe RET" above
+                                but employs an IBPB barrier on privilege
+                                domain crossings (User->Kernel,
+                                Guest->Host).
+
+                                (spec_rstack_overflow=ibpb)
+
+ - 'Mitigation: IBPB on VMEXIT' Mitigation addressing the cloud provider
+                                scenario - the Guest->Host transitions
+                                only.
+
+                                (spec_rstack_overflow=ibpb-vmexit)
+
+In order to exploit vulnerability, an attacker needs to:
+
+ - gain local access on the machine
+
+ - break kASLR
+
+ - find gadgets in the running kernel in order to use them in the exploit
+
+ - potentially create and pin an additional workload on the sibling
+   thread, depending on the microarchitecture (not necessary on fam 0x19)
+
+ - run the exploit
+
+Considering the performance implications of each mitigation type, the
+default one is 'Mitigation: safe RET' which should take care of most
+attack vectors, including the local User->Kernel one.
+
+As always, the user is advised to keep her/his system up-to-date by
+applying software updates regularly.
+
+The default setting will be reevaluated when needed and especially when
+new attack vectors appear.
+
+As one can surmise, 'Mitigation: safe RET' does come at the cost of some
+performance depending on the workload. If one trusts her/his userspace
+and does not want to suffer the performance impact, one can always
+disable the mitigation with spec_rstack_overflow=off.
+
+Similarly, 'Mitigation: IBPB' is another full mitigation type employing
+an indirect branch prediction barrier after having applied the required
+microcode patch for one's system. This mitigation comes also at
+a performance cost.
+
+Mitigation: safe RET
+--------------------
+
+The mitigation works by ensuring all RET instructions speculate to
+a controlled location, similar to how speculation is controlled in the
+retpoline sequence. To accomplish this, the __x86_return_thunk forces
+the CPU to mispredict every function return using a 'safe return'
+sequence.
+
+To ensure the safety of this mitigation, the kernel must ensure that the
+safe return sequence is itself free from attacker interference. In Zen3
+and Zen4, this is accomplished by creating a BTB alias between the
+untraining function srso_untrain_ret_alias() and the safe return
+function srso_safe_ret_alias() which results in evicting a potentially
+poisoned BTB entry and using that safe one for all function returns.
+
+In older Zen1 and Zen2, this is accomplished using a reinterpretation
+technique similar to Retbleed one: srso_untrain_ret() and
+srso_safe_ret().
@@ -624,3 +624,9 @@ Used to get the correct ranges:
   * VMALLOC_START ~ VMALLOC_END : vmalloc() / ioremap() space.
   * VMEMMAP_START ~ VMEMMAP_END : vmemmap space, used for struct page array.
   * KERNEL_LINK_ADDR : start address of Kernel link and BPF
+
+va_kernel_pa_offset
+-------------------
+
+Indicates the offset between the kernel virtual and physical mappings.
+Used to translate virtual to physical addresses.
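To illustrate how the new export is meant to be consumed, here is a hedged user-space sketch (not part of this commit; the addresses are invented, and the virt-to-phys direction is an assumption based on the description above)::

    /* Hypothetical dump-tool fragment using va_kernel_pa_offset. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Parsed from vmcoreinfo: NUMBER(va_kernel_pa_offset)=0x... */
            uint64_t va_kernel_pa_offset = 0xffffffff00000000ULL; /* example */
            uint64_t kernel_va = 0xffffffff80201000ULL;           /* example */

            /* phys = virt - offset for the kernel mapping (per the text above) */
            printf("phys = 0x%llx\n",
                   (unsigned long long)(kernel_va - va_kernel_pa_offset));
            return 0;
    }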
@@ -1623,6 +1623,26 @@
 			Format: off | on
 			default: on

+	gather_data_sampling=
+			[X86,INTEL] Control the Gather Data Sampling (GDS)
+			mitigation.
+
+			Gather Data Sampling is a hardware vulnerability which
+			allows unprivileged speculative access to data which was
+			previously stored in vector registers.
+
+			This issue is mitigated by default in updated microcode.
+			The mitigation may have a performance impact but can be
+			disabled. On systems without the microcode mitigation
+			disabling AVX serves as a mitigation.
+
+			force:	Disable AVX to mitigate systems without
+				microcode mitigation. No effect if the microcode
+				mitigation is present. Known to cause crashes in
+				userspace with buggy AVX enumeration.
+
+			off:	Disable GDS mitigation.
+
 	gcov_persist=	[GCOV] When non-zero (default), profiling data for
 			kernel modules is saved and remains accessible via
 			debugfs, even when the module is unloaded/reloaded.
@@ -3273,24 +3293,25 @@
 			Disable all optional CPU mitigations.  This
 			improves system performance, but it may also
 			expose users to several CPU vulnerabilities.
-			Equivalent to: nopti [X86,PPC]
-				       if nokaslr then kpti=0 [ARM64]
-				       nospectre_v1 [X86,PPC]
-				       nobp=0 [S390]
-				       nospectre_v2 [X86,PPC,S390,ARM64]
-				       spectre_v2_user=off [X86]
-				       spec_store_bypass_disable=off [X86,PPC]
-				       ssbd=force-off [ARM64]
-				       nospectre_bhb [ARM64]
+			Equivalent to: if nokaslr then kpti=0 [ARM64]
+				       gather_data_sampling=off [X86]
+				       kvm.nx_huge_pages=off [X86]
 				       l1tf=off [X86]
 				       mds=off [X86]
-				       tsx_async_abort=off [X86]
-				       kvm.nx_huge_pages=off [X86]
-				       srbds=off [X86,INTEL]
 				       mmio_stale_data=off [X86]
 				       no_entry_flush [PPC]
 				       no_uaccess_flush [PPC]
-				       mmio_stale_data=off [X86]
+				       nobp=0 [S390]
+				       nopti [X86,PPC]
+				       nospectre_bhb [ARM64]
+				       nospectre_v1 [X86,PPC]
+				       nospectre_v2 [X86,PPC,S390,ARM64]
+				       retbleed=off [X86]
+				       spec_store_bypass_disable=off [X86,PPC]
+				       spectre_v2_user=off [X86]
+				       srbds=off [X86,INTEL]
+				       ssbd=force-off [ARM64]
+				       tsx_async_abort=off [X86]

 			Exceptions:
 			       This does not have any effect on
@@ -5875,6 +5896,17 @@
 			Not specifying this option is equivalent to
 			spectre_v2_user=auto.

+	spec_rstack_overflow=
+			[X86] Control RAS overflow mitigation on AMD Zen CPUs
+
+			off		- Disable mitigation
+			microcode	- Enable microcode mitigation only
+			safe-ret	- Enable sw-only safe RET mitigation (default)
+			ibpb		- Enable mitigation by issuing IBPB on
+					  kernel entry
+			ibpb-vmexit	- Issue IBPB only on VMEXIT
+					  (cloud-specific mitigation)
+
 	spec_store_bypass_disable=
 			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
 			(Speculative Store Bypass vulnerability)
@@ -551,9 +551,8 @@ mutex or just to use i_size_read() instead.
 Note: this does not protect the file->f_pos against concurrent modifications
 since this is something the userspace has to take care about.

--->iterate() is called with i_rwsem exclusive.
-
--->iterate_shared() is called with i_rwsem at least shared.
+->iterate_shared() is called with i_rwsem held for reading, and with the
+file f_pos_lock held exclusively

 ->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
 Most instances call fasync_helper(), which does that maintenance, so it's
@@ -537,7 +537,7 @@ vfs_readdir() is gone; switch to iterate_dir() instead

 **mandatory**

-->readdir() is gone now; switch to ->iterate()
+->readdir() is gone now; switch to ->iterate_shared()

 **mandatory**
@@ -693,24 +693,19 @@ parallel now.

 ---

-**recommended**
+**mandatory**

-->iterate_shared() is added; it's a parallel variant of ->iterate().
+->iterate_shared() is added.
 Exclusion on struct file level is still provided (as well as that
 between it and lseek on the same struct file), but if your directory
 has been opened several times, you can get these called in parallel.
 Exclusion between that method and all directory-modifying ones is
 still provided, of course.

-Often enough ->iterate() can serve as ->iterate_shared() without any
-changes - it is a read-only operation, after all.  If you have any
-per-inode or per-dentry in-core data structures modified by ->iterate(),
-you might need something to serialize the access to them.  If you
-do dcache pre-seeding, you'll need to switch to d_alloc_parallel() for
-that; look for in-tree examples.
-
-Old method is only used if the new one is absent; eventually it will
-be removed.  Switch while you still can; the old one won't stay.
+If you have any per-inode or per-dentry in-core data structures modified
+by ->iterate_shared(), you might need something to serialize the access
+to them.  If you do dcache pre-seeding, you'll need to switch to
+d_alloc_parallel() for that; look for in-tree examples.

 ---

@@ -930,9 +925,9 @@ should be done by looking at FMODE_LSEEK in file->f_mode.
 filldir_t (readdir callbacks) calling conventions have changed.  Instead of
 returning 0 or -E... it returns bool now.  false means "no more" (as -E... used
 to) and true - "keep going" (as 0 in old calling conventions).  Rationale:
-callers never looked at specific -E... values anyway.  ->iterate() and
-->iterate_shared() instance require no changes at all, all filldir_t ones in
-the tree converted.
+callers never looked at specific -E... values anyway.  ->iterate_shared()
+instances require no changes at all, all filldir_t ones in the tree
+converted.

 ---

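For readers converting out-of-tree filesystems, a minimal sketch of a filldir_t callback under the new bool convention might look like this (illustrative, not from this commit; the counting context struct is hypothetical)::

    #include <linux/fs.h>

    struct count_ctx {
            struct dir_context ctx; /* must be first for container_of() */
            int seen;
    };

    static bool count_actor(struct dir_context *ctx, const char *name,
                            int len, loff_t pos, u64 ino, unsigned int d_type)
    {
            struct count_ctx *cnt = container_of(ctx, struct count_ctx, ctx);

            cnt->seen++;
            return cnt->seen < 16; /* true: keep going; false: stop */
    }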
28	MAINTAINERS
@@ -2339,7 +2339,7 @@ F:	drivers/phy/mediatek/
 ARM/MICROCHIP (ARM64) SoC support
 M:	Conor Dooley <conor@kernel.org>
 M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 T:	git https://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git
@@ -2348,7 +2348,7 @@ F:	arch/arm64/boot/dts/microchip/
 ARM/Microchip (AT91) SoC support
 M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 M:	Alexandre Belloni <alexandre.belloni@bootlin.com>
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 W:	http://www.linux4sam.org
@@ -3250,7 +3250,7 @@ F:	include/uapi/linux/atm*

 ATMEL MACB ETHERNET DRIVER
 M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 S:	Supported
 F:	drivers/net/ethernet/cadence/

@@ -9679,6 +9679,7 @@ F:	tools/hv/

 HYPERBUS SUPPORT
 M:	Vignesh Raghavendra <vigneshr@ti.com>
+R:	Tudor Ambarus <tudor.ambarus@linaro.org>
 L:	linux-mtd@lists.infradead.org
 S:	Supported
 Q:	http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -13804,7 +13805,7 @@ F:	Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
 F:	drivers/spi/spi-at91-usart.c

 MICROCHIP AUDIO ASOC DRIVERS
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/devicetree/bindings/sound/atmel*
@@ -13827,7 +13828,7 @@ S:	Maintained
 F:	drivers/crypto/atmel-ecc.*

 MICROCHIP EIC DRIVER
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/devicetree/bindings/interrupt-controller/microchip,sama7g5-eic.yaml
@@ -13900,7 +13901,7 @@ F:	drivers/video/fbdev/atmel_lcdfb.c
 F:	include/video/atmel_lcdc.h

 MICROCHIP MCP16502 PMIC DRIVER
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
@@ -13927,7 +13928,7 @@ F:	Documentation/devicetree/bindings/mtd/atmel-nand.txt
 F:	drivers/mtd/nand/raw/atmel/*

 MICROCHIP OTPC DRIVER
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml
@@ -13966,7 +13967,7 @@ F:	Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
 F:	drivers/fpga/microchip-spi.c

 MICROCHIP PWM DRIVER
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-pwm@vger.kernel.org
 S:	Supported
@@ -13982,7 +13983,7 @@ F:	drivers/iio/adc/at91-sama5d2_adc.c
 F:	include/dt-bindings/iio/adc/at91-sama5d2_adc.h

 MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 S:	Supported
 F:	Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml
 F:	drivers/power/reset/at91-sama5d2_shdwc.c
@@ -13999,7 +14000,7 @@ S:	Supported
 F:	drivers/spi/spi-atmel.*

 MICROCHIP SSC DRIVER
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/devicetree/bindings/misc/atmel-ssc.txt
@@ -14028,7 +14029,7 @@ F:	drivers/usb/gadget/udc/atmel_usba_udc.*

 MICROCHIP WILC1000 WIFI DRIVER
 M:	Ajay Singh <ajay.kathat@microchip.com>
-M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@tuxon.dev>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 F:	drivers/net/wireless/microchip/wilc1000/
@@ -18533,17 +18534,14 @@ RTL8180 WIRELESS DRIVER
 L:	linux-wireless@vger.kernel.org
 S:	Orphan
 W:	https://wireless.wiki.kernel.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 F:	drivers/net/wireless/realtek/rtl818x/rtl8180/

 RTL8187 WIRELESS DRIVER
-M:	Herton Ronaldo Krzesinski <herton@canonical.com>
-M:	Hin-Tak Leung <htl10@users.sourceforge.net>
+M:	Hin-Tak Leung <hintak.leung@gmail.com>
 M:	Larry Finger <Larry.Finger@lwfinger.net>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 W:	https://wireless.wiki.kernel.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 F:	drivers/net/wireless/realtek/rtl818x/rtl8187/

 RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
2	Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -31,6 +31,13 @@
 .Lskip_hcrx_\@:
 .endm

+/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
+.macro __check_hvhe fail, tmp
+	mrs	\tmp, hcr_el2
+	and	\tmp, \tmp, #HCR_E2H
+	cbz	\tmp, \fail
+.endm
+
 /*
  * Allow Non-secure EL1 and EL0 to access physical timer and counter.
  * This is not necessary for VHE, since the host kernel runs in EL2,
@@ -43,9 +50,7 @@
  */
 .macro __init_el2_timers
 	mov	x0, #3				// Enable EL1 physical timers
-	mrs	x1, hcr_el2
-	and	x1, x1, #HCR_E2H
-	cbz	x1, .LnVHE_\@
+	__check_hvhe .LnVHE_\@, x1
 	lsl	x0, x0, #10
 .LnVHE_\@:
 	msr	cnthctl_el2, x0
@@ -139,15 +144,14 @@

 /* Coprocessor traps */
 .macro __init_el2_cptr
-	mrs	x1, hcr_el2
-	and	x1, x1, #HCR_E2H
-	cbz	x1, .LnVHE_\@
+	__check_hvhe .LnVHE_\@, x1
 	mov	x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
-	b	.Lset_cptr_\@
+	msr	cpacr_el1, x0
+	b	.Lskip_set_cptr_\@
 .LnVHE_\@:
 	mov	x0, #0x33ff
-.Lset_cptr_\@:
 	msr	cptr_el2, x0			// Disable copro. traps to EL2
+.Lskip_set_cptr_\@:
 .endm

 /* Disable any fine grained traps */
@@ -268,19 +272,19 @@
 	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

 .Linit_sve_\@:	/* SVE register access */
-	mrs	x0, cptr_el2			// Disable SVE traps
-	mrs	x1, hcr_el2
-	and	x1, x1, #HCR_E2H
-	cbz	x1, .Lcptr_nvhe_\@
+	__check_hvhe .Lcptr_nvhe_\@, x1

-	// VHE case
+	// (h)VHE case
+	mrs	x0, cpacr_el1			// Disable SVE traps
 	orr	x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
-	b	.Lset_cptr_\@
+	msr	cpacr_el1, x0
+	b	.Lskip_set_cptr_\@

 .Lcptr_nvhe_\@: // nVHE case
+	mrs	x0, cptr_el2			// Disable SVE traps
 	bic	x0, x0, #CPTR_EL2_TZ
-.Lset_cptr_\@:
 	msr	cptr_el2, x0
+.Lskip_set_cptr_\@:
 	isb
 	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
 	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
@@ -289,9 +293,19 @@
 	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

 .Linit_sme_\@:	/* SME register access and priority mapping */
+	__check_hvhe .Lcptr_nvhe_sme_\@, x1
+
+	// (h)VHE case
+	mrs	x0, cpacr_el1			// Disable SME traps
+	orr	x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
+	msr	cpacr_el1, x0
+	b	.Lskip_set_cptr_sme_\@
+
+.Lcptr_nvhe_sme_\@: // nVHE case
 	mrs	x0, cptr_el2			// Disable SME traps
 	bic	x0, x0, #CPTR_EL2_TSM
 	msr	cptr_el2, x0
+.Lskip_set_cptr_sme_\@:
 	isb

 	mrs	x1, sctlr_el2
@@ -278,7 +278,7 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void);
 asmlinkage void kvm_unexpected_el2_exception(void);
 struct kvm_cpu_context;
 void handle_trap(struct kvm_cpu_context *host_ctxt);
-asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on);
+asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
 void __noreturn __pkvm_init_finalise(void);
 void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
 void kvm_patch_vector_branch(struct alt_instr *alt,
@@ -571,6 +571,14 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
 	return test_bit(feature, vcpu->arch.features);
 }

+static __always_inline void kvm_write_cptr_el2(u64 val)
+{
+	if (has_vhe() || has_hvhe())
+		write_sysreg(val, cpacr_el1);
+	else
+		write_sysreg(val, cptr_el2);
+}
+
 static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 {
 	u64 val;
@@ -578,8 +586,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 	if (has_vhe()) {
 		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
 		       CPACR_EL1_ZEN_EL1EN);
 		if (cpus_have_final_cap(ARM64_SME))
 			val |= CPACR_EL1_SMEN_EL1EN;
+	} else if (has_hvhe()) {
+		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+
+		if (!vcpu_has_sve(vcpu) ||
+		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
+			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+		if (cpus_have_final_cap(ARM64_SME))
+			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
 	} else {
 		val = CPTR_NVHE_EL2_RES1;

@@ -597,9 +613,6 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
 {
 	u64 val = kvm_get_reset_cptr_el2(vcpu);

-	if (has_vhe() || has_hvhe())
-		write_sysreg(val, cpacr_el1);
-	else
-		write_sysreg(val, cptr_el2);
+	kvm_write_cptr_el2(val);
 }
 #endif /* __ARM64_KVM_EMULATE_H__ */
@@ -679,7 +679,7 @@ static void fpsimd_to_sve(struct task_struct *task)
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

-	if (!system_supports_sve())
+	if (!system_supports_sve() && !system_supports_sme())
 		return;

 	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
@@ -705,7 +705,7 @@ static void sve_to_fpsimd(struct task_struct *task)
 	unsigned int i;
 	__uint128_t const *p;

-	if (!system_supports_sve())
+	if (!system_supports_sve() && !system_supports_sme())
 		return;

 	vl = thread_get_cur_vl(&task->thread);
@@ -835,7 +835,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

-	if (!test_tsk_thread_flag(task, TIF_SVE))
+	if (!test_tsk_thread_flag(task, TIF_SVE) &&
+	    !thread_sm_enabled(&task->thread))
 		return;

 	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
@@ -909,7 +910,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
 			 */
 			task->thread.svcr &= ~(SVCR_SM_MASK |
 					       SVCR_ZA_MASK);
-			clear_thread_flag(TIF_SME);
+			clear_tsk_thread_flag(task, TIF_SME);
 			free_sme = true;
 		}
 	}
@@ -932,11 +932,13 @@ static int sve_set_common(struct task_struct *target,
 	/*
 	 * Ensure target->thread.sve_state is up to date with target's
 	 * FPSIMD regs, so that a short copyin leaves trailing
-	 * registers unmodified.  Always enable SVE even if going into
-	 * streaming mode.
+	 * registers unmodified.  Only enable SVE if we are
+	 * configuring normal SVE, a system with streaming SVE may not
+	 * have normal SVE.
 	 */
 	fpsimd_sync_to_sve(target);
-	set_tsk_thread_flag(target, TIF_SVE);
+	if (type == ARM64_VEC_SVE)
+		set_tsk_thread_flag(target, TIF_SVE);
 	target->thread.fp_type = FP_STATE_SVE;

 	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
@@ -1180,6 +1182,8 @@ static int zt_set(struct task_struct *target,
 	if (ret == 0)
 		target->thread.svcr |= SVCR_ZA_MASK;

+	fpsimd_flush_task_state(target);
+
 	return ret;
 }
@@ -55,7 +55,7 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

 static bool vgic_present, kvm_arm_initialised;

-static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
+static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

 bool is_kvm_arm_initialised(void)
@@ -1864,18 +1864,24 @@ static void cpu_hyp_reinit(void)
 	cpu_hyp_init_features();
 }

-static void _kvm_arch_hardware_enable(void *discard)
+static void cpu_hyp_init(void *discard)
 {
-	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
+	if (!__this_cpu_read(kvm_hyp_initialized)) {
 		cpu_hyp_reinit();
-		__this_cpu_write(kvm_arm_hardware_enabled, 1);
+		__this_cpu_write(kvm_hyp_initialized, 1);
 	}
 }

+static void cpu_hyp_uninit(void *discard)
+{
+	if (__this_cpu_read(kvm_hyp_initialized)) {
+		cpu_hyp_reset();
+		__this_cpu_write(kvm_hyp_initialized, 0);
+	}
+}
+
 int kvm_arch_hardware_enable(void)
 {
-	int was_enabled;
-
 	/*
 	 * Most calls to this function are made with migration
 	 * disabled, but not with preemption disabled. The former is
@@ -1884,36 +1890,23 @@ int kvm_arch_hardware_enable(void)
 	 */
 	preempt_disable();

-	was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
-	_kvm_arch_hardware_enable(NULL);
+	cpu_hyp_init(NULL);

-	if (!was_enabled) {
-		kvm_vgic_cpu_up();
-		kvm_timer_cpu_up();
-	}
+	kvm_vgic_cpu_up();
+	kvm_timer_cpu_up();

 	preempt_enable();

 	return 0;
 }

-static void _kvm_arch_hardware_disable(void *discard)
-{
-	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
-		cpu_hyp_reset();
-		__this_cpu_write(kvm_arm_hardware_enabled, 0);
-	}
-}
-
 void kvm_arch_hardware_disable(void)
 {
-	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
-		kvm_timer_cpu_down();
-		kvm_vgic_cpu_down();
-	}
+	kvm_timer_cpu_down();
+	kvm_vgic_cpu_down();

 	if (!is_protected_kvm_enabled())
-		_kvm_arch_hardware_disable(NULL);
+		cpu_hyp_uninit(NULL);
 }

 #ifdef CONFIG_CPU_PM
@@ -1922,16 +1915,16 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    void *v)
 {
 	/*
-	 * kvm_arm_hardware_enabled is left with its old value over
+	 * kvm_hyp_initialized is left with its old value over
 	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
 	 * re-enable hyp.
 	 */
 	switch (cmd) {
 	case CPU_PM_ENTER:
-		if (__this_cpu_read(kvm_arm_hardware_enabled))
+		if (__this_cpu_read(kvm_hyp_initialized))
 			/*
-			 * don't update kvm_arm_hardware_enabled here
-			 * so that the hardware will be re-enabled
+			 * don't update kvm_hyp_initialized here
+			 * so that the hyp will be re-enabled
 			 * when we resume. See below.
 			 */
 			cpu_hyp_reset();
@@ -1939,8 +1932,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 		return NOTIFY_OK;
 	case CPU_PM_ENTER_FAILED:
 	case CPU_PM_EXIT:
-		if (__this_cpu_read(kvm_arm_hardware_enabled))
-			/* The hardware was enabled before suspend. */
+		if (__this_cpu_read(kvm_hyp_initialized))
+			/* The hyp was enabled before suspend. */
 			cpu_hyp_reinit();

 		return NOTIFY_OK;
@@ -2021,7 +2014,7 @@ static int __init init_subsystems(void)
 	/*
 	 * Enable hardware so that subsystem initialisation can access EL2.
 	 */
-	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
+	on_each_cpu(cpu_hyp_init, NULL, 1);

 	/*
 	 * Register CPU lower-power notifier
@@ -2059,7 +2052,7 @@ out:
 	hyp_cpu_pm_exit();

 	if (err || !is_protected_kvm_enabled())
-		on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
+		on_each_cpu(cpu_hyp_uninit, NULL, 1);

 	return err;
 }
@@ -2097,7 +2090,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
 	 * The stub hypercalls are now disabled, so set our local flag to
 	 * prevent a later re-init attempt in kvm_arch_hardware_enable().
 	 */
-	__this_cpu_write(kvm_arm_hardware_enabled, 1);
+	__this_cpu_write(kvm_hyp_initialized, 1);
 	preempt_enable();

 	return ret;
@@ -457,6 +457,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
 	 */
 	val &= ~(TCR_HD | TCR_HA);
 	write_sysreg_el1(val, SYS_TCR);
+	__kvm_skip_instr(vcpu);
 	return true;
 }

@@ -705,7 +705,20 @@ int hyp_ffa_init(void *pages)
 	if (res.a0 == FFA_RET_NOT_SUPPORTED)
 		return 0;

-	if (res.a0 != FFA_VERSION_1_0)
+	/*
+	 * Firmware returns the maximum supported version of the FF-A
+	 * implementation. Check that the returned version is
+	 * backwards-compatible with the hyp according to the rules in DEN0077A
+	 * v1.1 REL0 13.2.1.
+	 *
+	 * Of course, things are never simple when dealing with firmware. v1.1
+	 * broke ABI with v1.0 on several structures, which is itself
+	 * incompatible with the aforementioned versioning scheme. The
+	 * expectation is that v1.x implementations that do not support the v1.0
+	 * ABI return NOT_SUPPORTED rather than a version number, according to
+	 * DEN0077A v1.1 REL0 18.6.4.
+	 */
+	if (FFA_MAJOR_VERSION(res.a0) != 1)
 		return -EOPNOTSUPP;

 	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
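The versioning rule in the new comment can be sketched in isolation as follows (illustrative, not from this commit; the real FFA_MAJOR_VERSION()/FFA_MINOR_VERSION() macros live in the kernel's FF-A headers, and the bit layout here is an assumption based on the FF-A spec's major/minor packing)::

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed FF-A packing: major in bits 30:16, minor in bits 15:0. */
    #define FFA_MAJOR_VERSION(x) (((x) >> 16) & 0x7fff)
    #define FFA_MINOR_VERSION(x) ((x) & 0xffff)

    static bool ffa_fw_version_ok(uint32_t fw)
    {
            /* Same major is required; a newer minor is backwards-compatible. */
            return FFA_MAJOR_VERSION(fw) == 1;
    }

    int main(void)
    {
            uint32_t v1_0 = 1 << 16, v1_1 = (1 << 16) | 1, v2_0 = 2 << 16;

            printf("v1.0:%d v1.1:%d v2.0:%d\n", ffa_fw_version_ok(v1_0),
                   ffa_fw_version_ok(v1_1), ffa_fw_version_ok(v2_0));
            return 0;
    }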
@@ -63,7 +63,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 		__activate_traps_fpsimd32(vcpu);
 	}

-	write_sysreg(val, cptr_el2);
+	kvm_write_cptr_el2(val);
 	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
@@ -417,14 +417,6 @@ void *arch_dma_alloc(struct device *dev, size_t size,
 	map_uncached_pages(vaddr, size, paddr);
 	*dma_handle = (dma_addr_t) paddr;

-#if 0
-/* This probably isn't needed to support EISA cards.
-** ISA cards will certainly only support 24-bit DMA addressing.
-** Not clear if we can, want, or need to support ISA.
-*/
-	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
-		gfp |= GFP_DMA;
-#endif
 	return (void *)vaddr;
 }

@@ -337,7 +337,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
 	: "r19", "r20", "r21", "r22", "r1" );
 #else
     {
-	unsigned long valh=(val>>32),vall=(val&0xffffffffl);
+	unsigned long valh = (val >> 32), vall = (val & 0xffffffffl);
 	__asm__ __volatile__ (
 "	mtsp	%4, %%sr1\n"
 "	zdep	%2, 29, 2, %%r19\n"
@@ -473,7 +473,7 @@ void handle_unaligned(struct pt_regs *regs)
 	case OPCODE_LDWA_I:
 	case OPCODE_LDW_S:
 	case OPCODE_LDWA_S:
-		ret = emulate_ldw(regs, R3(regs->iir),0);
+		ret = emulate_ldw(regs, R3(regs->iir), 0);
 		break;

 	case OPCODE_STH:
@@ -482,7 +482,7 @@ void handle_unaligned(struct pt_regs *regs)

 	case OPCODE_STW:
 	case OPCODE_STWA:
-		ret = emulate_stw(regs, R2(regs->iir),0);
+		ret = emulate_stw(regs, R2(regs->iir), 0);
 		break;

 #ifdef CONFIG_64BIT
@@ -490,12 +490,12 @@ void handle_unaligned(struct pt_regs *regs)
 	case OPCODE_LDDA_I:
 	case OPCODE_LDD_S:
 	case OPCODE_LDDA_S:
-		ret = emulate_ldd(regs, R3(regs->iir),0);
+		ret = emulate_ldd(regs, R3(regs->iir), 0);
 		break;

 	case OPCODE_STD:
 	case OPCODE_STDA:
-		ret = emulate_std(regs, R2(regs->iir),0);
+		ret = emulate_std(regs, R2(regs->iir), 0);
 		break;
 #endif

@@ -503,24 +503,24 @@ void handle_unaligned(struct pt_regs *regs)
 	case OPCODE_FLDWS:
 	case OPCODE_FLDWXR:
 	case OPCODE_FLDWSR:
-		ret = emulate_ldw(regs,FR3(regs->iir),1);
+		ret = emulate_ldw(regs, FR3(regs->iir), 1);
 		break;

 	case OPCODE_FLDDX:
 	case OPCODE_FLDDS:
-		ret = emulate_ldd(regs,R3(regs->iir),1);
+		ret = emulate_ldd(regs, R3(regs->iir), 1);
 		break;

 	case OPCODE_FSTWX:
 	case OPCODE_FSTWS:
 	case OPCODE_FSTWXR:
 	case OPCODE_FSTWSR:
-		ret = emulate_stw(regs,FR3(regs->iir),1);
+		ret = emulate_stw(regs, FR3(regs->iir), 1);
 		break;

 	case OPCODE_FSTDX:
 	case OPCODE_FSTDS:
-		ret = emulate_std(regs,R3(regs->iir),1);
+		ret = emulate_std(regs, R3(regs->iir), 1);
 		break;

 	case OPCODE_LDCD_I:
@@ -19,9 +19,6 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
 	pmd_t *pmd = pmd_offset(pud, vaddr);
 	pte_t *pte;

-	if (pmd_none(*pmd))
-		pte = pte_alloc_kernel(pmd, vaddr);
-
 	pte = pte_offset_kernel(pmd, vaddr);
 	set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
 	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
@@ -669,6 +669,39 @@ static void __init gateway_init(void)
 		  PAGE_SIZE, PAGE_GATEWAY, 1);
 }

+static void __init fixmap_init(void)
+{
+	unsigned long addr = FIXMAP_START;
+	unsigned long end = FIXMAP_START + FIXMAP_SIZE;
+	pgd_t *pgd = pgd_offset_k(addr);
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
+	pmd_t *pmd;
+
+	BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);
+
+#if CONFIG_PGTABLE_LEVELS == 3
+	if (pud_none(*pud)) {
+		pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
+				     PAGE_SIZE << PMD_TABLE_ORDER);
+		if (!pmd)
+			panic("fixmap: pmd allocation failed.\n");
+		pud_populate(NULL, pud, pmd);
+	}
+#endif
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pte)
+			panic("fixmap: pte allocation failed.\n");
+
+		pmd_populate_kernel(&init_mm, pmd, pte);
+
+		addr += PAGE_SIZE;
+	} while (addr < end);
+}
+
 static void __init parisc_bootmem_free(void)
 {
 	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
@@ -683,6 +716,7 @@ void __init paging_init(void)
 	setup_bootmem();
 	pagetable_init();
 	gateway_init();
+	fixmap_init();
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local(NULL);

@@ -375,8 +375,7 @@ _GLOBAL(generic_secondary_smp_init)
 	beq	20f

 	/* start the specified thread */
-	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
-	ld	r4, 0(r5)
+	LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary_thread_init))
 	bl	book3e_start_thread

 	/* stop the current thread */
@@ -33,6 +33,9 @@
  * and then arrange for the ftrace function to be called.
  */
 .macro	ftrace_regs_entry allregs
+	/* Create a minimal stack frame for representing B */
+	PPC_STLU	r1, -STACK_FRAME_MIN_SIZE(r1)
+
 	/* Create our stack frame + pt_regs */
 	PPC_STLU	r1,-SWITCH_FRAME_SIZE(r1)

@@ -42,7 +45,7 @@

 #ifdef CONFIG_PPC64
 	/* Save the original return address in A's stack frame */
-	std	r0, LRSAVE+SWITCH_FRAME_SIZE(r1)
+	std	r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
 	/* Ok to continue? */
 	lbz	r3, PACA_FTRACE_ENABLED(r13)
 	cmpdi	r3, 0
@@ -77,6 +80,8 @@
 	mflr	r7
 	/* Save it as pt_regs->nip */
 	PPC_STL	r7, _NIP(r1)
+	/* Also save it in B's stackframe header for proper unwind */
+	PPC_STL	r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
 	/* Save the read LR in pt_regs->link */
 	PPC_STL	r0, _LINK(r1)

@@ -142,7 +147,7 @@
 #endif

 	/* Pop our stack frame */
-	addi r1, r1, SWITCH_FRAME_SIZE
+	addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE

 #ifdef CONFIG_LIVEPATCH_64
 	/* Based on the cmpd above, if the NIP was altered handle livepatch */
@@ -314,8 +314,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 	start = ALIGN_DOWN(start, page_size);
 	if (altmap) {
 		alt_start = altmap->base_pfn;
-		alt_end = altmap->base_pfn + altmap->reserve +
-			  altmap->free + altmap->alloc + altmap->align;
+		alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
 	}

 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
@@ -180,7 +180,7 @@ static void wake_hw_thread(void *info)
 	unsigned long inia;
 	int cpu = *(const int *)info;

-	inia = *(unsigned long *)fsl_secondary_thread_init;
+	inia = ppc_function_entry(fsl_secondary_thread_init);
 	book3e_start_thread(cpu_thread_in_core(cpu), inia);
 }
 #endif
@@ -26,8 +26,8 @@
 #include <linux/rtc.h>
 #include <linux/of_address.h>

+#include <asm/early_ioremap.h>
 #include <asm/sections.h>
-#include <asm/io.h>
 #include <asm/machdep.h>
 #include <asm/time.h>
 #include <asm/nvram.h>
@@ -182,7 +182,7 @@ static int __init via_calibrate_decr(void)
 		return 0;
 	}
 	of_node_put(vias);
-	via = ioremap(rsrc.start, resource_size(&rsrc));
+	via = early_ioremap(rsrc.start, resource_size(&rsrc));
 	if (via == NULL) {
 		printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
 		return 0;
@@ -207,7 +207,7 @@ static int __init via_calibrate_decr(void)

 	ppc_tb_freq = (dstart - dend) * 100 / 6;

-	iounmap(via);
+	early_iounmap((void *)via, resource_size(&rsrc));

 	return 1;
 }
@@ -19,7 +19,7 @@ typedef u64 phys_cpuid_t;
 #define PHYS_CPUID_INVALID INVALID_HARTID

 /* ACPI table mapping after acpi_permanent_mmap is set */
-void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
 #define acpi_os_ioremap acpi_os_ioremap

 #define acpi_strict 1	/* No out-of-spec workarounds on RISC-V */
@@ -215,9 +215,9 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
 	early_iounmap(map, size);
 }

-void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
-	return memremap(phys, size, MEMREMAP_WB);
+	return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
 }

 #ifdef CONFIG_PCI
@@ -18,4 +18,6 @@ void arch_crash_save_vmcoreinfo(void)
 	vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
 #endif
 	vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+	vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
+			      kernel_map.va_kernel_pa_offset);
 }

@@ -2593,6 +2593,13 @@ config CPU_IBRS_ENTRY
 	  This mitigates both spectre_v2 and retbleed at great cost to
 	  performance.
 
+config CPU_SRSO
+	bool "Mitigate speculative RAS overflow on AMD"
+	depends on CPU_SUP_AMD && X86_64 && RETHUNK
+	default y
+	help
+	  Enable the SRSO mitigation needed on AMD Zen1-4 machines.
+
 config SLS
 	bool "Mitigate Straight-Line-Speculation"
 	depends on CC_HAS_SLS && X86_64
@@ -2603,6 +2610,25 @@ config SLS
 	  against straight line speculation. The kernel image might be slightly
 	  larger.
 
+config GDS_FORCE_MITIGATION
+	bool "Force GDS Mitigation"
+	depends on CPU_SUP_INTEL
+	default n
+	help
+	  Gather Data Sampling (GDS) is a hardware vulnerability which allows
+	  unprivileged speculative access to data which was previously stored in
+	  vector registers.
+
+	  This option is equivalent to setting gather_data_sampling=force on the
+	  command line. The microcode mitigation is used if present, otherwise
+	  AVX is disabled as a mitigation. On affected systems that are missing
+	  the microcode any userspace code that unconditionally uses AVX will
+	  break with this option set.
+
+	  Setting this option on systems not vulnerable to GDS has no effect.
+
+	  If in doubt, say N.
+
 endif
 
 config ARCH_HAS_ADD_PAGES
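
At runtime the chosen GDS state is visible through sysfs (the gather_data_sampling attribute is wired up in the drivers/base/cpu.c chunk later in this merge); a hedged userspace sketch reading it:

/* Hedged sketch, not part of the patch: print the GDS mitigation state
 * string (e.g. "Mitigation: Microcode") from the sysfs attribute this
 * merge adds.
 */
#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/gather_data_sampling", "r");

	if (!f)
		return 1;	/* attribute absent on kernels without the patch */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}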

@@ -107,7 +107,6 @@ static bool cpu_is_self(int cpu)
 static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
 		bool exclude_self)
 {
-	struct hv_send_ipi_ex **arg;
 	struct hv_send_ipi_ex *ipi_arg;
 	unsigned long flags;
 	int nr_bank = 0;
@@ -117,9 +116,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
 		return false;
 
 	local_irq_save(flags);
-	arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
+	ipi_arg = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
-	ipi_arg = *arg;
 	if (unlikely(!ipi_arg))
 		goto ipi_mask_ex_done;
 
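
This cleanup (repeated in the ivm.c, mmu.c and nested.c hunks below) relies on hyperv_pcpu_input_arg being a per-CPU slot holding a void pointer to the pre-allocated hypercall input page, so a single dereference of this_cpu_ptr() replaces the old cast-then-dereference two-step. A hedged sketch of the pattern in isolation, with an illustrative per-CPU variable of my own naming:

#include <linux/percpu.h>

/* Hedged sketch, illustrative only: this_cpu_ptr(&slot) on a per-CPU
 * "void *" slot has type "void **", so one dereference yields the cached
 * page, and assigning a void * to a typed pointer needs no cast in C.
 */
static DEFINE_PER_CPU(void *, demo_input_page);

static void *demo_get_input_page(void)
{
	return *this_cpu_ptr(&demo_input_page);	/* no explicit cast needed */
}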

@@ -14,6 +14,7 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/sev.h>
+#include <asm/ibt.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
@@ -471,6 +472,26 @@ void __init hyperv_init(void)
 		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 	}
 
+	/*
+	 * Some versions of Hyper-V that provide IBT in guest VMs have a bug
+	 * in that there's no ENDBR64 instruction at the entry to the
+	 * hypercall page. Because hypercalls are invoked via an indirect call
+	 * to the hypercall page, all hypercall attempts fail when IBT is
+	 * enabled, and Linux panics. For such buggy versions, disable IBT.
+	 *
+	 * Fixed versions of Hyper-V always provide ENDBR64 on the hypercall
+	 * page, so if future Linux kernel versions enable IBT for 32-bit
+	 * builds, additional hypercall page hackery will be required here
+	 * to provide an ENDBR32.
+	 */
+#ifdef CONFIG_X86_KERNEL_IBT
+	if (cpu_feature_enabled(X86_FEATURE_IBT) &&
+	    *(u32 *)hv_hypercall_pg != gen_endbr()) {
+		setup_clear_cpu_cap(X86_FEATURE_IBT);
+		pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n");
+	}
+#endif
+
 	/*
 	 * hyperv_init() is called before LAPIC is initialized: see
 	 * apic_intr_mode_init() -> x86_platform.apic_post_init() and
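
The "*(u32 *)hv_hypercall_pg != gen_endbr()" test works because ENDBR64 has a fixed four-byte encoding (F3 0F 1E FA) that reads back as a single little-endian u32; a hedged sketch of the same comparison applied to an arbitrary buffer:

/* Hedged sketch: ENDBR64 encodes as F3 0F 1E FA, which as a
 * little-endian u32 is 0xfa1e0ff3; that is the value a gen_endbr()-style
 * helper supplies for the comparison in the hunk above.
 */
#include <stdint.h>
#include <string.h>

static int starts_with_endbr64(const void *code)
{
	uint32_t insn;

	memcpy(&insn, code, sizeof(insn));	/* avoid unaligned loads */
	return insn == 0xfa1e0ff3u;		/* F3 0F 1E FA, little endian */
}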

@@ -25,6 +25,10 @@ void __init hv_vtl_init_platform(void)
 	x86_init.irqs.pre_vector_init = x86_init_noop;
 	x86_init.timers.timer_init = x86_init_noop;
 
+	/* Avoid searching for BIOS MP tables */
+	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
 	x86_platform.get_wallclock = get_rtc_noop;
 	x86_platform.set_wallclock = set_rtc_noop;
 	x86_platform.get_nmi_reason = hv_get_nmi_reason;

@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
 static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
			   enum hv_mem_host_visibility visibility)
 {
-	struct hv_gpa_range_for_visibility **input_pcpu, *input;
+	struct hv_gpa_range_for_visibility *input;
 	u16 pages_processed;
 	u64 hv_status;
 	unsigned long flags;
@@ -263,9 +263,8 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
 	}
 
 	local_irq_save(flags);
-	input_pcpu = (struct hv_gpa_range_for_visibility **)
-		this_cpu_ptr(hyperv_pcpu_input_arg);
-	input = *input_pcpu;
+	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
 	if (unlikely(!input)) {
 		local_irq_restore(flags);
 		return -EINVAL;

@@ -61,7 +61,6 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
 				const struct flush_tlb_info *info)
 {
 	int cpu, vcpu, gva_n, max_gvas;
-	struct hv_tlb_flush **flush_pcpu;
 	struct hv_tlb_flush *flush;
 	u64 status;
 	unsigned long flags;
@@ -74,10 +73,7 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
 
 	local_irq_save(flags);
 
-	flush_pcpu = (struct hv_tlb_flush **)
-		     this_cpu_ptr(hyperv_pcpu_input_arg);
-
-	flush = *flush_pcpu;
+	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
 	if (unlikely(!flush)) {
 		local_irq_restore(flags);
@@ -178,17 +174,13 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 			const struct flush_tlb_info *info)
 {
 	int nr_bank = 0, max_gvas, gva_n;
-	struct hv_tlb_flush_ex **flush_pcpu;
 	struct hv_tlb_flush_ex *flush;
 	u64 status;
 
 	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
 		return HV_STATUS_INVALID_PARAMETER;
 
-	flush_pcpu = (struct hv_tlb_flush_ex **)
-		     this_cpu_ptr(hyperv_pcpu_input_arg);
-
-	flush = *flush_pcpu;
+	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
 	if (info->mm) {
 		/*

@@ -19,7 +19,6 @@
 
 int hyperv_flush_guest_mapping(u64 as)
 {
-	struct hv_guest_mapping_flush **flush_pcpu;
 	struct hv_guest_mapping_flush *flush;
 	u64 status;
 	unsigned long flags;
@@ -30,10 +29,7 @@ int hyperv_flush_guest_mapping(u64 as)
 
 	local_irq_save(flags);
 
-	flush_pcpu = (struct hv_guest_mapping_flush **)
-		this_cpu_ptr(hyperv_pcpu_input_arg);
-
-	flush = *flush_pcpu;
+	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
 	if (unlikely(!flush)) {
 		local_irq_restore(flags);
@@ -90,7 +86,6 @@ EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
 int hyperv_flush_guest_mapping_range(u64 as,
 		hyperv_fill_flush_list_func fill_flush_list_func, void *data)
 {
-	struct hv_guest_mapping_flush_list **flush_pcpu;
 	struct hv_guest_mapping_flush_list *flush;
 	u64 status;
 	unsigned long flags;
@@ -102,10 +97,8 @@ int hyperv_flush_guest_mapping_range(u64 as,
 
 	local_irq_save(flags);
 
-	flush_pcpu = (struct hv_guest_mapping_flush_list **)
-		this_cpu_ptr(hyperv_pcpu_input_arg);
-
-	flush = *flush_pcpu;
+	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
 	if (unlikely(!flush)) {
 		local_irq_restore(flags);
 		goto fault;

@@ -14,7 +14,7 @@
 * Defines x86 CPU feature bits
 */
 #define NCAPINTS			21	/* N 32-bit words worth of info */
-#define NBUGINTS			1	/* N 32-bit bug flags */
+#define NBUGINTS			2	/* N 32-bit bug flags */
 
 /*
 * Note: If the comment begins with a quoted string, that string is used
@@ -309,6 +309,10 @@
 #define X86_FEATURE_SMBA		(11*32+21) /* "" Slow Memory Bandwidth Allocation */
 #define X86_FEATURE_BMEC		(11*32+22) /* "" Bandwidth Monitoring Event Configuration */
 
+#define X86_FEATURE_SRSO		(11*32+24) /* "" AMD BTB untrain RETs */
+#define X86_FEATURE_SRSO_ALIAS		(11*32+25) /* "" AMD BTB untrain RETs through aliasing */
+#define X86_FEATURE_IBPB_ON_VMEXIT	(11*32+26) /* "" Issue an IBPB only on VMEXIT */
+
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -442,6 +446,10 @@
 #define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* "" Automatic IBRS */
 #define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* "" SMM_CTL MSR is not present */
 
+#define X86_FEATURE_SBPB		(20*32+27) /* "" Selective Branch Prediction Barrier */
+#define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
+
 /*
 * BUG word(s)
 */
@@ -483,5 +491,9 @@
 #define X86_BUG_RETBLEED		X86_BUG(27) /* CPU is affected by RETBleed */
 #define X86_BUG_EIBRS_PBRSB		X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 #define X86_BUG_SMT_RSB			X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
+#define X86_BUG_GDS			X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+
+/* BUG word 2 */
+#define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
+#define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
 #endif /* _ASM_X86_CPUFEATURES_H */
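
Bumping NBUGINTS to 2 works because bug bits live in synthetic words after the NCAPINTS feature words, so X86_BUG(1*32 + 0) simply indexes bit 0 of the second bug word; a hedged arithmetic sketch mirroring (not quoting) the real macros:

/* Hedged sketch of the bit bookkeeping; the real macros live in
 * cpufeatures.h. Bug bits are stored after the NCAPINTS feature words,
 * so X86_BUG_SRSO = X86_BUG(1*32 + 0) lands in bug word 1, bit 0.
 */
#include <assert.h>

#define DEMO_NCAPINTS	21
#define DEMO_X86_BUG(x)	(DEMO_NCAPINTS * 32 + (x))

int main(void)
{
	int srso = DEMO_X86_BUG(1 * 32 + 0);	/* AMD SRSO bug */

	assert(srso / 32 == DEMO_NCAPINTS + 1);	/* second bug word ... */
	assert(srso % 32 == 0);			/* ... bit 0 */
	return 0;
}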

@@ -5,7 +5,7 @@
 #include <linux/types.h>
 #include <linux/nmi.h>
 #include <linux/msi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
 #include <asm/paravirt.h>

@@ -57,6 +57,7 @@
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_SBPB			BIT(7)	   /* Selective Branch Prediction Barrier */
 
 #define MSR_PPIN_CTL			0x0000004e
 #define MSR_PPIN			0x0000004f
@@ -155,6 +156,15 @@
						 * Not susceptible to Post-Barrier
						 * Return Stack Buffer Predictions.
						 */
+#define ARCH_CAP_GDS_CTRL		BIT(25)	/*
+						 * CPU is vulnerable to Gather
+						 * Data Sampling (GDS) and
+						 * has controls for mitigation.
+						 */
+#define ARCH_CAP_GDS_NO			BIT(26)	/*
+						 * CPU is not vulnerable to Gather
+						 * Data Sampling (GDS).
+						 */
 
 #define ARCH_CAP_XAPIC_DISABLE		BIT(21)	/*
						 * IA32_XAPIC_DISABLE_STATUS MSR
@@ -178,6 +188,8 @@
 #define RNGDS_MITG_DIS			BIT(0)	/* SRBDS support */
 #define RTM_ALLOW			BIT(1)	/* TSX development mode */
 #define FB_CLEAR_DIS			BIT(3)	/* CPU Fill buffer clear disable */
+#define GDS_MITG_DIS			BIT(4)	/* Disable GDS mitigation */
+#define GDS_MITG_LOCKED			BIT(5)	/* GDS mitigation locked */
 
 #define MSR_IA32_SYSENTER_CS		0x00000174
 #define MSR_IA32_SYSENTER_ESP		0x00000175

@@ -211,7 +211,8 @@
 * eventually turn into it's own annotation.
 */
 .macro VALIDATE_UNRET_END
-#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
+#if defined(CONFIG_NOINSTR_VALIDATION) && \
+	(defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
 	ANNOTATE_RETPOLINE_SAFE
 	nop
 #endif
@@ -289,13 +290,18 @@
 */
 .macro UNTRAIN_RET
 #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-	defined(CONFIG_CALL_DEPTH_TRACKING)
+	defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
 	VALIDATE_UNRET_END
 	ALTERNATIVE_3 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
 #endif
+
+#ifdef CONFIG_CPU_SRSO
+	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+#endif
 .endm
 
 .macro UNTRAIN_RET_FROM_CALL
@@ -307,6 +313,11 @@
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
 #endif
+
+#ifdef CONFIG_CPU_SRSO
+	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+#endif
 .endm
 
@@ -332,6 +343,8 @@ extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
 
 extern void __x86_return_thunk(void);
 extern void zen_untrain_ret(void);
+extern void srso_untrain_ret(void);
+extern void srso_untrain_ret_alias(void);
 extern void entry_ibpb(void);
 
 #ifdef CONFIG_CALL_THUNKS
@@ -479,11 +492,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
		: "memory");
 }
 
+extern u64 x86_pred_cmd;
+
 static inline void indirect_branch_prediction_barrier(void)
 {
-	u64 val = PRED_CMD_IBPB;
-
-	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
 }
 
 /* The Intel SPEC CTRL MSR base value cache */

@@ -682,9 +682,13 @@ extern u16 get_llc_id(unsigned int cpu);
 #ifdef CONFIG_CPU_SUP_AMD
 extern u32 amd_get_nodes_per_socket(void);
 extern u32 amd_get_highest_perf(void);
+extern bool cpu_has_ibpb_brtype_microcode(void);
+extern void amd_clear_divider(void);
 #else
 static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
 static inline u32 amd_get_highest_perf(void)		{ return 0; }
+static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
+static inline void amd_clear_divider(void)		{ }
 #endif
 
 extern unsigned long arch_align_stack(unsigned long sp);

@@ -75,6 +75,10 @@ static const int amd_zenbleed[] =
			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
 
+static const int amd_div0[] =
+	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
+			   AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
+
 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
 	int osvw_id = *erratum++;
@@ -1130,6 +1134,11 @@ static void init_amd(struct cpuinfo_x86 *c)
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
 
 	zenbleed_check(c);
+
+	if (cpu_has_amd_erratum(c, amd_div0)) {
+		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+		setup_force_cpu_bug(X86_BUG_DIV0);
+	}
 }
 
 #ifdef CONFIG_X86_32
@@ -1290,3 +1299,32 @@ void amd_check_microcode(void)
 {
	on_each_cpu(zenbleed_check_cpu, NULL, 1);
 }
+
+bool cpu_has_ibpb_brtype_microcode(void)
+{
+	switch (boot_cpu_data.x86) {
+	/* Zen1/2 IBPB flushes branch type predictions too. */
+	case 0x17:
+		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
+	case 0x19:
+		/* Poke the MSR bit on Zen3/4 to check its presence. */
+		if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+			setup_force_cpu_cap(X86_FEATURE_SBPB);
+			return true;
+		} else {
+			return false;
+		}
+	default:
+		return false;
+	}
+}
+
+/*
+ * Issue a DIV 0/1 insn to clear any division data from previous DIV
+ * operations.
+ */
+void noinstr amd_clear_divider(void)
+{
+	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+		     :: "a" (0), "d" (0), "r" (1));
+}

@@ -47,6 +47,8 @@ static void __init taa_select_mitigation(void);
 static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
 static void __init l1d_flush_select_mitigation(void);
+static void __init srso_select_mitigation(void);
+static void __init gds_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -56,6 +58,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
+u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
@@ -160,6 +165,8 @@ void __init cpu_select_mitigations(void)
 	md_clear_select_mitigation();
 	srbds_select_mitigation();
 	l1d_flush_select_mitigation();
+	srso_select_mitigation();
+	gds_select_mitigation();
 }
 
 /*
@@ -645,6 +652,149 @@ static int __init l1d_flush_parse_cmdline(char *str)
 }
 early_param("l1d_flush", l1d_flush_parse_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"GDS: " fmt
+
+enum gds_mitigations {
+	GDS_MITIGATION_OFF,
+	GDS_MITIGATION_UCODE_NEEDED,
+	GDS_MITIGATION_FORCE,
+	GDS_MITIGATION_FULL,
+	GDS_MITIGATION_FULL_LOCKED,
+	GDS_MITIGATION_HYPERVISOR,
+};
+
+#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
+#else
+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
+#endif
+
+static const char * const gds_strings[] = {
+	[GDS_MITIGATION_OFF]		= "Vulnerable",
+	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
+	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
+	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
+	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
+	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
+};
+
+bool gds_ucode_mitigated(void)
+{
+	return (gds_mitigation == GDS_MITIGATION_FULL ||
+		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
+}
+EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
+
+void update_gds_msr(void)
+{
+	u64 mcu_ctrl_after;
+	u64 mcu_ctrl;
+
+	switch (gds_mitigation) {
+	case GDS_MITIGATION_OFF:
+		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+		mcu_ctrl |= GDS_MITG_DIS;
+		break;
+	case GDS_MITIGATION_FULL_LOCKED:
+		/*
+		 * The LOCKED state comes from the boot CPU. APs might not have
+		 * the same state. Make sure the mitigation is enabled on all
+		 * CPUs.
+		 */
+	case GDS_MITIGATION_FULL:
+		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+		mcu_ctrl &= ~GDS_MITG_DIS;
+		break;
+	case GDS_MITIGATION_FORCE:
+	case GDS_MITIGATION_UCODE_NEEDED:
+	case GDS_MITIGATION_HYPERVISOR:
+		return;
+	};
+
+	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+
+	/*
+	 * Check to make sure that the WRMSR value was not ignored. Writes to
+	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
+	 * processor was not.
+	 */
+	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
+	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
+}
+
+static void __init gds_select_mitigation(void)
+{
+	u64 mcu_ctrl;
+
+	if (!boot_cpu_has_bug(X86_BUG_GDS))
+		return;
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
+		goto out;
+	}
+
+	if (cpu_mitigations_off())
+		gds_mitigation = GDS_MITIGATION_OFF;
+	/* Will verify below that mitigation _can_ be disabled */
+
+	/* No microcode */
+	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+		if (gds_mitigation == GDS_MITIGATION_FORCE) {
+			/*
+			 * This only needs to be done on the boot CPU so do it
+			 * here rather than in update_gds_msr()
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_AVX);
+			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
+		} else {
+			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
+		}
+		goto out;
+	}
+
+	/* Microcode has mitigation, use it */
+	if (gds_mitigation == GDS_MITIGATION_FORCE)
+		gds_mitigation = GDS_MITIGATION_FULL;
+
+	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+	if (mcu_ctrl & GDS_MITG_LOCKED) {
+		if (gds_mitigation == GDS_MITIGATION_OFF)
+			pr_warn("Mitigation locked. Disable failed.\n");
+
+		/*
+		 * The mitigation is selected from the boot CPU. All other CPUs
+		 * _should_ have the same state. If the boot CPU isn't locked
+		 * but others are then update_gds_msr() will WARN() of the state
+		 * mismatch. If the boot CPU is locked update_gds_msr() will
+		 * ensure the other CPUs have the mitigation enabled.
+		 */
+		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
+	}
+
+	update_gds_msr();
+out:
+	pr_info("%s\n", gds_strings[gds_mitigation]);
+}
+
+static int __init gds_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!boot_cpu_has_bug(X86_BUG_GDS))
+		return 0;
+
+	if (!strcmp(str, "off"))
+		gds_mitigation = GDS_MITIGATION_OFF;
+	else if (!strcmp(str, "force"))
+		gds_mitigation = GDS_MITIGATION_FORCE;
+
+	return 0;
+}
+early_param("gather_data_sampling", gds_parse_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"Spectre V1 : " fmt
 
@@ -2187,6 +2337,165 @@ static int __init l1tf_cmdline(char *str)
 }
 early_param("l1tf", l1tf_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt
+
+enum srso_mitigation {
+	SRSO_MITIGATION_NONE,
+	SRSO_MITIGATION_MICROCODE,
+	SRSO_MITIGATION_SAFE_RET,
+	SRSO_MITIGATION_IBPB,
+	SRSO_MITIGATION_IBPB_ON_VMEXIT,
+};
+
+enum srso_mitigation_cmd {
+	SRSO_CMD_OFF,
+	SRSO_CMD_MICROCODE,
+	SRSO_CMD_SAFE_RET,
+	SRSO_CMD_IBPB,
+	SRSO_CMD_IBPB_ON_VMEXIT,
+};
+
+static const char * const srso_strings[] = {
+	[SRSO_MITIGATION_NONE]		 = "Vulnerable",
+	[SRSO_MITIGATION_MICROCODE]	 = "Mitigation: microcode",
+	[SRSO_MITIGATION_SAFE_RET]	 = "Mitigation: safe RET",
+	[SRSO_MITIGATION_IBPB]		 = "Mitigation: IBPB",
+	[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
+};
+
+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
+static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
+
+static int __init srso_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		srso_cmd = SRSO_CMD_OFF;
+	else if (!strcmp(str, "microcode"))
+		srso_cmd = SRSO_CMD_MICROCODE;
+	else if (!strcmp(str, "safe-ret"))
+		srso_cmd = SRSO_CMD_SAFE_RET;
+	else if (!strcmp(str, "ibpb"))
+		srso_cmd = SRSO_CMD_IBPB;
+	else if (!strcmp(str, "ibpb-vmexit"))
+		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
+	else
+		pr_err("Ignoring unknown SRSO option (%s).", str);
+
+	return 0;
+}
+early_param("spec_rstack_overflow", srso_parse_cmdline);
+
+#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
+
+static void __init srso_select_mitigation(void)
+{
+	bool has_microcode;
+
+	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+		goto pred_cmd;
+
+	/*
+	 * The first check is for the kernel running as a guest in order
+	 * for guests to verify whether IBPB is a viable mitigation.
+	 */
+	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
+	if (!has_microcode) {
+		pr_warn("IBPB-extending microcode not applied!\n");
+		pr_warn(SRSO_NOTICE);
+	} else {
+		/*
+		 * Enable the synthetic (even if in a real CPUID leaf)
+		 * flags for guests.
+		 */
+		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+
+		/*
+		 * Zen1/2 with SMT off aren't vulnerable after the right
+		 * IBPB microcode has been applied.
+		 */
+		if ((boot_cpu_data.x86 < 0x19) &&
+		    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
+			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+	}
+
+	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+		if (has_microcode) {
+			pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
+			srso_mitigation = SRSO_MITIGATION_IBPB;
+			goto pred_cmd;
+		}
+	}
+
+	switch (srso_cmd) {
+	case SRSO_CMD_OFF:
+		return;
+
+	case SRSO_CMD_MICROCODE:
+		if (has_microcode) {
+			srso_mitigation = SRSO_MITIGATION_MICROCODE;
+			pr_warn(SRSO_NOTICE);
+		}
+		break;
+
+	case SRSO_CMD_SAFE_RET:
+		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
+			/*
+			 * Enable the return thunk for generated code
+			 * like ftrace, static_call, etc.
+			 */
+			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+
+			if (boot_cpu_data.x86 == 0x19)
+				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+			else
+				setup_force_cpu_cap(X86_FEATURE_SRSO);
+			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+			goto pred_cmd;
+		}
+		break;
+
+	case SRSO_CMD_IBPB:
+		if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+			if (has_microcode) {
+				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+				srso_mitigation = SRSO_MITIGATION_IBPB;
+			}
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+			goto pred_cmd;
+		}
+		break;
+
+	case SRSO_CMD_IBPB_ON_VMEXIT:
+		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
+			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
+				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+			}
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+			goto pred_cmd;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
+
+pred_cmd:
+	if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
+	     boot_cpu_has(X86_FEATURE_SBPB))
+		x86_pred_cmd = PRED_CMD_SBPB;
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
@@ -2385,6 +2694,18 @@ static ssize_t retbleed_show_state(char *buf)
 	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
+static ssize_t srso_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s%s\n",
+			  srso_strings[srso_mitigation],
+			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+}
+
+static ssize_t gds_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
 {
@@ -2434,6 +2755,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);
 
+	case X86_BUG_SRSO:
+		return srso_show_state(buf);
+
+	case X86_BUG_GDS:
+		return gds_show_state(buf);
+
 	default:
		break;
 	}
@@ -2498,4 +2825,14 @@ ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
 {
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
 }
+
+ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
+}
+
+ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
+}
 #endif

@@ -1250,6 +1250,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define RETBLEED	BIT(3)
 /* CPU is affected by SMT (cross-thread) return predictions */
 #define SMT_RSB		BIT(4)
+/* CPU is affected by SRSO */
+#define SRSO		BIT(5)
+/* CPU is affected by GDS */
+#define GDS		BIT(6)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
@@ -1262,27 +1266,30 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO | GDS),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO | GDS),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+	VULNBL_INTEL_STEPPINGS(TIGERLAKE_L,	X86_STEPPING_ANY,		GDS),
+	VULNBL_INTEL_STEPPINGS(TIGERLAKE,	X86_STEPPING_ANY,		GDS),
	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
 
	VULNBL_AMD(0x15, RETBLEED),
	VULNBL_AMD(0x16, RETBLEED),
-	VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
+	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
+	VULNBL_AMD(0x19, SRSO),
	{}
 };
 
@@ -1406,6 +1413,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
		setup_force_cpu_bug(X86_BUG_SMT_RSB);
 
+	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
+		if (cpu_matches(cpu_vuln_blacklist, SRSO))
+			setup_force_cpu_bug(X86_BUG_SRSO);
+	}
+
+	/*
+	 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
+	 * an affected processor, the VMM may have disabled the use of GATHER by
+	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+	 * which means that AVX will be disabled.
+	 */
+	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+	    boot_cpu_has(X86_FEATURE_AVX))
+		setup_force_cpu_bug(X86_BUG_GDS);
+
	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;
 
@@ -1962,6 +1984,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
	validate_apic_and_package_id(c);
	x86_spec_ctrl_setup_ap();
	update_srbds_msr();
+	if (boot_cpu_has_bug(X86_BUG_GDS))
+		update_gds_msr();
 
	tsx_ap_init();
 }

@@ -83,6 +83,7 @@ void cpu_select_mitigations(void);
 
 extern void x86_spec_ctrl_setup_ap(void);
 extern void update_srbds_msr(void);
+extern void update_gds_msr(void);
 
 extern enum spectre_v2_mitigation spectre_v2_enabled;
 
@@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error)
 {
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
+
+	amd_clear_divider();
 }
 
 DEFINE_IDTENTRY(exc_overflow)

@@ -134,13 +134,27 @@ SECTIONS
		SOFTIRQENTRY_TEXT
 #ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
-		*(.text.__x86.*)
+		*(.text.__x86.indirect_thunk)
+		*(.text.__x86.return_thunk)
		__indirect_thunk_end = .;
 #endif
		STATIC_CALL_TEXT
 
		ALIGN_ENTRY_TEXT_BEGIN
+#ifdef CONFIG_CPU_SRSO
+		*(.text.__x86.rethunk_untrain)
+#endif
+
		ENTRY_TEXT
+
+#ifdef CONFIG_CPU_SRSO
+		/*
+		 * See the comment above srso_untrain_ret_alias()'s
+		 * definition.
+		 */
+		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+		*(.text.__x86.rethunk_safe)
+#endif
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)
 
@@ -509,7 +523,18 @@ INIT_PER_CPU(irq_stack_backing_store);
 #endif
 
 #ifdef CONFIG_RETHUNK
-. = ASSERT((__x86_return_thunk & 0x3f) == 0, "__x86_return_thunk not cacheline-aligned");
+. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
+. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+#endif
+
+#ifdef CONFIG_CPU_SRSO
+/*
+ * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
+ * of the two function addresses:
+ */
+. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
+		(srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+		"SRSO function pair won't alias");
 #endif
 
 #endif /* CONFIG_X86_64 */
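
The linker assert leans on the identity a ^ b == (a | b) - (a & b): a plus b splits into XOR plus twice the carries, and OR is XOR plus the carries. A hedged C check of the identity and of the exact alias mask used above (the base address is hypothetical):

/* Hedged sketch verifying the linker-script arithmetic: for any a, b,
 * (a | b) - (a & b) == a ^ b, and the SRSO alias mask of bits 2, 8, 14
 * and 20 is 0x00104104.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0xffffffff82000000ull;	/* hypothetical untrain address */
	uint64_t b = a | (1u << 2) | (1u << 8) | (1u << 14) | (1u << 20);

	assert(((a | b) - (a & b)) == (a ^ b));
	assert((a ^ b) == 0x00104104);
	return 0;
}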

@@ -729,6 +729,9 @@ void kvm_set_cpu_caps(void)
		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
	);
 
+	if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
+		kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
+
	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
		F(PERFMON_V2)
	);

@@ -2417,15 +2417,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
	 */
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
 
-	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
+	BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
+	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
 
-	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
 
-	if (ghcb_xcr0_is_valid(ghcb)) {
+	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
+
+	if (kvm_ghcb_xcr0_is_valid(svm)) {
		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
		kvm_update_cpuid_runtime(vcpu);
	}
@@ -2436,84 +2439,88 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
	control->exit_code_hi = upper_32_bits(exit_code);
	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
+	svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
 
	/* Clear the valid entries fields */
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
+static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+{
+	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+}
+
 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
-	struct kvm_vcpu *vcpu;
-	struct ghcb *ghcb;
+	struct vmcb_control_area *control = &svm->vmcb->control;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
	u64 exit_code;
	u64 reason;
 
-	ghcb = svm->sev_es.ghcb;
-
	/*
	 * Retrieve the exit code now even though it may not be marked valid
	 * as it could help with debugging.
	 */
-	exit_code = ghcb_get_sw_exit_code(ghcb);
+	exit_code = kvm_ghcb_get_sw_exit_code(control);
 
	/* Only GHCB Usage code 0 is supported */
-	if (ghcb->ghcb_usage) {
+	if (svm->sev_es.ghcb->ghcb_usage) {
		reason = GHCB_ERR_INVALID_USAGE;
		goto vmgexit_err;
	}
 
	reason = GHCB_ERR_MISSING_INPUT;
 
-	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
-	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
-	    !ghcb_sw_exit_info_2_is_valid(ghcb))
+	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
+	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
+	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
		goto vmgexit_err;
 
-	switch (ghcb_get_sw_exit_code(ghcb)) {
+	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		break;
	case SVM_EXIT_WRITE_DR7:
-		if (!ghcb_rax_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSC:
		break;
	case SVM_EXIT_RDPMC:
-		if (!ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_CPUID:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
-		if (ghcb_get_rax(ghcb) == 0xd)
-			if (!ghcb_xcr0_is_valid(ghcb))
+		if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
+			if (!kvm_ghcb_xcr0_is_valid(svm))
				goto vmgexit_err;
		break;
	case SVM_EXIT_INVD:
		break;
	case SVM_EXIT_IOIO:
-		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
-			if (!ghcb_sw_scratch_is_valid(ghcb))
+		if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
+			if (!kvm_ghcb_sw_scratch_is_valid(svm))
				goto vmgexit_err;
		} else {
-			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
-				if (!ghcb_rax_is_valid(ghcb))
+			if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
+				if (!kvm_ghcb_rax_is_valid(svm))
					goto vmgexit_err;
		}
		break;
	case SVM_EXIT_MSR:
-		if (!ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
-		if (ghcb_get_sw_exit_info_1(ghcb)) {
-			if (!ghcb_rax_is_valid(ghcb) ||
-			    !ghcb_rdx_is_valid(ghcb))
+		if (control->exit_info_1) {
+			if (!kvm_ghcb_rax_is_valid(svm) ||
+			    !kvm_ghcb_rdx_is_valid(svm))
				goto vmgexit_err;
		}
		break;
	case SVM_EXIT_VMMCALL:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_cpl_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_cpl_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSCP:
@@ -2521,19 +2528,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
	case SVM_EXIT_WBINVD:
		break;
	case SVM_EXIT_MONITOR:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb) ||
-		    !ghcb_rdx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm) ||
+		    !kvm_ghcb_rdx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_MWAIT:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_MMIO_READ:
	case SVM_VMGEXIT_MMIO_WRITE:
-		if (!ghcb_sw_scratch_is_valid(ghcb))
+		if (!kvm_ghcb_sw_scratch_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
@@ -2549,11 +2556,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
	return 0;
 
 vmgexit_err:
-	vcpu = &svm->vcpu;
-
	if (reason == GHCB_ERR_INVALID_USAGE) {
		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
-			    ghcb->ghcb_usage);
+			    svm->sev_es.ghcb->ghcb_usage);
	} else if (reason == GHCB_ERR_INVALID_EVENT) {
		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
			    exit_code);
@@ -2563,11 +2568,8 @@ vmgexit_err:
		dump_ghcb(svm);
	}
 
-	/* Clear the valid entries fields */
-	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
-
-	ghcb_set_sw_exit_info_1(ghcb, 2);
-	ghcb_set_sw_exit_info_2(ghcb, reason);
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
 
	/* Resume the guest to "return" the error code. */
	return 1;
@@ -2586,7 +2588,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
	 */
	if (svm->sev_es.ghcb_sa_sync) {
		kvm_write_guest(svm->vcpu.kvm,
-				ghcb_get_sw_scratch(svm->sev_es.ghcb),
+				svm->sev_es.sw_scratch,
				svm->sev_es.ghcb_sa,
				svm->sev_es.ghcb_sa_len);
		svm->sev_es.ghcb_sa_sync = false;
@@ -2632,12 +2634,11 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 {
	struct vmcb_control_area *control = &svm->vmcb->control;
-	struct ghcb *ghcb = svm->sev_es.ghcb;
	u64 ghcb_scratch_beg, ghcb_scratch_end;
	u64 scratch_gpa_beg, scratch_gpa_end;
	void *scratch_va;
 
-	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
+	scratch_gpa_beg = svm->sev_es.sw_scratch;
	if (!scratch_gpa_beg) {
		pr_err("vmgexit: scratch gpa not provided\n");
		goto e_scratch;
@@ -2708,8 +2709,8 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
	return 0;
 
 e_scratch:
-	ghcb_set_sw_exit_info_1(ghcb, 2);
-	ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
 
	return 1;
 }
@@ -2822,7 +2823,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_gpa, exit_code;
-	struct ghcb *ghcb;
	int ret;
 
	/* Validate the GHCB */
@@ -2847,20 +2847,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
	}
 
	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
-	ghcb = svm->sev_es.ghcb_map.hva;
 
-	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
-
-	exit_code = ghcb_get_sw_exit_code(ghcb);
+	trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
 
+	sev_es_sync_from_ghcb(svm);
	ret = sev_es_validate_vmgexit(svm);
	if (ret)
		return ret;
 
-	sev_es_sync_from_ghcb(svm);
-	ghcb_set_sw_exit_info_1(ghcb, 0);
-	ghcb_set_sw_exit_info_2(ghcb, 0);
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
 
+	exit_code = kvm_ghcb_get_sw_exit_code(control);
	switch (exit_code) {
	case SVM_VMGEXIT_MMIO_READ:
		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
@@ -2898,13 +2896,13 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
		break;
	case 1:
		/* Get AP jump table address */
-		ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
+		ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
		break;
	default:
		pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
		       control->exit_info_1);
-		ghcb_set_sw_exit_info_1(ghcb, 2);
-		ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
+		ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
+		ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
	}
 
	ret = 1;

@@ -1498,7 +1498,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;
-		indirect_branch_prediction_barrier();
+
+		if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
+			indirect_branch_prediction_barrier();
	}
	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_load(vcpu, cpu);

@@ -190,10 +190,12 @@ struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
+	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;
 
	/* SEV-ES scratch area support */
+	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
@@ -744,4 +746,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 
+#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
+	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
+	{									\
+		return test_bit(GHCB_BITMAP_IDX(field),				\
+				(unsigned long *)&svm->sev_es.valid_bitmap);	\
+	}									\
+										\
+	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
+	{									\
+		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
+	}									\
+
+DEFINE_KVM_GHCB_ACCESSORS(cpl)
+DEFINE_KVM_GHCB_ACCESSORS(rax)
+DEFINE_KVM_GHCB_ACCESSORS(rcx)
+DEFINE_KVM_GHCB_ACCESSORS(rdx)
+DEFINE_KVM_GHCB_ACCESSORS(rbx)
+DEFINE_KVM_GHCB_ACCESSORS(rsi)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
+DEFINE_KVM_GHCB_ACCESSORS(xcr0)
+
 #endif
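
For reference, a hedged hand-expansion of one DEFINE_KVM_GHCB_ACCESSORS() instantiation, assuming GHCB_BITMAP_IDX() maps a field name to its bit index in the GHCB valid bitmap; the real expansion is produced by the preprocessor from the macro above:

/* Hedged hand-expansion of DEFINE_KVM_GHCB_ACCESSORS(rax). */
static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
{
	return test_bit(GHCB_BITMAP_IDX(rax),
			(unsigned long *)&svm->sev_es.valid_bitmap);
}

static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb)
{
	/* returns 0 when the guest never marked RAX valid in the snapshot */
	return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
}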

@@ -224,6 +224,9 @@ SYM_FUNC_START(__svm_vcpu_run)
	 */
	UNTRAIN_RET
 
+	/* SRSO */
+	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT
+
	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded

@@ -314,6 +314,8 @@ u64 __read_mostly host_xcr0;
 
 static struct kmem_cache *x86_emulator_cache;
 
+extern bool gds_ucode_mitigated(void);
+
 /*
 * When called, it means the previous get/set msr reached an invalid msr.
 * Return true if we want to ignore/silent this failed msr access.
@@ -1616,7 +1618,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
	 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
-	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)
+	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
 
 static u64 kvm_get_arch_capabilities(void)
 {
@@ -1673,6 +1675,9 @@ static u64 kvm_get_arch_capabilities(void)
		 */
	}
 
+	if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
+		data |= ARCH_CAP_GDS_NO;
+
	return data;
 }
 
@@ -11,6 +11,7 @@
 #include <asm/unwind_hints.h>
 #include <asm/percpu.h>
 #include <asm/frame.h>
+#include <asm/nops.h>
 
	.section .text.__x86.indirect_thunk
 
@@ -131,6 +132,46 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 */
 #ifdef CONFIG_RETHUNK
 
+/*
+ * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * special addresses:
+ *
+ * - srso_untrain_ret_alias() is 2M aligned
+ * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * and 20 in its virtual address are set (while those bits in the
+ * srso_untrain_ret_alias() function are cleared).
+ *
+ * This guarantees that those two addresses will alias in the branch
+ * target buffer of Zen3/4 generations, leading to any potential
+ * poisoned entries at that BTB slot to get evicted.
+ *
+ * As a result, srso_safe_ret_alias() becomes a safe return.
+ */
+#ifdef CONFIG_CPU_SRSO
+	.section .text.__x86.rethunk_untrain
+
+SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+	ANNOTATE_NOENDBR
+	ASM_NOP2
+	lfence
+	jmp __x86_return_thunk
+SYM_FUNC_END(srso_untrain_ret_alias)
+__EXPORT_THUNK(srso_untrain_ret_alias)
+
+	.section .text.__x86.rethunk_safe
+#endif
+
+/* Needs a definition for the __x86_return_thunk alternative below. */
+SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+#ifdef CONFIG_CPU_SRSO
+	add $8, %_ASM_SP
+	UNWIND_HINT_FUNC
+#endif
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_FUNC_END(srso_safe_ret_alias)
+
	.section .text.__x86.return_thunk
 
 /*
@@ -143,7 +184,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 * from re-poisioning the BTB prediction.
 */
	.align 64
-	.skip 64 - (__x86_return_thunk - zen_untrain_ret), 0xcc
+	.skip 64 - (__ret - zen_untrain_ret), 0xcc
 SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	/*
@@ -175,10 +216,10 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 * evicted, __x86_return_thunk will suffer Straight Line Speculation
 * which will be contained safely by the INT3.
 */
-SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL)
+SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
	ret
	int3
-SYM_CODE_END(__x86_return_thunk)
+SYM_CODE_END(__ret)
 
 /*
 * Ensure the TEST decoding / BTB invalidation is complete.
@@ -189,11 +230,45 @@ SYM_CODE_END(__x86_return_thunk)
 * Jump back and execute the RET in the middle of the TEST instruction.
 * INT3 is for SLS protection.
 */
-	jmp __x86_return_thunk
+	jmp __ret
	int3
 SYM_FUNC_END(zen_untrain_ret)
 __EXPORT_THUNK(zen_untrain_ret)
 
+/*
+ * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccccc308c48348,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+ * thus a "safe" one to use.
+ */
+	.align 64
+	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	ANNOTATE_NOENDBR
+	.byte 0x48, 0xb8
+
+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+	add $8, %_ASM_SP
+	ret
+	int3
+	int3
+	int3
+	lfence
+	call srso_safe_ret
+	int3
+SYM_CODE_END(srso_safe_ret)
+SYM_FUNC_END(srso_untrain_ret)
+__EXPORT_THUNK(srso_untrain_ret)
+
+SYM_FUNC_START(__x86_return_thunk)
+	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+			"call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+	int3
+SYM_CODE_END(__x86_return_thunk)
+
 EXPORT_SYMBOL(__x86_return_thunk)
 
 #endif /* CONFIG_RETHUNK */
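
The trick in srso_untrain_ret is byte-level overlap: the two-byte 48 B8 prefix plus the first eight bytes of srso_safe_ret form one movabs, so the same memory decodes as a single harmless instruction when entered at the top and as add/ret when entered at the inner label. A hedged decode of the bytes:

/* Hedged sketch decoding the overlap: 48 B8 is "movabs imm64, %rax";
 * the eight immediate bytes are exactly srso_safe_ret's code
 * (add $8,%rsp = 48 83 C4 08; ret = C3; int3 x3 = CC CC CC), which read
 * as a little-endian immediate is the 0xccccccc308c48348 in the comment.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const uint8_t bytes[] = { 0x48, 0xb8,			/* movabs ..., %rax */
				  0x48, 0x83, 0xc4, 0x08,	/* add $8, %rsp */
				  0xc3,				/* ret */
				  0xcc, 0xcc, 0xcc };		/* int3 */
	uint64_t imm;

	memcpy(&imm, bytes + 2, sizeof(imm));	/* little-endian host assumed */
	assert(imm == 0xccccccc308c48348ull);
	return 0;
}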

@@ -1100,7 +1100,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
		}
	} else {
		sdev->sector_size = ata_id_logical_sector_size(dev->id);
+
+		/*
+		 * Stop the drive on suspend but do not issue START STOP UNIT
+		 * on resume as this is not necessary and may fail: the device
+		 * will be woken up by ata_port_pm_resume() with a port reset
+		 * and device revalidation.
+		 */
		sdev->manage_start_stop = 1;
+		sdev->no_start_on_resume = 1;
	}
 
	/*

@@ -577,6 +577,18 @@ ssize_t __weak cpu_show_retbleed(struct device *dev,
	return sysfs_emit(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
+					     struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_gds(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -588,6 +600,8 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
 static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
 static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
 static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
+static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
@@ -601,6 +615,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_srbds.attr,
	&dev_attr_mmio_stale_data.attr,
	&dev_attr_retbleed.attr,
+	&dev_attr_spec_rstack_overflow.attr,
+	&dev_attr_gather_data_sampling.attr,
	NULL
 };
 
@ -3675,7 +3675,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
|
||||
ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
|
||||
RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
|
||||
RBD_LOCK_TAG, "", 0);
|
||||
if (ret)
|
||||
if (ret && ret != -EEXIST)
|
||||
return ret;
|
||||
|
||||
__rbd_lock(rbd_dev, cookie);
|
||||
@ -3878,7 +3878,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
|
||||
&rbd_dev->header_oloc, RBD_LOCK_NAME,
|
||||
&lock_type, &lock_tag, &lockers, &num_lockers);
|
||||
if (ret) {
|
||||
rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
|
||||
rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
@ -3940,8 +3940,10 @@ static int find_watcher(struct rbd_device *rbd_dev,
|
||||
ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
|
||||
&rbd_dev->header_oloc, &watchers,
|
||||
&num_watchers);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
|
||||
for (i = 0; i < num_watchers; i++) {
|
||||
@ -3985,8 +3987,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
|
||||
locker = refreshed_locker = NULL;
|
||||
|
||||
ret = rbd_lock(rbd_dev);
|
||||
if (ret != -EBUSY)
|
||||
if (!ret)
|
||||
goto out;
|
||||
if (ret != -EBUSY) {
|
||||
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* determine if the current lock holder is still alive */
|
||||
locker = get_lock_owner_info(rbd_dev);
|
||||
@ -4089,11 +4095,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
|
||||
|
||||
ret = rbd_try_lock(rbd_dev);
|
||||
if (ret < 0) {
|
||||
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
|
||||
if (ret == -EBLOCKLISTED)
|
||||
goto out;
|
||||
|
||||
ret = 1; /* request lock anyway */
|
||||
rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
if (ret > 0) {
|
||||
up_write(&rbd_dev->lock_rwsem);
|
||||
@ -6627,12 +6630,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
|
||||
cancel_delayed_work_sync(&rbd_dev->lock_dwork);
|
||||
if (!ret)
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
|
||||
return ret;
|
||||
rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* The lock may have been released by now, unless automatic lock
|
@@ -510,70 +510,6 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
 	return 0;
 }
 
-/*
- * Some AMD fTPM versions may cause stutter
- * https://www.amd.com/en/support/kb/faq/pa-410
- *
- * Fixes are available in two series of fTPM firmware:
- * 6.x.y.z series: 6.0.18.6 +
- * 3.x.y.z series: 3.57.y.5 +
- */
-#ifdef CONFIG_X86
-static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
-{
-	u32 val1, val2;
-	u64 version;
-	int ret;
-
-	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
-		return false;
-
-	ret = tpm_request_locality(chip);
-	if (ret)
-		return false;
-
-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
-	if (ret)
-		goto release;
-	if (val1 != 0x414D4400U /* AMD */) {
-		ret = -ENODEV;
-		goto release;
-	}
-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
-	if (ret)
-		goto release;
-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
-
-release:
-	tpm_relinquish_locality(chip);
-
-	if (ret)
-		return false;
-
-	version = ((u64)val1 << 32) | val2;
-	if ((version >> 48) == 6) {
-		if (version >= 0x0006000000180006ULL)
-			return false;
-	} else if ((version >> 48) == 3) {
-		if (version >= 0x0003005700000005ULL)
-			return false;
-	} else {
-		return false;
-	}
-
-	dev_warn(&chip->dev,
-		 "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
-		 version);
-
-	return true;
-}
-#else
-static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
-{
-	return false;
-}
-#endif /* CONFIG_X86 */
-
 static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
 	struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
@@ -585,10 +521,20 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 	return tpm_get_random(chip, data, max);
 }
 
+static bool tpm_is_hwrng_enabled(struct tpm_chip *chip)
+{
+	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+		return false;
+	if (tpm_is_firmware_upgrade(chip))
+		return false;
+	if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
+		return false;
+	return true;
+}
+
 static int tpm_add_hwrng(struct tpm_chip *chip)
 {
-	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
-	    tpm_amd_is_rng_defective(chip))
+	if (!tpm_is_hwrng_enabled(chip))
 		return 0;
 
 	snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
@@ -693,7 +639,7 @@ int tpm_chip_register(struct tpm_chip *chip)
 	return 0;
 
 out_hwrng:
-	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
+	if (tpm_is_hwrng_enabled(chip))
 		hwrng_unregister(&chip->hwrng);
 out_ppi:
 	tpm_bios_log_teardown(chip);
@@ -718,8 +664,7 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
 void tpm_chip_unregister(struct tpm_chip *chip)
 {
 	tpm_del_legacy_sysfs(chip);
-	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
-	    !tpm_amd_is_rng_defective(chip))
+	if (tpm_is_hwrng_enabled(chip))
 		hwrng_unregister(&chip->hwrng);
 	tpm_bios_log_teardown(chip);
 	if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
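For context on the firmware cutoffs in the block removed above: the two 32-bit firmware properties pack four 16-bit fields into one u64 (TPM2_PT_FIRMWARE_VERSION_1 in the upper half, VERSION_2 in the lower), so release 6.0.18.6 becomes 0x0006000000180006. A standalone sketch of the same comparison, with hypothetical helper names:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pack the two 32-bit firmware properties the way the driver does. */
static uint64_t ftpm_version(uint32_t ver1, uint32_t ver2)
{
	return ((uint64_t)ver1 << 32) | ver2;
}

/* Mirror of the removed check: 6.0.18.6+ and 3.57.x.5+ carry the fix. */
static bool ftpm_rng_defective(uint64_t version)
{
	switch (version >> 48) {
	case 6:
		return version < 0x0006000000180006ULL;
	case 3:
		return version < 0x0003005700000005ULL;
	default:
		return false;	/* unknown series: treat as not affected */
	}
}

int main(void)
{
	/* 6.0.18.5: ver1 = 0x00060000, ver2 = 0x00180005 -> still defective */
	uint64_t v = ftpm_version(0x00060000, 0x00180005);

	printf("0x%016llx defective: %d\n",
	       (unsigned long long)v, ftpm_rng_defective(v));
	return 0;
}
```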
@@ -463,6 +463,28 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
 	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
 }
 
+static int crb_check_flags(struct tpm_chip *chip)
+{
+	u32 val;
+	int ret;
+
+	ret = crb_request_locality(chip, 0);
+	if (ret)
+		return ret;
+
+	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
+	if (ret)
+		goto release;
+
+	if (val == 0x414D4400U /* AMD */)
+		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+
+release:
+	crb_relinquish_locality(chip, 0);
+
+	return ret;
+}
+
 static const struct tpm_class_ops tpm_crb = {
 	.flags = TPM_OPS_AUTO_STARTUP,
 	.status = crb_status,
@@ -800,6 +822,14 @@ static int crb_acpi_add(struct acpi_device *device)
 	chip->acpi_dev_handle = device->handle;
 	chip->flags = TPM_CHIP_FLAG_TPM2;
 
+	rc = tpm_chip_bootstrap(chip);
+	if (rc)
+		goto out;
+
+	rc = crb_check_flags(chip);
+	if (rc)
+		goto out;
+
 	rc = tpm_chip_register(chip);
 
 out:
@@ -162,6 +162,22 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L590"),
 		},
 	},
+	{
+		.callback = tpm_tis_disable_irq,
+		.ident = "ThinkStation P620",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P620"),
+		},
+	},
+	{
+		.callback = tpm_tis_disable_irq,
+		.ident = "TUXEDO InfinityBook S 15/17 Gen7",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TUXEDO InfinityBook S 15/17 Gen7"),
+		},
+	},
 	{
 		.callback = tpm_tis_disable_irq,
 		.ident = "UPX-TGL",
@@ -444,6 +444,7 @@ config COMMON_CLK_BD718XX
 config COMMON_CLK_FIXED_MMIO
 	bool "Clock driver for Memory Mapped Fixed values"
 	depends on COMMON_CLK && OF
+	depends on HAS_IOMEM
 	help
 	  Support for Memory Mapped IO Fixed clocks
 
@@ -291,7 +291,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
 	anatop_base = devm_of_iomap(dev, np, 0, NULL);
 	of_node_put(np);
 	if (WARN_ON(IS_ERR(anatop_base))) {
-		ret = PTR_ERR(base);
+		ret = PTR_ERR(anatop_base);
 		goto unregister_hws;
 	}
 
@@ -328,6 +328,14 @@ static const char * const atb_parents[] = {
 	"syspll_d5"
 };
 
+static const char * const sspm_parents[] = {
+	"clk26m",
+	"univpll_d2_d4",
+	"syspll_d2_d2",
+	"univpll_d2_d2",
+	"syspll_d3"
+};
+
 static const char * const dpi0_parents[] = {
 	"clk26m",
 	"tvdpll_d2",
@@ -507,6 +515,9 @@ static const struct mtk_mux top_muxes[] = {
 	/* CLK_CFG_6 */
 	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel",
 		atb_parents, 0xa0, 0xa4, 0xa8, 0, 2, 7, 0x004, 24),
+	MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SSPM, "sspm_sel",
+		sspm_parents, 0xa0, 0xa4, 0xa8, 8, 3, 15, 0x004, 25,
+		CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
 	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel",
 		dpi0_parents, 0xa0, 0xa4, 0xa8, 16, 4, 23, 0x004, 26),
 	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel",
@@ -673,10 +684,18 @@ static const struct mtk_gate_regs infra3_cg_regs = {
 	GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift,	\
 		 &mtk_clk_gate_ops_setclr)
 
+#define GATE_INFRA2_FLAGS(_id, _name, _parent, _shift, _flag)	\
+	GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs,	\
+		       _shift, &mtk_clk_gate_ops_setclr, _flag)
+
 #define GATE_INFRA3(_id, _name, _parent, _shift)		\
 	GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift,	\
 		 &mtk_clk_gate_ops_setclr)
 
+#define GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, _flag)	\
+	GATE_MTK_FLAGS(_id, _name, _parent, &infra3_cg_regs,	\
+		       _shift, &mtk_clk_gate_ops_setclr, _flag)
+
 static const struct mtk_gate infra_clks[] = {
 	/* INFRA0 */
 	GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "axi_sel", 0),
@@ -748,7 +767,11 @@ static const struct mtk_gate infra_clks[] = {
 	GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", "fufs_sel", 12),
 	GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck", "fufs_sel", 13),
 	GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", "axi_sel", 14),
+	/* infra_sspm is main clock in co-processor, should not be closed in Linux. */
+	GATE_INFRA2_FLAGS(CLK_INFRA_SSPM, "infra_sspm", "sspm_sel", 15, CLK_IS_CRITICAL),
 	GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", "axi_sel", 16),
+	/* infra_sspm_bus_hclk is main clock in co-processor, should not be closed in Linux. */
+	GATE_INFRA2_FLAGS(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk", "axi_sel", 17, CLK_IS_CRITICAL),
 	GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", "i2c_sel", 18),
 	GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", "i2c_sel", 19),
 	GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", "i2c_sel", 20),
@@ -766,6 +789,10 @@ static const struct mtk_gate infra_clks[] = {
 	GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", "msdc50_0_sel", 0),
 	GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", "msdc50_0_sel", 1),
 	GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", "msdc50_0_sel", 2),
+	/* infra_sspm_26m_self is main clock in co-processor, should not be closed in Linux. */
+	GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", "f_f26m_ck", 3, CLK_IS_CRITICAL),
+	/* infra_sspm_32k_self is main clock in co-processor, should not be closed in Linux. */
+	GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "f_f26m_ck", 4, CLK_IS_CRITICAL),
 	GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5),
 	GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6),
 	GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7),
 
@@ -367,9 +367,9 @@ static int meson_clk_pll_enable(struct clk_hw *hw)
 	 * 3. enable the lock detect module
 	 */
 	if (MESON_PARM_APPLICABLE(&pll->current_en)) {
-		usleep_range(10, 20);
+		udelay(10);
 		meson_parm_write(clk->map, &pll->current_en, 1);
-		usleep_range(40, 50);
+		udelay(40);
 	}
 
 	if (MESON_PARM_APPLICABLE(&pll->l_detect)) {
 
@@ -121,6 +121,45 @@ static bool cxl_is_security_command(u16 opcode)
 	return false;
 }
 
+static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
+					 u16 opcode)
+{
+	switch (opcode) {
+	case CXL_MBOX_OP_SANITIZE:
+		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_SECURE_ERASE:
+		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_GET_SECURITY_STATE:
+		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_SET_PASSPHRASE:
+		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
+		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_UNLOCK:
+		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_FREEZE_SECURITY:
+		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
+		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
+			security->enabled_cmds);
+		break;
+	default:
+		break;
+	}
+}
+
 static bool cxl_is_poison_command(u16 opcode)
 {
 #define CXL_MBOX_OP_POISON_CMDS 0x43
@@ -677,7 +716,8 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
 		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
 		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
 
-		if (!cmd && !cxl_is_poison_command(opcode)) {
+		if (!cmd && (!cxl_is_poison_command(opcode) ||
+			     !cxl_is_security_command(opcode))) {
 			dev_dbg(dev,
 				"Opcode 0x%04x unsupported by driver\n", opcode);
 			continue;
@@ -689,6 +729,9 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
 		if (cxl_is_poison_command(opcode))
 			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
 
+		if (cxl_is_security_command(opcode))
+			cxl_set_security_cmd_enabled(&mds->security, opcode);
+
 		dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
 	}
 }
 
@@ -477,9 +477,28 @@ static struct attribute_group cxl_memdev_pmem_attribute_group = {
 	.attrs = cxl_memdev_pmem_attributes,
 };
 
+static umode_t cxl_memdev_security_visible(struct kobject *kobj,
+					   struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+	if (a == &dev_attr_security_sanitize.attr &&
+	    !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
+		return 0;
+
+	if (a == &dev_attr_security_erase.attr &&
+	    !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
+		return 0;
+
+	return a->mode;
+}
+
 static struct attribute_group cxl_memdev_security_attribute_group = {
 	.name = "security",
 	.attrs = cxl_memdev_security_attributes,
+	.is_visible = cxl_memdev_security_visible,
 };
 
 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
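The is_visible hook wired up above is how sysfs decides per device whether to create a file at all: returning 0 hides the attribute, returning a->mode keeps it. A reduced userspace model of that decision, assuming a simple bitmap of CEL-advertised commands (illustrative names only):

```c
#include <stdio.h>

enum sec_cmd { SEC_SANITIZE, SEC_SECURE_ERASE };

/* Same shape as cxl_memdev_security_visible(): mode, or 0 to hide. */
static unsigned int visible_mode(unsigned long enabled_cmds,
				 enum sec_cmd cmd, unsigned int mode)
{
	return (enabled_cmds & (1UL << cmd)) ? mode : 0;
}

int main(void)
{
	/* Device advertised sanitize in its CEL, but not secure erase. */
	unsigned long enabled = 1UL << SEC_SANITIZE;

	printf("sanitize: %04o\n", visible_mode(enabled, SEC_SANITIZE, 0200));
	printf("erase:    %04o\n", visible_mode(enabled, SEC_SECURE_ERASE, 0200));
	return 0;
}
```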
@@ -244,6 +244,19 @@ enum poison_cmd_enabled_bits {
 	CXL_POISON_ENABLED_MAX
 };
 
+/* Device enabled security commands */
+enum security_cmd_enabled_bits {
+	CXL_SEC_ENABLED_SANITIZE,
+	CXL_SEC_ENABLED_SECURE_ERASE,
+	CXL_SEC_ENABLED_GET_SECURITY_STATE,
+	CXL_SEC_ENABLED_SET_PASSPHRASE,
+	CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
+	CXL_SEC_ENABLED_UNLOCK,
+	CXL_SEC_ENABLED_FREEZE_SECURITY,
+	CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
+	CXL_SEC_ENABLED_MAX
+};
+
 /**
  * struct cxl_poison_state - Driver poison state info
  *
@@ -346,6 +359,7 @@ struct cxl_fw_state {
  * struct cxl_security_state - Device security state
  *
  * @state: state of last security operation
+ * @enabled_cmds: All security commands enabled in the CEL
  * @poll: polling for sanitization is enabled, device has no mbox irq support
  * @poll_tmo_secs: polling timeout
  * @poll_dwork: polling work item
@@ -353,6 +367,7 @@ struct cxl_fw_state {
  */
 struct cxl_security_state {
 	unsigned long state;
+	DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
 	bool poll;
 	int poll_tmo_secs;
 	struct delayed_work poll_dwork;
@@ -434,6 +449,7 @@ struct cxl_dev_state {
 * @next_persistent_bytes: persistent capacity change pending device reset
 * @event: event log driver state
 * @poison: poison driver state info
+ * @security: security driver state info
 * @fw: firmware upload / activation state
 * @mbox_send: @dev specific transport for transmitting mailbox commands
 *
 
@@ -211,6 +211,7 @@ config FSL_DMA
 config FSL_EDMA
 	tristate "Freescale eDMA engine support"
 	depends on OF
+	depends on HAS_IOMEM
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
@@ -280,6 +281,7 @@ config IMX_SDMA
 
 config INTEL_IDMA64
 	tristate "Intel integrated DMA 64-bit support"
+	depends on HAS_IOMEM
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
 
@@ -384,9 +384,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	wq->threshold = 0;
 	wq->priority = 0;
 	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
-	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
-	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
-	clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
+	wq->flags = 0;
 	memset(wq->name, 0, WQ_NAME_SIZE);
 	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
 	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
 
@@ -190,7 +190,13 @@ static int mcf_edma_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	chans = pdata->dma_channels;
+	if (!pdata->dma_channels) {
+		dev_info(&pdev->dev, "setting default channel number to 64");
+		chans = 64;
+	} else {
+		chans = pdata->dma_channels;
+	}
+
 	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
 	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
 	if (!mcf_edma)
@@ -202,11 +208,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
 	mcf_edma->drvdata = &mcf_data;
 	mcf_edma->big_endian = 1;
 
-	if (!mcf_edma->n_chans) {
-		dev_info(&pdev->dev, "setting default channel number to 64");
-		mcf_edma->n_chans = 64;
-	}
-
 	mutex_init(&mcf_edma->fsl_edma_mutex);
 
 	mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
 
@@ -192,7 +192,7 @@ struct owl_dma_pchan {
 };
 
 /**
- * struct owl_dma_pchan - Wrapper for DMA ENGINE channel
+ * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 
@@ -403,6 +403,12 @@ enum desc_status {
	 * of a channel can be BUSY at any time.
	 */
 	BUSY,
+	/*
+	 * Pause was called while descriptor was BUSY. Due to hardware
+	 * limitations, only termination is possible for descriptors
+	 * that have been paused.
+	 */
+	PAUSED,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
@@ -2041,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
 	list_for_each_entry(desc, &pch->work_list, node) {
 
 		/* If already submitted */
-		if (desc->status == BUSY)
+		if (desc->status == BUSY || desc->status == PAUSED)
 			continue;
 
 		ret = pl330_submit_req(pch->thread, desc);
@@ -2326,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	struct pl330_dmac *pl330 = pch->dmac;
+	struct dma_pl330_desc *desc;
 	unsigned long flags;
 
 	pm_runtime_get_sync(pl330->ddma.dev);
@@ -2335,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan)
 	_stop(pch->thread);
 	spin_unlock(&pl330->lock);
 
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == BUSY)
+			desc->status = PAUSED;
+	}
 	spin_unlock_irqrestore(&pch->lock, flags);
 	pm_runtime_mark_last_busy(pl330->ddma.dev);
 	pm_runtime_put_autosuspend(pl330->ddma.dev);
@@ -2425,7 +2436,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		else if (running && desc == running)
 			transferred =
 				pl330_get_current_xferred_count(pch, desc);
-		else if (desc->status == BUSY)
+		else if (desc->status == BUSY || desc->status == PAUSED)
 			/*
 			 * Busy but not running means either just enqueued,
 			 * or finished and not yet marked done
@@ -2442,6 +2453,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		case DONE:
 			ret = DMA_COMPLETE;
 			break;
+		case PAUSED:
+			ret = DMA_PAUSED;
+			break;
 		case PREP:
 		case BUSY:
 			ret = DMA_IN_PROGRESS;
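The new PAUSED state above slots between BUSY and DONE: a paused descriptor must not be resubmitted by fill_queue(), and tx_status must report DMA_PAUSED to dmaengine clients. A toy model of that status mapping, with simplified names:

```c
#include <stdio.h>

enum desc_status { PREP, BUSY, PAUSED, DONE };
enum dma_status { DMA_IN_PROGRESS, DMA_PAUSED, DMA_COMPLETE };

/* Same mapping the reworked pl330_tx_status() performs. */
static enum dma_status tx_status(enum desc_status s)
{
	switch (s) {
	case DONE:
		return DMA_COMPLETE;
	case PAUSED:
		return DMA_PAUSED;
	default:		/* PREP and BUSY are both still in flight */
		return DMA_IN_PROGRESS;
	}
}

int main(void)
{
	printf("paused -> %d, busy -> %d\n",
	       tx_status(PAUSED), tx_status(BUSY));
	return 0;
}
```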
@@ -668,6 +668,8 @@ static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
 			val |= irq_start << shift;
 			irq_start++;
 			irq_num--;
+			if (!irq_num)
+				break;
 		}
 
 		/* write IRQ register */
@@ -715,7 +717,7 @@ static int xdma_irq_init(struct xdma_device *xdev)
 		ret = request_irq(irq, xdma_channel_isr, 0,
 				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
 		if (ret) {
-			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
+			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
 				 j, irq, ret);
 			goto failed_init_c2h;
 		}
@@ -892,7 +894,7 @@ static int xdma_probe(struct platform_device *pdev)
 	}
 
 	reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (!reg_base) {
+	if (IS_ERR(reg_base)) {
 		xdma_err(xdev, "ioremap failed");
 		goto failed;
 	}
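The xdma_probe() fix above matters because devm_ioremap_resource() never returns NULL on failure; it returns an errno encoded in the pointer itself, so a NULL check always passes. A userspace re-creation of the ERR_PTR/IS_ERR convention, for illustration:

```c
#include <stdio.h>

#define MAX_ERRNO	4095

/* Error codes live in the top page of the address space. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *reg_base = ERR_PTR(-12);	/* e.g. -ENOMEM from an ioremap */

	/* The buggy test: an ERR_PTR is not NULL, so this misses it. */
	printf("!reg_base      -> %d\n", !reg_base);
	/* The fixed test catches it and can recover the errno. */
	printf("IS_ERR/PTR_ERR -> %d/%ld\n", IS_ERR(reg_base), PTR_ERR(reg_base));
	return 0;
}
```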
@@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
 	return MI_ARB_CHECK | 1 << 8 | state;
 }
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
+static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
 {
-	u32 gsi_offset = gt->uncore->gsi_offset;
+	switch (engine->id) {
+	case RCS0:
+		return GEN12_CCS_AUX_INV;
+	case BCS0:
+		return GEN12_BCS0_AUX_INV;
+	case VCS0:
+		return GEN12_VD0_AUX_INV;
+	case VCS2:
+		return GEN12_VD2_AUX_INV;
+	case VECS0:
+		return GEN12_VE0_AUX_INV;
+	case CCS0:
+		return GEN12_CCS0_AUX_INV;
+	default:
+		return INVALID_MMIO_REG;
+	}
+}
+
+static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
+{
+	i915_reg_t reg = gen12_get_aux_inv_reg(engine);
+
+	if (IS_PONTEVECCHIO(engine->i915))
+		return false;
+
+	/*
+	 * So far platforms supported by i915 having flat ccs do not require
+	 * AUX invalidation. Check also whether the engine requires it.
+	 */
+	return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
+}
+
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
+{
+	i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
+	u32 gsi_offset = engine->gt->uncore->gsi_offset;
+
+	if (!gen12_needs_ccs_aux_inv(engine))
+		return cs;
 
 	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
 	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
 	*cs++ = AUX_INV;
-	*cs++ = MI_NOOP;
+
+	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+		MI_SEMAPHORE_REGISTER_POLL |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+	*cs++ = 0;
+	*cs++ = 0;
 
 	return cs;
 }
@@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 {
 	struct intel_engine_cs *engine = rq->engine;
 
-	if (mode & EMIT_FLUSH) {
-		u32 flags = 0;
+	/*
+	 * On Aux CCS platforms the invalidation of the Aux
+	 * table requires quiescing memory traffic beforehand
+	 */
+	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
+		u32 bit_group_0 = 0;
+		u32 bit_group_1 = 0;
 		int err;
 		u32 *cs;
 
@@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 		if (err)
 			return err;
 
-		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_FLUSH_L3;
-		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
+
+		/*
+		 * When required, in MTL and beyond platforms we
+		 * need to set the CCS_FLUSH bit in the pipe control
+		 */
+		if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
+			bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
+
+		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+		bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
+		bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		/* Wa_1409600907:tgl,adl-p */
-		flags |= PIPE_CONTROL_DEPTH_STALL;
-		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
-		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+		bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
+		bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;
 
-		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-		flags |= PIPE_CONTROL_QW_WRITE;
+		bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
+		bit_group_1 |= PIPE_CONTROL_QW_WRITE;
 
-		flags |= PIPE_CONTROL_CS_STALL;
+		bit_group_1 |= PIPE_CONTROL_CS_STALL;
 
 		if (!HAS_3D_PIPELINE(engine->i915))
-			flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+			bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
 		else if (engine->class == COMPUTE_CLASS)
-			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+			bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
 		cs = intel_ring_begin(rq, 6);
 		if (IS_ERR(cs))
 			return PTR_ERR(cs);
 
-		cs = gen12_emit_pipe_control(cs,
-					     PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
-					     flags, LRC_PPHWSP_SCRATCH_ADDR);
+		cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
					     LRC_PPHWSP_SCRATCH_ADDR);
 		intel_ring_advance(rq, cs);
 	}
 
@@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 		else if (engine->class == COMPUTE_CLASS)
 			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
-		if (!HAS_FLAT_CCS(rq->engine->i915))
-			count = 8 + 4;
-		else
-			count = 8;
+		count = 8;
+		if (gen12_needs_ccs_aux_inv(rq->engine))
+			count += 8;
 
 		cs = intel_ring_begin(rq, count);
 		if (IS_ERR(cs))
@@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
 		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
 
-		if (!HAS_FLAT_CCS(rq->engine->i915)) {
-			/* hsdes: 1809175790 */
-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
-						      cs, GEN12_GFX_CCS_AUX_NV);
-		}
+		cs = gen12_emit_aux_table_inv(engine, cs);
 
 		*cs++ = preparser_disable(false);
 		intel_ring_advance(rq, cs);
@@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
 int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 {
-	intel_engine_mask_t aux_inv = 0;
-	u32 cmd, *cs;
+	u32 cmd = 4;
+	u32 *cs;
 
-	cmd = 4;
 	if (mode & EMIT_INVALIDATE) {
 		cmd += 2;
 
-		if (!HAS_FLAT_CCS(rq->engine->i915) &&
-		    (rq->engine->class == VIDEO_DECODE_CLASS ||
-		     rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
-			aux_inv = rq->engine->mask &
-				~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
-			if (aux_inv)
-				cmd += 4;
-		}
+		if (gen12_needs_ccs_aux_inv(rq->engine))
+			cmd += 8;
 	}
 
 	cs = intel_ring_begin(rq, cmd);
@@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 		cmd |= MI_INVALIDATE_TLB;
 		if (rq->engine->class == VIDEO_DECODE_CLASS)
 			cmd |= MI_INVALIDATE_BSD;
+
+		if (gen12_needs_ccs_aux_inv(rq->engine) &&
+		    rq->engine->class == COPY_ENGINE_CLASS)
+			cmd |= MI_FLUSH_DW_CCS;
 	}
 
 	*cs++ = cmd;
@@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 	*cs++ = 0; /* upper addr */
 	*cs++ = 0; /* value */
 
-	if (aux_inv) { /* hsdes: 1809175790 */
-		if (rq->engine->class == VIDEO_DECODE_CLASS)
-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
-						      cs, GEN12_VD0_AUX_NV);
-		else
-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
-						      cs, GEN12_VE0_AUX_NV);
-	}
+	cs = gen12_emit_aux_table_inv(rq->engine, cs);
 
 	if (mode & EMIT_INVALIDATE)
 		*cs++ = preparser_disable(false);
 
@@ -13,6 +13,7 @@
 #include "intel_gt_regs.h"
 #include "intel_gpu_commands.h"
 
+struct intel_engine_cs;
 struct intel_gt;
 struct i915_request;
 
@@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
 
 static inline u32 *
-__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,
+			 u32 bit_group_1, u32 offset)
 {
 	memset(batch, 0, 6 * sizeof(u32));
 
-	batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
-	batch[1] = flags1;
+	batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
+	batch[1] = bit_group_1;
 	batch[2] = offset;
 
 	return batch + 6;
 }
 
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *gen8_emit_pipe_control(u32 *batch,
+					  u32 bit_group_1, u32 offset)
 {
-	return __gen8_emit_pipe_control(batch, 0, flags, offset);
+	return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);
 }
 
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,
+					   u32 bit_group_1, u32 offset)
 {
-	return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+	return __gen8_emit_pipe_control(batch, bit_group_0,
+					bit_group_1, offset);
 }
 
 static inline u32 *
 
@@ -121,6 +121,7 @@
 #define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
 #define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
 #define MI_SEMAPHORE_WAIT_TOKEN	MI_INSTR(0x1c, 3) /* GEN12+ */
+#define   MI_SEMAPHORE_REGISTER_POLL	(1 << 16)
 #define   MI_SEMAPHORE_POLL		(1 << 15)
 #define   MI_SEMAPHORE_SAD_GT_SDD	(0 << 12)
 #define   MI_SEMAPHORE_SAD_GTE_SDD	(1 << 12)
@@ -299,6 +300,7 @@
 #define   PIPE_CONTROL_QW_WRITE				(1<<14)
 #define   PIPE_CONTROL_POST_SYNC_OP_MASK		(3<<14)
 #define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
+#define   PIPE_CONTROL_CCS_FLUSH			(1<<13) /* MTL+ */
 #define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
 #define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH	(1<<12) /* gen6+ */
 #define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on ILK */
 
@@ -332,9 +332,11 @@
 #define GEN8_PRIVATE_PAT_HI			_MMIO(0x40e0 + 4)
 #define GEN10_PAT_INDEX(index)			_MMIO(0x40e0 + (index) * 4)
 #define BSD_HWS_PGA_GEN7			_MMIO(0x4180)
-#define GEN12_GFX_CCS_AUX_NV			_MMIO(0x4208)
-#define GEN12_VD0_AUX_NV			_MMIO(0x4218)
-#define GEN12_VD1_AUX_NV			_MMIO(0x4228)
+
+#define GEN12_CCS_AUX_INV			_MMIO(0x4208)
+#define GEN12_VD0_AUX_INV			_MMIO(0x4218)
+#define GEN12_VE0_AUX_INV			_MMIO(0x4238)
+#define GEN12_BCS0_AUX_INV			_MMIO(0x4248)
 
 #define GEN8_RTCR				_MMIO(0x4260)
 #define GEN8_M1TCR				_MMIO(0x4264)
@@ -342,14 +344,12 @@
 #define GEN8_BTCR				_MMIO(0x426c)
 #define GEN8_VTCR				_MMIO(0x4270)
 
-#define GEN12_VD2_AUX_NV			_MMIO(0x4298)
-#define GEN12_VD3_AUX_NV			_MMIO(0x42a8)
-#define GEN12_VE0_AUX_NV			_MMIO(0x4238)
-
 #define BLT_HWS_PGA_GEN7			_MMIO(0x4280)
 
-#define GEN12_VE1_AUX_NV			_MMIO(0x42b8)
+#define GEN12_VD2_AUX_INV			_MMIO(0x4298)
+#define GEN12_CCS0_AUX_INV			_MMIO(0x42c8)
 #define   AUX_INV				REG_BIT(0)
 
 #define VEBOX_HWS_PGA_GEN7			_MMIO(0x4380)
 
 #define GEN12_AUX_ERR_DBG			_MMIO(0x43f4)
@@ -1364,10 +1364,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
 	    IS_DG2_G11(ce->engine->i915))
 		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
 
-	/* hsdes: 1809175790 */
-	if (!HAS_FLAT_CCS(ce->engine->i915))
-		cs = gen12_emit_aux_table_inv(ce->engine->gt,
-					      cs, GEN12_GFX_CCS_AUX_NV);
+	cs = gen12_emit_aux_table_inv(ce->engine, cs);
 
 	/* Wa_16014892111 */
 	if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
@@ -1392,17 +1389,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
 				    PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
 				    0);
 
-	/* hsdes: 1809175790 */
-	if (!HAS_FLAT_CCS(ce->engine->i915)) {
-		if (ce->engine->class == VIDEO_DECODE_CLASS)
-			cs = gen12_emit_aux_table_inv(ce->engine->gt,
-						      cs, GEN12_VD0_AUX_NV);
-		else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
-			cs = gen12_emit_aux_table_inv(ce->engine->gt,
-						      cs, GEN12_VE0_AUX_NV);
-	}
-
-	return cs;
+	return gen12_emit_aux_table_inv(ce->engine, cs);
 }
 
 static void
 
@@ -491,7 +491,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
 		return;
 	}
 
-	msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, reg);
+	msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value);
 
 	// check the msg in DATA register.
 	msg = vgpu_vreg(vgpu, offset + 4);
 
@@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
 		}
 	} while (unlikely(is_barrier(active)));
 
-	if (!__i915_active_fence_set(active, fence))
+	fence = __i915_active_fence_set(active, fence);
+	if (!fence)
 		__i915_active_acquire(ref);
+	else
+		dma_fence_put(fence);
 
 out:
 	i915_active_release(ref);
@@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
 		return NULL;
 	}
 
-	rcu_read_lock();
 	prev = __i915_active_fence_set(active, fence);
-	if (prev)
-		prev = dma_fence_get_rcu(prev);
-	else
+	if (!prev)
 		__i915_active_acquire(ref);
-	rcu_read_unlock();
 
 	return prev;
 }
@@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
 */
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	struct dma_fence *prev;
 	unsigned long flags;
 
-	if (fence == rcu_access_pointer(active->fence))
+	/*
+	 * In case of fences embedded in i915_requests, their memory is
+	 * SLAB_TYPESAFE_BY_RCU, then it can be reused right after release
+	 * by new requests. Then, there is a risk of passing back a pointer
+	 * to a new, completely unrelated fence that reuses the same memory
+	 * while tracked under a different active tracker. Combined with i915
+	 * perf open/close operations that build await dependencies between
+	 * engine kernel context requests and user requests from different
+	 * timelines, this can lead to dependency loops and infinite waits.
+	 *
+	 * As a countermeasure, we try to get a reference to the active->fence
+	 * first, so if we succeed and pass it back to our user then it is not
+	 * released and potentially reused by an unrelated request before the
+	 * user has a chance to set up an await dependency on it.
+	 */
+	prev = i915_active_fence_get(active);
+	if (fence == prev)
 		return fence;
 
 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
-	 * A does the xchg first, and so it sees C or NULL depending
-	 * on the timing of the interrupt handler. If it is NULL, the
-	 * previous fence must have been signaled and we know that
-	 * we are first on the timeline. If it is still present,
-	 * we acquire the lock on that fence and serialise with the interrupt
-	 * handler, in the process removing it from any future interrupt
-	 * callback. A will then wait on C before executing (if present).
-	 *
-	 * As B is second, it sees A as the previous fence and so waits for
-	 * it to complete its transition and takes over the occupancy for
-	 * itself -- remembering that it needs to wait on A before executing.
+	 * Both A and B have got a reference to C or NULL, depending on the
+	 * timing of the interrupt handler. Let's assume that if A has got C
+	 * then it has locked C first (before B).
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
 	spin_lock_irqsave(fence->lock, flags);
-	prev = xchg(__active_fence_slot(active), fence);
-	if (prev) {
-		GEM_BUG_ON(prev == fence);
+	if (prev)
 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+	/*
+	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
+	 * something else, depending on the timing of other threads and/or
+	 * interrupt handler. If not the same as before then A unlocks C if
+	 * applicable and retries, starting from an attempt to get a new
+	 * active->fence. Meanwhile, B follows the same path as A.
+	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
+	 * active->fence, locks it as soon as A completes, and possibly
+	 * succeeds with cmpxchg.
+	 */
+	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+		if (prev) {
+			spin_unlock(prev->lock);
+			dma_fence_put(prev);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+
+		prev = i915_active_fence_get(active);
+		GEM_BUG_ON(prev == fence);
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (prev)
+			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+	}
+
+	/*
+	 * If prev is NULL then the previous fence must have been signaled
+	 * and we know that we are first on the timeline. If it is still
+	 * present then, having the lock on that fence already acquired, we
+	 * serialise with the interrupt handler, in the process of removing it
+	 * from any future interrupt callback. A will then wait on C before
+	 * executing (if present).
+	 *
+	 * As B is second, it sees A as the previous fence and so waits for
+	 * it to complete its transition and takes over the occupancy for
+	 * itself -- remembering that it needs to wait on A before executing.
+	 */
 	if (prev) {
 		__list_del_entry(&active->cb.node);
 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
 	}
@@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
 	int err = 0;
 
 	/* Must maintain timeline ordering wrt previous active requests */
-	rcu_read_lock();
 	fence = __i915_active_fence_set(active, &rq->fence);
-	if (fence) /* but the previous fence may not belong to that timeline! */
-		fence = dma_fence_get_rcu(fence);
-	rcu_read_unlock();
 	if (fence) {
 		err = i915_request_await_dma_fence(rq, fence);
 		dma_fence_put(fence);
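The loop introduced above is a general lock-free pattern: take a reference to the current occupant first, then publish the replacement with cmpxchg; if the slot changed in the meantime, drop the stale reference and retry. Stripped of the fence locking, the skeleton looks roughly like this (C11 atomics; get_ref/put_ref are placeholders for a real refcount, illustrative only):

```c
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) slot;	/* stands in for active->fence */

static int *get_ref(int *p) { return p; }	/* placeholder refcount get */
static void put_ref(int *p) { (void)p; }	/* placeholder refcount put */

/* Publish @new, returning the referenced previous occupant (or NULL). */
static int *install(int *new)
{
	for (;;) {
		int *prev = get_ref(atomic_load(&slot));
		int *expected = prev;

		if (atomic_compare_exchange_strong(&slot, &expected, new))
			return prev;	/* caller must put_ref(prev) later */
		put_ref(prev);		/* slot moved under us: retry */
	}
}

int main(void)
{
	int a = 1, b = 2;

	install(&a);
	printf("prev was %d\n", *install(&b));	/* prints 1 */
	return 0;
}
```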
@@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
 
 	request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
 
+	/*
+	 * Users have to put a reference potentially got by
+	 * __i915_active_fence_set() to the returned request
+	 * when no longer needed
+	 */
 	return to_request(__i915_active_fence_set(&timeline->last_request,
 						  &rq->fence));
 }
@@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
 							 0);
 	}
 
+	/*
+	 * Users have to put the reference to prev potentially got
+	 * by __i915_active_fence_set() when no longer needed
+	 */
 	return prev;
 }
 
@@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 		prev = __i915_request_ensure_ordering(rq, timeline);
 	else
 		prev = __i915_request_ensure_parallel_ordering(rq, timeline);
+	if (prev)
+		i915_request_put(prev);
 
 	/*
	 * Make sure that no request gazumped us - if it was allocated after
 
@@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
 			 sig_cfg.mode.hactive, new_hactive);
 
-		sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive;
+		sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
 		sig_cfg.mode.hactive = new_hactive;
 	}
 
@@ -569,6 +569,7 @@ static const struct of_device_id s6d7aa0_of_match[] = {
 	},
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, s6d7aa0_of_match);
 
 static struct mipi_dsi_driver s6d7aa0_driver = {
 	.probe = s6d7aa0_probe,
 
@@ -519,7 +519,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 
 	if (bo->pin_count) {
 		*locked = false;
-		*busy = false;
+		if (busy)
+			*busy = false;
 		return false;
 	}
 
@@ -209,8 +209,7 @@ int vmbus_connect(void)
	 * Setup the vmbus event connection for channel interrupt
	 * abstraction stuff
	 */
-	vmbus_connection.int_page =
-	(void *)hv_alloc_hyperv_zeroed_page();
+	vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
 	if (vmbus_connection.int_page == NULL) {
 		ret = -ENOMEM;
 		goto cleanup;
@@ -225,8 +224,8 @@ int vmbus_connect(void)
	 * Setup the monitor notification facility. The 1st page for
	 * parent->child and the 2nd page for child->parent
	 */
-	vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_page();
-	vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_page();
+	vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
+	vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
 	if ((vmbus_connection.monitor_pages[0] == NULL) ||
 	    (vmbus_connection.monitor_pages[1] == NULL)) {
 		ret = -ENOMEM;
@@ -333,15 +332,15 @@ void vmbus_disconnect(void)
 		destroy_workqueue(vmbus_connection.work_queue);
 
 	if (vmbus_connection.int_page) {
-		hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
+		hv_free_hyperv_page(vmbus_connection.int_page);
 		vmbus_connection.int_page = NULL;
 	}
 
 	set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
 	set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
 
-	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
-	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
+	hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+	hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
 	vmbus_connection.monitor_pages[0] = NULL;
 	vmbus_connection.monitor_pages[1] = NULL;
 }
 
@@ -1628,7 +1628,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
 	WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
 	WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order));
 	local_irq_save(flags);
-	hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
+	hint = *this_cpu_ptr(hyperv_pcpu_input_arg);
 	if (!hint) {
 		local_irq_restore(flags);
 		return -ENOSPC;
 
@@ -115,12 +115,12 @@ void *hv_alloc_hyperv_zeroed_page(void)
 }
 EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
 
-void hv_free_hyperv_page(unsigned long addr)
+void hv_free_hyperv_page(void *addr)
 {
 	if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
-		free_page(addr);
+		free_page((unsigned long)addr);
 	else
-		kfree((void *)addr);
+		kfree(addr);
 }
 EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
 
@@ -253,7 +253,7 @@ static void hv_kmsg_dump_unregister(void)
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &hyperv_panic_report_block);
 
-	hv_free_hyperv_page((unsigned long)hv_panic_page);
+	hv_free_hyperv_page(hv_panic_page);
 	hv_panic_page = NULL;
 }
 
@@ -270,7 +270,7 @@ static void hv_kmsg_dump_register(void)
 	ret = kmsg_dump_register(&hv_kmsg_dumper);
 	if (ret) {
 		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
-		hv_free_hyperv_page((unsigned long)hv_panic_page);
+		hv_free_hyperv_page(hv_panic_page);
 		hv_panic_page = NULL;
 	}
 }
 
@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
 extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
 extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
 extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
-extern void dsp_cmx_send(void *arg);
+extern void dsp_cmx_send(struct timer_list *arg);
 extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
 extern int dsp_cmx_del_conf_member(struct dsp *dsp);
 extern int dsp_cmx_del_conf(struct dsp_conf *conf);
 
@@ -1614,7 +1614,7 @@ static u16 dsp_count; /* last sample count */
 static int dsp_count_valid; /* if we have last sample count */
 
 void
-dsp_cmx_send(void *arg)
+dsp_cmx_send(struct timer_list *arg)
 {
 	struct dsp_conf *conf;
 	struct dsp_conf_member *member;
 
@@ -1195,7 +1195,7 @@ static int __init dsp_init(void)
 	}
 
 	/* set sample timer */
-	timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
+	timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
 	dsp_spl_tl.expires = jiffies + dsp_tics;
 	dsp_spl_jiffies = dsp_spl_tl.expires;
 	add_timer(&dsp_spl_tl);
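The prototype change above exists because timer_setup() callbacks receive the timer_list itself; casting a void(*)(void *) through (void *) only happened to work. Per-object state is normally recovered with container_of() (dsp_cmx_send() happens to use global state, so its argument is simply unused). A userspace re-creation of the idiom, illustrative only:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list {
	void (*function)(struct timer_list *);
};

struct dsp {
	int id;
	struct timer_list tl;
};

/* Modern callback signature: the timer itself is the argument. */
static void dsp_tick(struct timer_list *t)
{
	struct dsp *d = container_of(t, struct dsp, tl);

	printf("tick for dsp %d\n", d->id);
}

int main(void)
{
	struct dsp d = { .id = 7, .tl = { .function = dsp_tick } };

	d.tl.function(&d.tl);	/* what the timer core does on expiry */
	return 0;
}
```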
@@ -251,8 +251,8 @@ int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
 
 		pkt->extradata_size = 0;
 		pkt->shdr.hdr.size =
-			struct_size((struct hfi_session_set_buffers_pkt *)0,
-				    buffer_info, bd->num_buffers);
+			struct_size_t(struct hfi_session_set_buffers_pkt,
+				      buffer_info, bd->num_buffers);
 	}
 
 	pkt->response_req = bd->response_required;
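struct_size() wants a pointer to an actual object so it can do overflow-checked sizing; passing a casted 0 just to name the type is the anti-pattern struct_size_t() replaces. A minimal stand-in for the macro (the real kernel version also saturates on overflow, which this sketch omits):

```c
#include <stddef.h>
#include <stdio.h>

/* Size of a struct whose flexible array member holds @count entries. */
#define struct_size_t(type, member, count) \
	(offsetof(type, member) + sizeof(((type *)0)->member[0]) * (size_t)(count))

struct pkt {
	unsigned int size;
	unsigned int buffer_info[];	/* flexible array member */
};

int main(void)
{
	printf("4 entries -> %zu bytes\n",
	       struct_size_t(struct pkt, buffer_info, 4));
	return 0;
}
```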
@@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
 				return;
 			}
 			for (len = 0; len < remain && len < host->fifo_width;) {
-				/* SCR data must be read in big endian. */
-				if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
-					*sgp = ioread32be(host->base +
-							  REG_DATA_WINDOW);
-				else
-					*sgp = ioread32(host->base +
-							REG_DATA_WINDOW);
+				*sgp = ioread32(host->base + REG_DATA_WINDOW);
 				sgp++;
 				len += 4;
 			}
 
@@ -29,9 +29,16 @@ struct f_sdhost_priv {
 	bool enable_cmd_dat_delay;
 };
 
+static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+	return sdhci_pltfm_priv(pltfm_host);
+}
+
 static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
 {
-	struct f_sdhost_priv *priv = sdhci_priv(host);
+	struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
 	u32 ctrl = 0;
 
 	usleep_range(2500, 3000);
@@ -64,7 +71,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
 
 static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
 {
-	struct f_sdhost_priv *priv = sdhci_priv(host);
+	struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
 	u32 ctl;
 
 	if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
@@ -95,30 +102,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = {
 	.set_uhs_signaling = sdhci_set_uhs_signaling,
 };
 
+static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
+	.ops = &sdhci_f_sdh30_ops,
+	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+		| SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
+	.quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
+		| SDHCI_QUIRK2_TUNING_WORK_AROUND,
+};
+
 static int sdhci_f_sdh30_probe(struct platform_device *pdev)
 {
 	struct sdhci_host *host;
 	struct device *dev = &pdev->dev;
-	int irq, ctrl = 0, ret = 0;
+	int ctrl = 0, ret = 0;
 	struct f_sdhost_priv *priv;
+	struct sdhci_pltfm_host *pltfm_host;
 	u32 reg = 0;
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
-		return irq;
-
-	host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
+	host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
+				sizeof(struct f_sdhost_priv));
 	if (IS_ERR(host))
 		return PTR_ERR(host);
 
-	priv = sdhci_priv(host);
+	pltfm_host = sdhci_priv(host);
+	priv = sdhci_pltfm_priv(pltfm_host);
 	priv->dev = dev;
 
-	host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
-		       SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
-	host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
-			SDHCI_QUIRK2_TUNING_WORK_AROUND;
-
 	priv->enable_cmd_dat_delay = device_property_read_bool(dev,
 						"fujitsu,cmd-dat-delay-select");
 
@@ -126,18 +135,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	platform_set_drvdata(pdev, host);
-
-	host->hw_name = "f_sdh30";
-	host->ops = &sdhci_f_sdh30_ops;
-	host->irq = irq;
-
-	host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
-	if (IS_ERR(host->ioaddr)) {
-		ret = PTR_ERR(host->ioaddr);
-		goto err;
-	}
-
 	if (dev_of_node(dev)) {
 		sdhci_get_of_property(pdev);
 
@@ -204,24 +201,21 @@ err_rst:
 err_clk:
 	clk_disable_unprepare(priv->clk_iface);
 err:
-	sdhci_free_host(host);
+	sdhci_pltfm_free(pdev);
 
 	return ret;
 }
 
 static int sdhci_f_sdh30_remove(struct platform_device *pdev)
 {
 	struct sdhci_host *host = platform_get_drvdata(pdev);
-	struct f_sdhost_priv *priv = sdhci_priv(host);
-
-	sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
-			  0xffffffff);
+	struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
 
 	reset_control_assert(priv->rst);
 	clk_disable_unprepare(priv->clk);
 	clk_disable_unprepare(priv->clk_iface);
 
-	sdhci_free_host(host);
-	platform_set_drvdata(pdev, NULL);
+	sdhci_pltfm_unregister(pdev);
 
 	return 0;
 }
 
@@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
 	unsigned int i;
 	int ret;
 
-	if (op->cs > NAND_MAX_CHIPS)
+	if (op->cs >= NAND_MAX_CHIPS)
 		return -EINVAL;
 
 	if (check_only)
 
@@ -1278,7 +1278,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
 	struct meson_nfc *nfc = nand_get_controller_data(nand);
 	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
 	struct mtd_info *mtd = nand_to_mtd(nand);
-	int nsectors = mtd->writesize / 1024;
 	int raw_writesize;
 	int ret;
 
@@ -1304,7 +1303,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
 	nand->options |= NAND_NO_SUBPAGE_WRITE;
 
 	ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
-				   mtd->oobsize - 2 * nsectors);
+				   mtd->oobsize - 2);
 	if (ret) {
 		dev_err(nfc->dev, "failed to ECC init\n");
 		return -EINVAL;
Some files were not shown because too many files have changed in this diff.