Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts or adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 59be3baa8d

 .mailmap | 4 ++++
@@ -241,11 +241,13 @@ Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
 Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Crispin <john@phrozen.org> <blogic@openwrt.org>
 John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com>
 John Keeping <john@keeping.me.uk> <john@metanate.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
+<jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
+<jon.toppins+linux@gmail.com> <jtoppins@redhat.com>
 Jonas Gorski <jonas.gorski@gmail.com> <jogo@openwrt.org>
 Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
@@ -453,6 +455,8 @@ Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
+Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
+Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
Documentation/ABI/testing/sysfs-driver-ufs:

@@ -994,7 +994,7 @@ Description:	This file shows the amount of physical memory needed

-What:		/sys/bus/platform/drivers/ufshcd/*/rpm_lvl
+What:		/sys/bus/platform/devices/*.ufs/rpm_lvl
 Date:		September 2014
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry could be used to set or show the UFS device
		runtime power management level. The current driver
		implementation supports 7 levels with next target states:

@@ -1021,7 +1021,7 @@ Description:	This entry could be used to set or show the UFS device

-What:		/sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
+What:		/sys/bus/platform/devices/*.ufs/rpm_target_dev_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target power mode of an UFS device
		for the chosen runtime power management level.

@@ -1030,7 +1030,7 @@ Description:	This entry shows the target power mode of an UFS device

-What:		/sys/bus/platform/drivers/ufshcd/*/rpm_target_link_state
+What:		/sys/bus/platform/devices/*.ufs/rpm_target_link_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target state of an UFS UIC link
		for the chosen runtime power management level.

@@ -1039,7 +1039,7 @@ Description:	This entry shows the target state of an UFS UIC link

-What:		/sys/bus/platform/drivers/ufshcd/*/spm_lvl
+What:		/sys/bus/platform/devices/*.ufs/spm_lvl
 Date:		September 2014
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry could be used to set or show the UFS device
		system power management level. The current driver
		implementation supports 7 levels with next target states:

@@ -1066,7 +1066,7 @@ Description:	This entry could be used to set or show the UFS device

-What:		/sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
+What:		/sys/bus/platform/devices/*.ufs/spm_target_dev_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target power mode of an UFS device
		for the chosen system power management level.

@@ -1075,7 +1075,7 @@ Description:	This entry shows the target power mode of an UFS device

-What:		/sys/bus/platform/drivers/ufshcd/*/spm_target_link_state
+What:		/sys/bus/platform/devices/*.ufs/spm_target_link_state
 Date:		February 2018
-Contact:	Subhash Jadavani <subhashj@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This entry shows the target state of an UFS UIC link
		for the chosen system power management level.

@@ -1084,7 +1084,7 @@ Description:	This entry shows the target state of an UFS UIC link

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/monitor_enable
+What:		/sys/bus/platform/devices/*.ufs/monitor/monitor_enable
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the status of performance monitor enablement
		and it can be used to start/stop the monitor. When the monitor
		is stopped, the performance data collected is also cleared.
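The monitor attributes above are plain decimal text files, so userspace can consume them with nothing more than open() and fscanf(). Below is a minimal sketch of reading one counter; the device instance name is a hypothetical example of the documented /sys/bus/platform/devices/*.ufs pattern, not something taken from this commit:

#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal userspace sketch: read one UFS monitor attribute as a number.
 * The path below is a hypothetical instance of the documented pattern
 * /sys/bus/platform/devices/*.ufs/monitor/<attr>.
 */
static long read_sysfs_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	long sectors = read_sysfs_long(
		"/sys/bus/platform/devices/1d84000.ufs/monitor/read_total_sectors");

	if (sectors < 0)
		return EXIT_FAILURE;
	/* The ABI defines a sector as 512 bytes. */
	printf("read: %ld sectors (%ld bytes)\n", sectors, sectors * 512);
	return EXIT_SUCCESS;
}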
@@ -1092,7 +1092,7 @@ Description:	This file shows the status of performance monitor enablement

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/monitor_chunk_size
+What:		/sys/bus/platform/devices/*.ufs/monitor/monitor_chunk_size
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file tells the monitor to focus on requests transferring
		data of specific chunk size (in Bytes). 0 means any chunk size.
		It can only be changed when monitor is disabled.

@@ -1100,7 +1100,7 @@ Description:	This file tells the monitor to focus on requests transferring

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_total_sectors
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_total_sectors
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many sectors (in 512 Bytes) have been
		sent from device to host after monitor gets started.

@@ -1109,7 +1109,7 @@ Description:	This file shows how many sectors (in 512 Bytes) have been

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_total_busy
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_total_busy
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how long (in micro seconds) has been spent
		sending data from device to host after monitor gets started.

@@ -1118,7 +1118,7 @@ Description:	This file shows how long (in micro seconds) has been spent

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_nr_requests
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_nr_requests
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many read requests have been sent after
		monitor gets started.

@@ -1127,7 +1127,7 @@ Description:	This file shows how many read requests have been sent after

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_max
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_max
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the maximum latency (in micro seconds) of
		read requests after monitor gets started.

@@ -1136,7 +1136,7 @@ Description:	This file shows the maximum latency (in micro seconds) of

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_min
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_min
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the minimum latency (in micro seconds) of
		read requests after monitor gets started.

@@ -1145,7 +1145,7 @@ Description:	This file shows the minimum latency (in micro seconds) of

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_avg
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_avg
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the average latency (in micro seconds) of
		read requests after monitor gets started.

@@ -1154,7 +1154,7 @@ Description:	This file shows the average latency (in micro seconds) of

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_sum
+What:		/sys/bus/platform/devices/*.ufs/monitor/read_req_latency_sum
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the total latency (in micro seconds) of
		read requests sent after monitor gets started.

@@ -1163,7 +1163,7 @@ Description:	This file shows the total latency (in micro seconds) of

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_total_sectors
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_total_sectors
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many sectors (in 512 Bytes) have been sent
		from host to device after monitor gets started.

@@ -1172,7 +1172,7 @@ Description:	This file shows how many sectors (in 512 Bytes) have been sent

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_total_busy
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_total_busy
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how long (in micro seconds) has been spent
		sending data from host to device after monitor gets started.

@@ -1181,7 +1181,7 @@ Description:	This file shows how long (in micro seconds) has been spent

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_nr_requests
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_nr_requests
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows how many write requests have been sent after
		monitor gets started.

@@ -1190,7 +1190,7 @@ Description:	This file shows how many write requests have been sent after

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_max
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_max
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the maximum latency (in micro seconds) of write
		requests after monitor gets started.

@@ -1199,7 +1199,7 @@ Description:	This file shows the maximum latency (in micro seconds) of write

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_min
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_min
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the minimum latency (in micro seconds) of write
		requests after monitor gets started.

@@ -1208,7 +1208,7 @@ Description:	This file shows the minimum latency (in micro seconds) of write

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_avg
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_avg
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the average latency (in micro seconds) of write
		requests after monitor gets started.

@@ -1217,7 +1217,7 @@ Description:	This file shows the average latency (in micro seconds) of write

-What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_sum
+What:		/sys/bus/platform/devices/*.ufs/monitor/write_req_latency_sum
 Date:		January 2021
-Contact:	Can Guo <cang@codeaurora.org>
+Contact:	Can Guo <quic_cang@quicinc.com>
 Description:	This file shows the total latency (in micro seconds) of write
		requests after monitor gets started.

@@ -1226,7 +1226,7 @@ Description:	This file shows the total latency (in micro seconds) of write

-What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_presv_us_en
+What:		/sys/bus/platform/devices/*.ufs/device_descriptor/wb_presv_us_en
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows if preserve user-space was configured

		The file is read only.

@@ -1234,7 +1234,7 @@ Description:	This entry shows if preserve user-space was configured

-What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_shared_alloc_units
+What:		/sys/bus/platform/devices/*.ufs/device_descriptor/wb_shared_alloc_units
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the shared allocated units of WB buffer

		The file is read only.

@@ -1242,7 +1242,7 @@ Description:	This entry shows the shared allocated units of WB buffer

-What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_type
+What:		/sys/bus/platform/devices/*.ufs/device_descriptor/wb_type
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the configured WB type.
		0x1 for shared buffer mode. 0x0 for dedicated buffer mode.

@@ -1251,7 +1251,7 @@ Description:	This entry shows the configured WB type.

-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_buff_cap_adj
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_buff_cap_adj
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the total user-space decrease in shared
		buffer mode.
		The value of this parameter is 3 for TLC NAND when SLC mode

@@ -1262,7 +1262,7 @@ Description:	This entry shows the total user-space decrease in shared

-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_max_alloc_units
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_max_alloc_units
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the Maximum total WriteBooster Buffer size
		which is supported by the entire device.

@@ -1271,7 +1271,7 @@ Description:	This entry shows the Maximum total WriteBooster Buffer size

-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_max_wb_luns
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_max_wb_luns
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the maximum number of luns that can support
		WriteBooster.

@@ -1280,7 +1280,7 @@ Description:	This entry shows the maximum number of luns that can support

-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_sup_red_type
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_sup_red_type
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	The supportability of user space reduction mode
		and preserve user space mode.
		00h: WriteBooster Buffer can be configured only in

@@ -1295,7 +1295,7 @@ Description:	The supportability of user space reduction mode

-What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_sup_wb_type
+What:		/sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_sup_wb_type
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	The supportability of WriteBooster Buffer type.

		===	==========================================================

@@ -1310,7 +1310,7 @@ Description:	The supportability of WriteBooster Buffer type.

-What:		/sys/bus/platform/drivers/ufshcd/*/flags/wb_enable
+What:		/sys/bus/platform/devices/*.ufs/flags/wb_enable
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the status of WriteBooster.

		==	============================

@@ -1323,7 +1323,7 @@ Description:	This entry shows the status of WriteBooster.

-What:		/sys/bus/platform/drivers/ufshcd/*/flags/wb_flush_en
+What:		/sys/bus/platform/devices/*.ufs/flags/wb_flush_en
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows if flush is enabled.

		==	=================================

@@ -1336,7 +1336,7 @@ Description:	This entry shows if flush is enabled.

-What:		/sys/bus/platform/drivers/ufshcd/*/flags/wb_flush_during_h8
+What:		/sys/bus/platform/devices/*.ufs/flags/wb_flush_during_h8
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	Flush WriteBooster Buffer during hibernate state.

		==	=================================================

@@ -1351,7 +1351,7 @@ Description:	Flush WriteBooster Buffer during hibernate state.

-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_avail_buf
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_avail_buf
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the amount of unused WriteBooster buffer
		available.

@@ -1360,7 +1360,7 @@ Description:	This entry shows the amount of unused WriteBooster buffer

-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_cur_buf
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_cur_buf
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the amount of unused current buffer.

		The file is read only.

@@ -1368,7 +1368,7 @@ Description:	This entry shows the amount of unused current buffer.

-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_flush_status
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_flush_status
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the flush operation status.

@@ -1385,7 +1385,7 @@ Description:	This entry shows the flush operation status.

-What:		/sys/bus/platform/drivers/ufshcd/*/attributes/wb_life_time_est
+What:		/sys/bus/platform/devices/*.ufs/attributes/wb_life_time_est
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows an indication of the WriteBooster Buffer
		lifetime based on the amount of performed program/erase cycles

@@ -1399,7 +1399,7 @@ Description:	This entry shows an indication of the WriteBooster Buffer

 What:		/sys/class/scsi_device/*/device/unit_descriptor/wb_buf_alloc_units
 Date:		June 2020
-Contact:	Asutosh Das <asutoshd@codeaurora.org>
+Contact:	Asutosh Das <quic_asutoshd@quicinc.com>
 Description:	This entry shows the configured size of WriteBooster buffer.
		0400h corresponds to 4GB.
Documentation/riscv/hwprobe.rst:

@@ -49,7 +49,7 @@ The following keys are defined:
   privileged ISA, with the following known exceptions (more exceptions may be
   added, but only if it can be demonstrated that the user ABI is not broken):

-  * The :fence.i: instruction cannot be directly executed by userspace
+  * The ``fence.i`` instruction cannot be directly executed by userspace
     programs (it may still be executed in userspace via a
     kernel-controlled mechanism such as the vDSO).
 MAINTAINERS | 14 ++++++++++++--
@@ -4124,6 +4124,13 @@ F:	Documentation/devicetree/bindings/spi/brcm,bcm63xx-hsspi.yaml
 F:	drivers/spi/spi-bcm63xx-hsspi.c
 F:	drivers/spi/spi-bcmbca-hsspi.c

+BROADCOM BCM6348/BCM6358 SPI controller DRIVER
+M:	Jonas Gorski <jonas.gorski@gmail.com>
+L:	linux-spi@vger.kernel.org
+S:	Odd Fixes
+F:	Documentation/devicetree/bindings/spi/spi-bcm63xx.txt
+F:	drivers/spi/spi-bcm63xx.c
+
 BROADCOM ETHERNET PHY DRIVERS
 M:	Florian Fainelli <florian.fainelli@broadcom.com>
 R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>

@@ -8684,8 +8691,11 @@ S:	Maintained
 F:	drivers/input/touchscreen/resistive-adc-touch.c

 GENERIC STRING LIBRARY
+M:	Kees Cook <keescook@chromium.org>
 R:	Andy Shevchenko <andy@kernel.org>
-S:	Maintained
+L:	linux-hardening@vger.kernel.org
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
 F:	include/linux/string.h
 F:	include/linux/string_choices.h
 F:	include/linux/string_helpers.h

@@ -13980,7 +13990,7 @@ T:	git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
 F:	drivers/soc/microchip/

 MICROCHIP SPI DRIVER
-M:	Tudor Ambarus <tudor.ambarus@linaro.org>
+M:	Ryan Wanner <ryan.wanner@microchip.com>
 S:	Supported
 F:	drivers/spi/spi-atmel.*
 Makefile | 2 +-
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
arch/arm64/net/bpf_jit_comp.c:

@@ -322,7 +322,13 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
	 *
	 */

-	emit_bti(A64_BTI_C, ctx);
+	/* bpf function may be invoked by 3 instruction types:
+	 * 1. bl, attached via freplace to bpf prog via short jump
+	 * 2. br, attached via freplace to bpf prog via long jump
+	 * 3. blr, working as a function pointer, used by emit_call.
+	 * So BTI_JC should be used here to support both br and blr.
+	 */
+	emit_bti(A64_BTI_JC, ctx);

	emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
	emit(A64_NOP, ctx);
arch/powerpc/include/asm/book3s/64/hash-4k.h:

@@ -136,12 +136,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
	return 0;
 }

-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	BUG();
-	return 0;
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
	BUG();
arch/powerpc/include/asm/book3s/64/hash-64k.h:

@@ -263,11 +263,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
 }

-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
arch/powerpc/include/asm/book3s/64/hash.h:

@@ -132,6 +132,11 @@ static inline int get_region_id(unsigned long ea)
	return region_id;
 }

+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
 #define	hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
 #define	hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
 static inline int hash__p4d_bad(p4d_t p4d)
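hash__pmd_same() compares two PMDs while masking out the HPTE bookkeeping bits (_PAGE_HPTEFLAGS), so entries that differ only in those bits still compare equal. The same masked-XOR idiom in a standalone sketch, with made-up flag bits standing in for _PAGE_HPTEFLAGS:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: compare two 64-bit values while ignoring a mask of
 * "don't care" bits, the idiom behind hash__pmd_same(). */
static bool same_ignoring(uint64_t a, uint64_t b, uint64_t ignore_mask)
{
	return ((a ^ b) & ~ignore_mask) == 0;
}

int main(void)
{
	/* Hypothetical flag bits standing in for _PAGE_HPTEFLAGS. */
	const uint64_t HPTEFLAGS = 0x0fULL;

	printf("%d %d\n",
	       same_ignoring(0x1005, 0x100a, HPTEFLAGS),  /* 1: differ only in flags */
	       same_ignoring(0x1005, 0x2005, HPTEFLAGS)); /* 0: differ elsewhere */
	return 0;
}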
arch/powerpc/kernel/exceptions-64e.S:

@@ -5,6 +5,7 @@
 * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 */

+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
 #include <asm/page.h>

@@ -66,7 +67,7 @@
 #define SPECIAL_EXC_LOAD(reg, name) \
	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

-special_reg_save:
+SYM_CODE_START_LOCAL(special_reg_save)
	/*
	 * We only need (or have stack space) to save this stuff if
	 * we interrupted the kernel.

@@ -131,8 +132,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
	SPECIAL_EXC_STORE(r10,CSRR1)

	blr
+SYM_CODE_END(special_reg_save)

-ret_from_level_except:
+SYM_CODE_START_LOCAL(ret_from_level_except)
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	beq	1f

@@ -206,6 +208,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
	mtxer	r11

	blr
+SYM_CODE_END(ret_from_level_except)

 .macro ret_from_level srr0 srr1 paca_ex scratch
	bl	ret_from_level_except

@@ -232,13 +235,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
	mfspr	r13,\scratch
 .endm

-ret_from_crit_except:
+SYM_CODE_START_LOCAL(ret_from_crit_except)
	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
	rfci
+SYM_CODE_END(ret_from_crit_except)

-ret_from_mc_except:
+SYM_CODE_START_LOCAL(ret_from_mc_except)
	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
	rfmci
+SYM_CODE_END(ret_from_mc_except)

 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)	\

@@ -978,20 +983,22 @@ masked_interrupt_book3e_0x2c0:
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
-storage_fault_common:
+SYM_CODE_START_LOCAL(storage_fault_common)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	do_page_fault
	b	interrupt_return
+SYM_CODE_END(storage_fault_common)

 /*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here.
 */
-alignment_more:
+SYM_CODE_START_LOCAL(alignment_more)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	alignment_exception
	REST_NVGPRS(r1)
	b	interrupt_return
+SYM_CODE_END(alignment_more)

 /*
 * Trampolines used when spotting a bad kernel stack pointer in

@@ -1030,8 +1037,7 @@ BAD_STACK_TRAMPOLINE(0xe00)
 BAD_STACK_TRAMPOLINE(0xf00)
 BAD_STACK_TRAMPOLINE(0xf20)

-	.globl	bad_stack_book3e
-bad_stack_book3e:
+_GLOBAL(bad_stack_book3e)
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;	/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)

@@ -1285,8 +1291,7 @@ have_hes:
 * ever takes any parameters, the SCOM code must also be updated to
 * provide them.
 */
-	.globl a2_tlbinit_code_start
-a2_tlbinit_code_start:
+_GLOBAL(a2_tlbinit_code_start)

	ori	r11,r3,MAS0_WQ_ALLWAYS
	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */

@@ -1479,8 +1484,7 @@ _GLOBAL(book3e_secondary_thread_init)
	mflr	r28
	b	3b

-	.globl init_core_book3e
-init_core_book3e:
+_GLOBAL(init_core_book3e)
	/* Establish the interrupt vector base */
	tovirt(r2,r2)
	LOAD_REG_ADDR(r3, interrupt_base_book3e)

@@ -1488,7 +1492,7 @@ init_core_book3e:
	sync
	blr

-init_thread_book3e:
+SYM_CODE_START_LOCAL(init_thread_book3e)
	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
	mtspr	SPRN_EPCR,r3

@@ -1502,6 +1506,7 @@ init_thread_book3e:
	mtspr	SPRN_TSR,r3

	blr
+SYM_CODE_END(init_thread_book3e)

 _GLOBAL(__setup_base_ivors)
	SET_IVOR(0, 0x020)	/* Critical Input */
arch/powerpc/kernel/security.c:

@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *

 static int ssb_prctl_get(struct task_struct *task)
 {
-	if (stf_enabled_flush_types == STF_BARRIER_NONE)
-		/*
-		 * We don't have an explicit signal from firmware that we're
-		 * vulnerable or not, we only have certain CPU revisions that
-		 * are known to be vulnerable.
-		 *
-		 * We assume that if we're on another CPU, where the barrier is
-		 * NONE, then we are not vulnerable.
-		 */
+	/*
+	 * The STF_BARRIER feature is on by default, so if it's off that means
+	 * firmware has explicitly said the CPU is not vulnerable via either
+	 * the hypercall or device tree.
+	 */
+	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
		return PR_SPEC_NOT_AFFECTED;
-	else
-		/*
-		 * If we do have a barrier type then we are vulnerable. The
-		 * barrier is not a global or per-process mitigation, so the
-		 * only value we can report here is PR_SPEC_ENABLE, which
-		 * appears as "vulnerable" in /proc.
-		 */
-		return PR_SPEC_ENABLE;

-	return -EINVAL;
+	/*
+	 * If the system's CPU has no known barrier (see setup_stf_barrier())
+	 * then assume that the CPU is not vulnerable.
+	 */
+	if (stf_enabled_flush_types == STF_BARRIER_NONE)
+		return PR_SPEC_NOT_AFFECTED;
+
+	/*
+	 * Otherwise the CPU is vulnerable. The barrier is not a global or
+	 * per-process mitigation, so the only value that can be reported here
+	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
+	 */
+	return PR_SPEC_ENABLE;
 }

 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
|
@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
|
||||
|
||||
static long native_hpte_remove(unsigned long hpte_group)
|
||||
{
|
||||
unsigned long hpte_v, flags;
|
||||
struct hash_pte *hptep;
|
||||
int i;
|
||||
int slot_offset;
|
||||
unsigned long hpte_v;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
DBG_LOW(" remove(group=%lx)\n", hpte_group);
|
||||
|
||||
@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
|
||||
slot_offset &= 0x7;
|
||||
}
|
||||
|
||||
if (i == HPTES_PER_GROUP)
|
||||
return -1;
|
||||
if (i == HPTES_PER_GROUP) {
|
||||
i = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Invalidate the hpte. NOTE: this also unlocks it */
|
||||
release_hpte_lock();
|
||||
hptep->v = 0;
|
||||
|
||||
out:
|
||||
local_irq_restore(flags);
|
||||
return i;
|
||||
}
|
||||
|
||||
|
@ -317,19 +317,14 @@ void __init riscv_fill_hwcap(void)
|
||||
#undef SET_ISA_EXT_MAP
|
||||
}
|
||||
|
||||
/*
|
||||
* Linux requires the following extensions, so we may as well
|
||||
* always set them.
|
||||
*/
|
||||
set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
|
||||
set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
|
||||
|
||||
/*
|
||||
* These ones were as they were part of the base ISA when the
|
||||
* port & dt-bindings were upstreamed, and so can be set
|
||||
* unconditionally where `i` is in riscv,isa on DT systems.
|
||||
*/
|
||||
if (acpi_disabled) {
|
||||
set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
|
||||
set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
|
||||
set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
|
||||
set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
|
||||
}
|
||||
|
arch/riscv/mm/init.c:

@@ -1346,7 +1346,7 @@ static void __init reserve_crashkernel(void)
	 */
	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
					       search_start,
-					       min(search_end, (unsigned long) SZ_4G));
+					       min(search_end, (unsigned long)(SZ_4G - 1)));
	if (crash_base == 0) {
		/* Try again without restricting region to 32bit addressible memory */
		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
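The inclusive bound matters on 32-bit configurations: there, (unsigned long)SZ_4G truncates to 0, so min() would clamp the search window to nothing, while SZ_4G - 1 stays representable. Assuming that is the motivation here, a tiny userspace illustration of the truncation:

#include <stdint.h>
#include <stdio.h>

#define SZ_4G 0x100000000ULL

int main(void)
{
	/* On a 32-bit kernel, "unsigned long" is 32 bits wide, modeled here
	 * with uint32_t: the exclusive bound wraps to 0, while the inclusive
	 * bound stays representable. */
	uint32_t bad_end  = (uint32_t)SZ_4G;	   /* wraps to 0x0 */
	uint32_t good_end = (uint32_t)(SZ_4G - 1); /* 0xffffffff */

	printf("SZ_4G truncated: %#x, SZ_4G - 1 truncated: %#x\n",
	       bad_end, good_end);
	return 0;
}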
arch/sparc/include/asm/cmpxchg_32.h:

@@ -15,7 +15,7 @@
 unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);

-static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
+static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
	switch (size) {
	case 4:
arch/sparc/include/asm/cmpxchg_64.h:

@@ -87,7 +87,7 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
	return (load32 & mask) >> bit_shift;
 }

-static inline unsigned long
+static __always_inline unsigned long
 __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
	switch (size) {
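__always_inline matters here because the switch (size) is meant to fold away at every call site, where size is a compile-time sizeof; if the compiler emits the helper out of line, the dead reference to the deliberately undefined __xchg_called_with_bad_pointer() can survive to link time. A userspace sketch of the same dispatch pattern, with the link-error trick replaced by abort() so the example builds at any optimization level:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's deliberately undefined
 * __xchg_called_with_bad_pointer(), which turns an unsupported size into
 * a link-time error once the switch below folds per call site. */
static void xchg_called_with_bad_pointer(void)
{
	abort();
}

static inline __attribute__((__always_inline__)) unsigned long
arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {	/* constant-folds: size is sizeof(*ptr) at call sites */
	case 4: {
		uint32_t old = *(volatile uint32_t *)ptr;

		*(volatile uint32_t *)ptr = (uint32_t)x; /* illustration only, not atomic */
		return old;
	}
	default:
		xchg_called_with_bad_pointer();
		return x;
	}
}

int main(void)
{
	uint32_t v = 1;

	printf("old=%lu new=%u\n", arch_xchg(7, &v, sizeof(v)), v);
	return 0;
}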
arch/um/kernel/um_arch.c:

@@ -437,7 +437,7 @@ void __init arch_cpu_finalize_init(void)
	os_check_bugs();
 }

-void apply_ibt_endbr(s32 *start, s32 *end)
+void apply_seal_endbr(s32 *start, s32 *end)
 {
 }
arch/x86/entry/entry_32.S:

@@ -719,26 +719,6 @@ SYM_CODE_START(__switch_to_asm)
 SYM_CODE_END(__switch_to_asm)
 .popsection

-/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * asmlinkage function so its argument has to be pushed on the stack. This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-.pushsection .text, "ax"
-SYM_FUNC_START(schedule_tail_wrapper)
-	FRAME_BEGIN
-
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
-
-	FRAME_END
-	RET
-SYM_FUNC_END(schedule_tail_wrapper)
-.popsection
-
 /*
 * A newly forked process directly context switches into this address.
 *

@@ -747,29 +727,22 @@ SYM_FUNC_END(schedule_tail_wrapper)
 * edi: kernel thread arg
 */
 .pushsection .text, "ax"
-SYM_CODE_START(ret_from_fork)
-	call	schedule_tail_wrapper
+SYM_CODE_START(ret_from_fork_asm)
+	movl	%esp, %edx	/* regs */

-	testl	%ebx, %ebx
-	jnz	1f		/* kernel threads are uncommon */
+	/* return address for the stack unwinder */
+	pushl	$.Lsyscall_32_done

-2:
-	/* When we fork, we trace the syscall return in the child, too. */
-	movl	%esp, %eax
-	call	syscall_exit_to_user_mode
-	jmp	.Lsyscall_32_done
+	FRAME_BEGIN
+	/* prev already in EAX */
+	movl	%ebx, %ecx	/* fn */
+	pushl	%edi		/* fn_arg */
+	call	ret_from_fork
+	addl	$4, %esp
+	FRAME_END

-	/* kernel thread */
-1:	movl	%edi, %eax
-	CALL_NOSPEC ebx
-	/*
-	 * A kernel thread is allowed to return here after successfully
-	 * calling kernel_execve().  Exit to userspace to complete the execve()
-	 * syscall.
-	 */
-	movl	$0, PT_EAX(%esp)
-	jmp	2b
-SYM_CODE_END(ret_from_fork)
+	RET
+SYM_CODE_END(ret_from_fork_asm)
 .popsection

 SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
arch/x86/entry/entry_64.S:

@@ -284,36 +284,19 @@ SYM_FUNC_END(__switch_to_asm)
 * r12: kernel thread arg
 */
 .pushsection .text, "ax"
-__FUNC_ALIGN
-SYM_CODE_START_NOALIGN(ret_from_fork)
-	UNWIND_HINT_END_OF_STACK
+SYM_CODE_START(ret_from_fork_asm)
+	UNWIND_HINT_REGS
	ANNOTATE_NOENDBR // copy_thread
	CALL_DEPTH_ACCOUNT
-	movq	%rax, %rdi
-	call	schedule_tail			/* rdi: 'prev' task parameter */

-	testq	%rbx, %rbx			/* from kernel_thread? */
-	jnz	1f				/* kernel threads are uncommon */
+	movq	%rax, %rdi	/* prev */
+	movq	%rsp, %rsi	/* regs */
+	movq	%rbx, %rdx	/* fn */
+	movq	%r12, %rcx	/* fn_arg */
+	call	ret_from_fork

-2:
-	UNWIND_HINT_REGS
-	movq	%rsp, %rdi
-	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

-1:
-	/* kernel thread */
-	UNWIND_HINT_END_OF_STACK
-	movq	%r12, %rdi
-	CALL_NOSPEC rbx
-	/*
-	 * A kernel thread is allowed to return here after successfully
-	 * calling kernel_execve().  Exit to userspace to complete the execve()
-	 * syscall.
-	 */
-	movq	$0, RAX(%rsp)
-	jmp	2b
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
 .popsection

 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
arch/x86/events/intel/core.c:

@@ -3993,6 +3993,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
		struct perf_event *leader = event->group_leader;
		struct perf_event *sibling = NULL;

+		/*
+		 * When this memload event is also the first event (no group
+		 * exists yet), then there is no aux event before it.
+		 */
+		if (leader == event)
+			return -ENODATA;
+
		if (!is_mem_loads_aux_event(leader)) {
			for_each_sibling_event(sibling, leader) {
				if (is_mem_loads_aux_event(sibling))
arch/x86/include/asm/alternative.h:

@@ -96,7 +96,7 @@ extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 extern void apply_retpolines(s32 *start, s32 *end);
 extern void apply_returns(s32 *start, s32 *end);
-extern void apply_ibt_endbr(s32 *start, s32 *end);
+extern void apply_seal_endbr(s32 *start, s32 *end);
 extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoine,
			  s32 *start_cfi, s32 *end_cfi);
arch/x86/include/asm/ibt.h:

@@ -34,7 +34,7 @@
 /*
 * Create a dummy function pointer reference to prevent objtool from marking
 * the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
- * apply_ibt_endbr()).
+ * apply_seal_endbr()).
 */
 #define IBT_NOSEAL(fname)				\
	".pushsection .discard.ibt_endbr_noseal\n\t"	\
arch/x86/include/asm/nospec-branch.h:

@@ -234,6 +234,10 @@
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
+ *
+ * NOTE: these do not take kCFI into account and are thus not comparable to C
+ * indirect calls, take care when using. The target of these should be an ENDBR
+ * instruction irrespective of kCFI.
 */
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
arch/x86/include/asm/switch_to.h:

@@ -12,7 +12,9 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
 __visible struct task_struct *__switch_to(struct task_struct *prev,
					   struct task_struct *next);

-asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_fork_asm(void);
+__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
+			     int (*fn)(void *), void *fn_arg);

 /*
 * This is the structure pointed to by thread.sp for an inactive task. The
|
@ -778,6 +778,8 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
|
||||
|
||||
#ifdef CONFIG_X86_KERNEL_IBT
|
||||
|
||||
static void poison_cfi(void *addr);
|
||||
|
||||
static void __init_or_module poison_endbr(void *addr, bool warn)
|
||||
{
|
||||
u32 endbr, poison = gen_endbr_poison();
|
||||
@ -802,8 +804,11 @@ static void __init_or_module poison_endbr(void *addr, bool warn)
|
||||
|
||||
/*
|
||||
* Generated by: objtool --ibt
|
||||
*
|
||||
* Seal the functions for indirect calls by clobbering the ENDBR instructions
|
||||
* and the kCFI hash value.
|
||||
*/
|
||||
void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
|
||||
void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
|
||||
{
|
||||
s32 *s;
|
||||
|
||||
@ -812,13 +817,13 @@ void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
|
||||
|
||||
poison_endbr(addr, true);
|
||||
if (IS_ENABLED(CONFIG_FINEIBT))
|
||||
poison_endbr(addr - 16, false);
|
||||
poison_cfi(addr - 16);
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
void __init_or_module apply_ibt_endbr(s32 *start, s32 *end) { }
|
||||
void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
|
||||
|
||||
#endif /* CONFIG_X86_KERNEL_IBT */
|
||||
|
||||
@ -1063,6 +1068,17 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cfi_rewrite_endbr(s32 *start, s32 *end)
|
||||
{
|
||||
s32 *s;
|
||||
|
||||
for (s = start; s < end; s++) {
|
||||
void *addr = (void *)s + *s;
|
||||
|
||||
poison_endbr(addr+16, false);
|
||||
}
|
||||
}
|
||||
|
||||
/* .retpoline_sites */
|
||||
static int cfi_rand_callers(s32 *start, s32 *end)
|
||||
{
|
||||
@ -1157,14 +1173,19 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
|
||||
return;
|
||||
|
||||
case CFI_FINEIBT:
|
||||
/* place the FineIBT preamble at func()-16 */
|
||||
ret = cfi_rewrite_preamble(start_cfi, end_cfi);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
/* rewrite the callers to target func()-16 */
|
||||
ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
/* now that nobody targets func()+0, remove ENDBR there */
|
||||
cfi_rewrite_endbr(start_cfi, end_cfi);
|
||||
|
||||
if (builtin)
|
||||
pr_info("Using FineIBT CFI\n");
|
||||
return;
|
||||
@ -1177,6 +1198,41 @@ err:
|
||||
pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
|
||||
}
|
||||
|
||||
static inline void poison_hash(void *addr)
|
||||
{
|
||||
*(u32 *)addr = 0;
|
||||
}
|
||||
|
||||
static void poison_cfi(void *addr)
|
||||
{
|
||||
switch (cfi_mode) {
|
||||
case CFI_FINEIBT:
|
||||
/*
|
||||
* __cfi_\func:
|
||||
* osp nopl (%rax)
|
||||
* subl $0, %r10d
|
||||
* jz 1f
|
||||
* ud2
|
||||
* 1: nop
|
||||
*/
|
||||
poison_endbr(addr, false);
|
||||
poison_hash(addr + fineibt_preamble_hash);
|
||||
break;
|
||||
|
||||
case CFI_KCFI:
|
||||
/*
|
||||
* __cfi_\func:
|
||||
* movl $0, %eax
|
||||
* .skip 11, 0x90
|
||||
*/
|
||||
poison_hash(addr + 1);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
|
||||
@ -1184,6 +1240,10 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_KERNEL_IBT
|
||||
static void poison_cfi(void *addr) { }
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
|
||||
@ -1565,7 +1625,10 @@ void __init alternative_instructions(void)
|
||||
*/
|
||||
callthunks_patch_builtin_calls();
|
||||
|
||||
apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
|
||||
/*
|
||||
* Seal all functions that do not have their address taken.
|
||||
*/
|
||||
apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* Patch to UP if other cpus not imminent. */
|
||||
|
arch/x86/kernel/module.c:

@@ -358,7 +358,7 @@ int module_finalize(const Elf_Ehdr *hdr,
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
-		apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
+		apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks) {
		void *lseg = (void *)locks->sh_addr;
arch/x86/kernel/process.c:

@@ -28,6 +28,7 @@
 #include <linux/static_call.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/entry-common.h>
 #include <asm/cpu.h>
 #include <asm/apic.h>
 #include <linux/uaccess.h>

@@ -134,6 +135,25 @@ static int set_new_tls(struct task_struct *p, unsigned long tls)
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
 }

+__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
+			     int (*fn)(void *), void *fn_arg)
+{
+	schedule_tail(prev);
+
+	/* Is this a kernel thread? */
+	if (unlikely(fn)) {
+		fn(fn_arg);
+		/*
+		 * A kernel thread is allowed to return here after successfully
+		 * calling kernel_execve(). Exit to userspace to complete the
+		 * execve() syscall.
+		 */
+		regs->ax = 0;
+	}
+
+	syscall_exit_to_user_mode(regs);
+}
+
 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
	unsigned long clone_flags = args->flags;

@@ -149,7 +169,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
-	frame->ret_addr = (unsigned long) ret_from_fork;
+	frame->ret_addr = (unsigned long) ret_from_fork_asm;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	p->thread.iopl_warn = 0;
arch/xtensa/kernel/align.S:

@@ -1,7 +1,7 @@
 /*
 * arch/xtensa/kernel/align.S
 *
- * Handle unalignment exceptions in kernel space.
+ * Handle unalignment and load/store exceptions.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of

@@ -26,20 +26,18 @@
 #define LOAD_EXCEPTION_HANDLER
 #endif

-#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined LOAD_EXCEPTION_HANDLER
+#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE
 #define STORE_EXCEPTION_HANDLER
 #endif

 #if defined LOAD_EXCEPTION_HANDLER || defined STORE_EXCEPTION_HANDLER
 #define ANY_EXCEPTION_HANDLER
 #endif

-#if XCHAL_HAVE_WINDOWED
+#if XCHAL_HAVE_WINDOWED && defined CONFIG_MMU
 #define UNALIGNED_USER_EXCEPTION
 #endif

 /* First-level exception handler for unaligned exceptions.
 *
 * Note: This handler works only for kernel exceptions. Unaligned user
 * access should get a seg fault.
 */

 /* Big and little endian 16-bit values are located in
 * different halves of a register.  HWORD_START helps to
 * abstract the notion of extracting a 16-bit value from a

@@ -228,8 +226,6 @@ ENDPROC(fast_load_store)
 #ifdef ANY_EXCEPTION_HANDLER
 ENTRY(fast_unaligned)

-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-
	call0	.Lsave_and_load_instruction

	/* Analyze the instruction (load or store?). */

@@ -244,8 +240,7 @@ ENTRY(fast_unaligned)
	/* 'store indicator bit' not set, jump */
	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload

-#endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef STORE_EXCEPTION_HANDLER

	/* Store: Jump to table entry to get the value in the source register.*/

@@ -254,7 +249,7 @@ ENTRY(fast_unaligned)
	addx8	a5, a6, a5
	jx	a5			# jump into table
 #endif
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION
+#ifdef LOAD_EXCEPTION_HANDLER

	/* Load: Load memory address. */

@@ -328,7 +323,7 @@ ENTRY(fast_unaligned)
	mov	a14, a3		;	_j .Lexit;	.align 8
	mov	a15, a3		;	_j .Lexit;	.align 8
 #endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef STORE_EXCEPTION_HANDLER
 .Lstore_table:
	l32i	a3, a2, PT_AREG0;	_j .Lstore_w;	.align 8
	mov	a3, a1;			_j .Lstore_w;	.align 8	# fishy??

@@ -348,7 +343,6 @@ ENTRY(fast_unaligned)
	mov	a3, a15		;	_j .Lstore_w;	.align 8
 #endif

-#ifdef ANY_EXCEPTION_HANDLER
	/* We cannot handle this exception. */

	.extern _kernel_exception

@@ -377,8 +371,8 @@ ENTRY(fast_unaligned)

 2:	movi	a0, _user_exception
	jx	a0
 #endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef STORE_EXCEPTION_HANDLER

	# a7: instruction pointer, a4: instruction, a3: value
 .Lstore_w:

@@ -444,7 +438,7 @@ ENTRY(fast_unaligned)
	s32i	a6, a4, 4
 #endif
 #endif
-#ifdef ANY_EXCEPTION_HANDLER

 .Lexit:
 #if XCHAL_HAVE_LOOPS
	rsr	a4, lend		# check if we reached LEND

@@ -539,7 +533,7 @@ ENTRY(fast_unaligned)
	__src_b	a4, a4, a5	# a4 has the instruction

	ret
-#endif

 ENDPROC(fast_unaligned)

 ENTRY(fast_unaligned_fixup)
arch/xtensa/kernel/traps.c:

@@ -102,7 +102,8 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
 #endif
 { EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0,	   do_div0 },
 /* EXCCAUSE_PRIVILEGED unhandled */
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION || \
+		IS_ENABLED(CONFIG_XTENSA_LOAD_STORE)
 #ifdef CONFIG_XTENSA_UNALIGNED_USER
 { EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
 #endif
arch/xtensa/platforms/iss/network.c:

@@ -237,7 +237,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)

	init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
	if (*init == ',') {
-		rem = split_if_spec(init + 1, &mac_str, &dev_name);
+		rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL);
		if (rem != NULL) {
			pr_err("%s: extra garbage on specification : '%s'\n",
			       dev->name, rem);
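split_if_spec() walks its variadic arguments until it sees a NULL pointer, so every caller has to pass the terminating NULL explicitly; the fix above adds the missing sentinel. A self-contained sketch of the same contract, using a hypothetical split_fields() helper rather than the kernel function:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical helper with a split_if_spec()-style contract: fill each
 * char ** argument with the next comma-separated field, stopping at the
 * NULL sentinel. Without the sentinel the va_arg loop would read past
 * the real arguments into garbage.
 */
static char *split_fields(char *str, ...)
{
	char **field;
	va_list ap;

	va_start(ap, str);
	while ((field = va_arg(ap, char **)) != NULL) {
		if (str == NULL)
			break;
		*field = strsep(&str, ",");
	}
	va_end(ap);
	return str;	/* whatever is left over, if anything */
}

int main(void)
{
	char spec[] = "00:11:22:33:44:55,tap0,extra";
	char *mac = NULL, *dev = NULL;
	char *rem = split_fields(spec, &mac, &dev, NULL); /* NULL terminates */

	printf("mac=%s dev=%s rem=%s\n", mac, dev, rem ? rem : "(none)");
	return 0;
}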
@@ -540,6 +540,7 @@ static void iss_net_configure(int index, char *init)
		rtnl_unlock();
		pr_err("%s: error registering net device!\n", dev->name);
		platform_device_unregister(&lp->pdev);
+		/* dev is freed by the iss_net_pdev_release callback */
		return;
	}
	rtnl_unlock();
block/blk-crypto-profile.c:

@@ -79,7 +79,14 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
	unsigned int slot_hashtable_size;

	memset(profile, 0, sizeof(*profile));
-	init_rwsem(&profile->lock);
+
+	/*
+	 * profile->lock of an underlying device can nest inside profile->lock
+	 * of a device-mapper device, so use a dynamic lock class to avoid
+	 * false-positive lockdep reports.
+	 */
+	lockdep_register_key(&profile->lockdep_key);
+	__init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key);

	if (num_slots == 0)
		return 0;
@@ -89,7 +96,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
	profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
				  GFP_KERNEL);
	if (!profile->slots)
-		return -ENOMEM;
+		goto err_destroy;

	profile->num_slots = num_slots;
@@ -435,6 +442,7 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
 {
	if (!profile)
		return;
+	lockdep_unregister_key(&profile->lockdep_key);
	kvfree(profile->slot_hashtable);
	kvfree_sensitive(profile->slots,
			 sizeof(profile->slots[0]) * profile->num_slots);
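The same idiom applies to any structure whose lock can nest inside another instance of the same lock class: register a per-instance key so lockdep can tell the instances apart. A kernel-style sketch of the pattern; the names here are illustrative, not from the commit:

#include <linux/lockdep.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

/* Illustrative kernel-style sketch: per-instance lock classes let lockdep
 * distinguish "parent" and "child" instances of the same structure, so
 * parent->lock nesting inside child->lock is not flagged as recursion. */
struct layered_obj {
	struct rw_semaphore lock;
	struct lock_class_key lockdep_key;
};

static struct layered_obj *layered_obj_create(void)
{
	struct layered_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	lockdep_register_key(&obj->lockdep_key);
	__init_rwsem(&obj->lock, "&obj->lock", &obj->lockdep_key);
	return obj;
}

static void layered_obj_destroy(struct layered_obj *obj)
{
	lockdep_unregister_key(&obj->lockdep_key);
	kfree(obj);
}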
block/blk-flush.c:

@@ -189,7 +189,7 @@ static void blk_flush_complete_seq(struct request *rq,
	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		spin_lock(&q->requeue_lock);
-		list_add_tail(&rq->queuelist, &q->flush_list);
+		list_add(&rq->queuelist, &q->requeue_list);
		spin_unlock(&q->requeue_lock);
		blk_mq_kick_requeue_list(q);
		break;
block/blk-mq.c:

@@ -328,8 +328,24 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);

+/* Set start and alloc time when the allocated request is actually used */
+static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
+{
+	if (blk_mq_need_time_stamp(rq))
+		rq->start_time_ns = ktime_get_ns();
+	else
+		rq->start_time_ns = 0;
+
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+	if (blk_queue_rq_alloc_time(rq->q))
+		rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
+	else
+		rq->alloc_time_ns = 0;
+#endif
+}
+
 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
-		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
+		struct blk_mq_tags *tags, unsigned int tag)
 {
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
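The refactor moves start/alloc timestamping out of blk_mq_rq_ctx_init() into blk_mq_rq_time_init(), called only when a request is actually used, so pre-allocated cached requests are not stamped with stale times. The shape of that pattern in a userspace sketch, with hypothetical types standing in for the block layer's:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the block layer's request bookkeeping. */
struct req {
	uint64_t start_time_ns;
	uint64_t alloc_time_ns;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Init-time: build the object but leave timestamps unset (cheap). */
static void req_ctx_init(struct req *rq)
{
	rq->start_time_ns = 0;
	rq->alloc_time_ns = 0;
}

/* Use-time: stamp only when the cached object is actually dispatched,
 * mirroring blk_mq_rq_time_init(rq, alloc_time_ns). */
static void req_time_init(struct req *rq, uint64_t alloc_time_ns)
{
	rq->start_time_ns = now_ns();
	rq->alloc_time_ns = alloc_time_ns ? alloc_time_ns : rq->start_time_ns;
}

int main(void)
{
	struct req rq;

	req_ctx_init(&rq);	/* e.g. placed into a plugged request cache */
	/* ... request sits in the cache for a while ... */
	req_time_init(&rq, 0);	/* stamped at first real use */
	printf("start=%llu alloc=%llu\n",
	       (unsigned long long)rq.start_time_ns,
	       (unsigned long long)rq.alloc_time_ns);
	return 0;
}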
@@ -356,14 +372,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
	}
	rq->timeout = 0;

-	if (blk_mq_need_time_stamp(rq))
-		rq->start_time_ns = ktime_get_ns();
-	else
-		rq->start_time_ns = 0;
	rq->part = NULL;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
-	rq->alloc_time_ns = alloc_time_ns;
-#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
|
||||
}
|
||||
|
||||
static inline struct request *
|
||||
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
|
||||
u64 alloc_time_ns)
|
||||
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
|
||||
{
|
||||
unsigned int tag, tag_offset;
|
||||
struct blk_mq_tags *tags;
|
||||
@ -413,7 +421,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
|
||||
tag = tag_offset + i;
|
||||
prefetch(tags->static_rqs[tag]);
|
||||
tag_mask &= ~(1UL << i);
|
||||
rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
|
||||
rq = blk_mq_rq_ctx_init(data, tags, tag);
|
||||
rq_list_add(data->cached_rq, rq);
|
||||
nr++;
|
||||
}
|
||||
@@ -474,9 +482,11 @@ retry:
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
-		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
-		if (rq)
+		rq = __blk_mq_alloc_requests_batch(data);
+		if (rq) {
+			blk_mq_rq_time_init(rq, alloc_time_ns);
			return rq;
+		}
		data->nr_tags = 1;
	}
@@ -499,8 +509,9 @@ retry:
		goto retry;
	}

-	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
-					alloc_time_ns);
+	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
+	blk_mq_rq_time_init(rq, alloc_time_ns);
+	return rq;
 }

 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
|
||||
@ -555,6 +566,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
|
||||
return NULL;
|
||||
|
||||
plug->cached_rq = rq_list_next(rq);
|
||||
blk_mq_rq_time_init(rq, 0);
|
||||
}
|
||||
|
||||
rq->cmd_flags = opf;
|
||||
@@ -656,8 +668,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
-	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
-					alloc_time_ns);
+	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
+	blk_mq_rq_time_init(rq, alloc_time_ns);
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
@@ -2896,6 +2908,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
	plug->cached_rq = rq_list_next(rq);
	rq_qos_throttle(q, *bio);

+	blk_mq_rq_time_init(rq, 0);
	rq->cmd_flags = (*bio)->bi_opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
block/blk-zoned.c:

@@ -442,7 +442,6 @@ struct blk_revalidate_zone_args {
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
-	sector_t	zone_sectors;
	sector_t	sector;
 };

@@ -456,38 +455,34 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);
+	sector_t zone_sectors = q->limits.chunk_sectors;
+
+	/* Check for bad zones and holes in the zone report */
+	if (zone->start != args->sector) {
+		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+			disk->disk_name, args->sector, zone->start);
+		return -ENODEV;
+	}
+
+	if (zone->start >= capacity || !zone->len) {
+		pr_warn("%s: Invalid zone start %llu, length %llu\n",
+			disk->disk_name, zone->start, zone->len);
+		return -ENODEV;
+	}

	/*
	 * All zones must have the same size, with the exception on an eventual
	 * smaller last zone.
	 */
-	if (zone->start == 0) {
-		if (zone->len == 0 || !is_power_of_2(zone->len)) {
-			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
-				disk->disk_name, zone->len);
-			return -ENODEV;
-		}
-
-		args->zone_sectors = zone->len;
-		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
-	} else if (zone->start + args->zone_sectors < capacity) {
-		if (zone->len != args->zone_sectors) {
+	if (zone->start + zone->len < capacity) {
+		if (zone->len != zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
-	} else {
-		if (zone->len > args->zone_sectors) {
-			pr_warn("%s: Invalid zoned device with larger last zone size\n",
-				disk->disk_name);
-			return -ENODEV;
-		}
-	}
-
-	/* Check for holes in the zone report */
-	if (zone->start != args->sector) {
-		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
-			disk->disk_name, args->sector, zone->start);
+	} else if (zone->len > zone_sectors) {
+		pr_warn("%s: Invalid zoned device with larger last zone size\n",
+			disk->disk_name);
		return -ENODEV;
	}
@ -526,11 +521,13 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
|
||||
* @disk: Target disk
|
||||
* @update_driver_data: Callback to update driver data on the frozen disk
|
||||
*
|
||||
* Helper function for low-level device drivers to (re) allocate and initialize
|
||||
* a disk request queue zone bitmaps. This functions should normally be called
|
||||
* within the disk ->revalidate method for blk-mq based drivers. For BIO based
|
||||
* drivers only q->nr_zones needs to be updated so that the sysfs exposed value
|
||||
* is correct.
|
||||
* Helper function for low-level device drivers to check and (re) allocate and
|
||||
* initialize a disk request queue zone bitmaps. This functions should normally
|
||||
* be called within the disk ->revalidate method for blk-mq based drivers.
|
||||
* Before calling this function, the device driver must already have set the
|
||||
* device zone size (chunk_sector limit) and the max zone append limit.
|
||||
* For BIO based drivers, this function cannot be used. BIO based device drivers
|
||||
* only need to set disk->nr_zones so that the sysfs exposed value is correct.
|
||||
* If the @update_driver_data callback function is not NULL, the callback is
|
||||
* executed with the device request queue frozen after all zones have been
|
||||
* checked.
|
||||
@ -539,9 +536,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
|
||||
void (*update_driver_data)(struct gendisk *disk))
|
||||
{
|
||||
struct request_queue *q = disk->queue;
|
||||
struct blk_revalidate_zone_args args = {
|
||||
.disk = disk,
|
||||
};
|
||||
sector_t zone_sectors = q->limits.chunk_sectors;
|
||||
sector_t capacity = get_capacity(disk);
|
||||
struct blk_revalidate_zone_args args = { };
|
||||
unsigned int noio_flag;
|
||||
int ret;
|
||||
|
||||
@ -550,13 +547,31 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
|
||||
if (WARN_ON_ONCE(!queue_is_mq(q)))
|
||||
return -EIO;
|
||||
|
||||
if (!get_capacity(disk))
|
||||
return -EIO;
|
||||
if (!capacity)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* Checks that the device driver indicated a valid zone size and that
|
||||
* the max zone append limit is set.
|
||||
*/
|
||||
if (!zone_sectors || !is_power_of_2(zone_sectors)) {
|
||||
pr_warn("%s: Invalid non power of two zone size (%llu)\n",
|
||||
disk->disk_name, zone_sectors);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!q->limits.max_zone_append_sectors) {
|
||||
pr_warn("%s: Invalid 0 maximum zone append limit\n",
|
||||
disk->disk_name);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that all memory allocations in this context are done as if
|
||||
* GFP_NOIO was specified.
|
||||
*/
|
||||
args.disk = disk;
|
||||
args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
|
||||
noio_flag = memalloc_noio_save();
|
||||
ret = disk->fops->report_zones(disk, 0, UINT_MAX,
|
||||
blk_revalidate_zone_cb, &args);
|
||||
@ -570,7 +585,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
|
||||
* If zones where reported, make sure that the entire disk capacity
|
||||
* has been checked.
|
||||
*/
|
||||
if (ret > 0 && args.sector != get_capacity(disk)) {
|
||||
if (ret > 0 && args.sector != capacity) {
|
||||
pr_warn("%s: Missing zones from sector %llu\n",
|
||||
disk->disk_name, args.sector);
|
||||
ret = -ENODEV;
|
||||
@ -583,7 +598,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
|
||||
*/
|
||||
blk_mq_freeze_queue(q);
|
||||
if (ret > 0) {
|
||||
blk_queue_chunk_sectors(q, args.zone_sectors);
|
||||
disk->nr_zones = args.nr_zones;
|
||||
swap(disk->seq_zones_wlock, args.seq_zones_wlock);
|
||||
swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
|
||||
|
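Because the zone size is required to be a power of two, the zone count above is a round-up division done with a shift. A worked example with hypothetical numbers, not taken from the patch:

/* Hypothetical values for illustration only. */
sector_t capacity = 7813054768ULL;   /* ~3.9 TB disk, in 512 B sectors */
sector_t zone_sectors = 524288;      /* 256 MiB zones: 2^19 sectors */

/* Round-up division via shift; valid because zone_sectors is 2^19. */
unsigned int nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
/* == 14903; the final zone is a smaller, partial zone. */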
@@ -176,7 +176,7 @@ static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
     * zoned writes, start searching from the start of a zone.
     */
    if (blk_rq_is_seq_zoned_write(rq))
        pos -= round_down(pos, rq->q->limits.chunk_sectors);
        pos = round_down(pos, rq->q->limits.chunk_sectors);

    while (node) {
        rq = rb_entry_rq(node);
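The one-character fix above matters: subtracting the rounded value yields the offset into the zone rather than the zone's start sector. With hypothetical numbers:

/* Hypothetical values: 256 MiB zones (524288 sectors), pos in zone 3. */
sector_t chunk = 524288;
sector_t pos = 3 * 524288 + 1000;             /* 1573864 */

sector_t zone_start = round_down(pos, chunk); /* 1572864: correct start */
sector_t old_result = pos - zone_start;       /* 1000: wrong, an offset */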
@ -90,7 +90,7 @@ int amiga_partition(struct parsed_partitions *state)
|
||||
}
|
||||
blk = be32_to_cpu(rdb->rdb_PartitionList);
|
||||
put_dev_sector(sect);
|
||||
for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
|
||||
for (part = 1; (s32) blk>0 && part<=16; part++, put_dev_sector(sect)) {
|
||||
/* Read in terms partition table understands */
|
||||
if (check_mul_overflow(blk, (sector_t) blksize, &blk)) {
|
||||
pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n",
|
||||
|
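With blk now an unsigned 64-bit sector_t, the plain `blk > 0` test no longer terminates on the RDB end-of-list marker, which is why the cast is needed:

/* Why the (s32) cast: hypothetical demonstration. */
u64 blk = 0xFFFFFFFF;      /* RDB end-of-partition-list marker */

/* blk > 0        -> true:  would walk past the end of the list */
/* (s32) blk > 0  -> false: 0xFFFFFFFF is -1 as a signed 32-bit */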
@@ -75,6 +75,7 @@ struct ivpu_wa_table {
    bool punit_disabled;
    bool clear_runtime_mem;
    bool d3hot_after_power_off;
    bool interrupt_clear_with_0;
};

struct ivpu_hw_info;

@@ -101,6 +101,9 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
    vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
    vdev->wa.clear_runtime_mem = false;
    vdev->wa.d3hot_after_power_off = true;

    if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
        vdev->wa.interrupt_clear_with_0 = true;
}

static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@@ -885,7 +888,7 @@ static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
    REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
    REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
    REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
    REGB_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
    REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}

static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
@@ -973,12 +976,15 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
        schedule_recovery = true;
    }

    /*
     * Clear local interrupt status by writing 0 to all bits.
     * This must be done after interrupts are cleared at the source.
     * Writing 1 triggers an interrupt, so we can't perform read update write.
     */
    REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
    /* This must be done after interrupts are cleared at the source. */
    if (IVPU_WA(interrupt_clear_with_0))
        /*
         * Writing 1 triggers an interrupt, so we can't perform read update write.
         * Clear local interrupt status by writing 0 to all bits.
         */
        REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
    else
        REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);

    /* Re-enable global interrupt */
    REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);

@@ -717,7 +717,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
    if (!d->config_buf)
        goto err_alloc;

    for (i = 0; i < chip->num_config_regs; i++) {
    for (i = 0; i < chip->num_config_bases; i++) {
        d->config_buf[i] = kcalloc(chip->num_config_regs,
                       sizeof(**d->config_buf),
                       GFP_KERNEL);
@@ -162,21 +162,15 @@ int null_register_zoned_dev(struct nullb *nullb)
    disk_set_zoned(nullb->disk, BLK_ZONED_HM);
    blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
    blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

    if (queue_is_mq(q)) {
        int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

        if (ret)
            return ret;
    } else {
        blk_queue_chunk_sectors(q, dev->zone_size_sects);
        nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
    }

    blk_queue_chunk_sectors(q, dev->zone_size_sects);
    nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
    blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
    disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
    disk_set_max_active_zones(nullb->disk, dev->zone_max_active);

    if (queue_is_mq(q))
        return blk_revalidate_disk_zones(nullb->disk, NULL);

    return 0;
}
@@ -751,7 +751,6 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
{
    u32 v, wg;
    u8 model;
    int ret;

    virtio_cread(vdev, struct virtio_blk_config,
             zoned.model, &model);
@@ -806,6 +805,7 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
             vblk->zone_sectors);
        return -ENODEV;
    }
    blk_queue_chunk_sectors(q, vblk->zone_sectors);
    dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

    if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
@@ -814,26 +814,22 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
        blk_queue_max_discard_sectors(q, 0);
    }

    ret = blk_revalidate_disk_zones(vblk->disk, NULL);
    if (!ret) {
        virtio_cread(vdev, struct virtio_blk_config,
                 zoned.max_append_sectors, &v);
        if (!v) {
            dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
            return -ENODEV;
        }
        if ((v << SECTOR_SHIFT) < wg) {
            dev_err(&vdev->dev,
                "write granularity %u exceeds max_append_sectors %u limit\n",
                wg, v);
            return -ENODEV;
        }

        blk_queue_max_zone_append_sectors(q, v);
        dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
    virtio_cread(vdev, struct virtio_blk_config,
             zoned.max_append_sectors, &v);
    if (!v) {
        dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
        return -ENODEV;
    }
    if ((v << SECTOR_SHIFT) < wg) {
        dev_err(&vdev->dev,
            "write granularity %u exceeds max_append_sectors %u limit\n",
            wg, v);
        return -ENODEV;
    }
    blk_queue_max_zone_append_sectors(q, v);
    dev_dbg(&vdev->dev, "max append sectors = %u\n", v);

    return ret;
    return blk_revalidate_disk_zones(vblk->disk, NULL);
}

#else
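null_blk and virtio-blk get the same reordering because blk_revalidate_disk_zones() now validates that the zone size and the max zone append limit are already set: drivers must configure every queue limit first and revalidate last. A condensed sketch of the required probe-time ordering, with placeholder names:

/* Sketch of the ordering contract; names are placeholders. */
static int my_zoned_driver_probe(struct my_dev *dev, struct gendisk *disk)
{
    struct request_queue *q = disk->queue;

    disk_set_zoned(disk, BLK_ZONED_HM);

    /* 1. All limits first: zone size and zone-append maximum ... */
    blk_queue_chunk_sectors(q, dev->zone_size_sects);
    blk_queue_max_zone_append_sectors(q, dev->zone_append_max_sects);

    /* 2. ... only then scan and validate the zone layout. */
    return blk_revalidate_disk_zones(disk, NULL);
}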
@@ -4104,6 +4104,7 @@ static int btusb_probe(struct usb_interface *intf,
    BT_DBG("intf %p id %p", intf, id);

    if ((id->driver_info & BTUSB_IFNUM_2) &&
        (intf->cur_altsetting->desc.bInterfaceNumber != 0) &&
        (intf->cur_altsetting->desc.bInterfaceNumber != 2))
        return -ENODEV;
@@ -518,6 +518,7 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
 *  6.x.y.z series: 6.0.18.6 +
 *  3.x.y.z series: 3.57.y.5 +
 */
#ifdef CONFIG_X86
static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
{
    u32 val1, val2;
@@ -566,6 +567,12 @@ release:

    return true;
}
#else
static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
{
    return false;
}
#endif /* CONFIG_X86 */

static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
@@ -563,15 +563,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
    u32 rsp_size;
    int ret;

    INIT_LIST_HEAD(&acpi_resource_list);
    ret = acpi_dev_get_resources(device, &acpi_resource_list,
                     crb_check_resource, iores_array);
    if (ret < 0)
        return ret;
    acpi_dev_free_resource_list(&acpi_resource_list);

    /* Pluton doesn't appear to define ACPI memory regions */
    /*
     * Pluton sometimes does not define ACPI memory regions.
     * Mapping is then done in crb_map_pluton
     */
    if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
        INIT_LIST_HEAD(&acpi_resource_list);
        ret = acpi_dev_get_resources(device, &acpi_resource_list,
                         crb_check_resource, iores_array);
        if (ret < 0)
            return ret;
        acpi_dev_free_resource_list(&acpi_resource_list);

        if (resource_type(iores_array) != IORESOURCE_MEM) {
            dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
            return -EINVAL;
@@ -114,6 +114,22 @@ static int tpm_tis_disable_irq(const struct dmi_system_id *d)
}

static const struct dmi_system_id tpm_tis_dmi_table[] = {
    {
        .callback = tpm_tis_disable_irq,
        .ident = "Framework Laptop (12th Gen Intel Core)",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
            DMI_MATCH(DMI_PRODUCT_NAME, "Laptop (12th Gen Intel Core)"),
        },
    },
    {
        .callback = tpm_tis_disable_irq,
        .ident = "Framework Laptop (13th Gen Intel Core)",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
            DMI_MATCH(DMI_PRODUCT_NAME, "Laptop (13th Gen Intel Core)"),
        },
    },
    {
        .callback = tpm_tis_disable_irq,
        .ident = "ThinkPad T490s",
@@ -138,11 +154,20 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
            DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
        },
    },
    {
        .callback = tpm_tis_disable_irq,
        .ident = "ThinkPad L590",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
            DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L590"),
        },
    },
    {
        .callback = tpm_tis_disable_irq,
        .ident = "UPX-TGL",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
            DMI_MATCH(DMI_PRODUCT_VERSION, "UPX-TGL"),
        },
    },
    {}
@@ -24,9 +24,12 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include <linux/dmi.h>
#include "tpm.h"
#include "tpm_tis_core.h"

#define TPM_TIS_MAX_UNHANDLED_IRQS 1000

static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);

static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
@@ -468,25 +471,29 @@ out_err:
    return rc;
}

static void disable_interrupts(struct tpm_chip *chip)
static void __tpm_tis_disable_interrupts(struct tpm_chip *chip)
{
    struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
    u32 int_mask = 0;

    tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &int_mask);
    int_mask &= ~TPM_GLOBAL_INT_ENABLE;
    tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), int_mask);

    chip->flags &= ~TPM_CHIP_FLAG_IRQ;
}

static void tpm_tis_disable_interrupts(struct tpm_chip *chip)
{
    struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
    u32 intmask;
    int rc;

    if (priv->irq == 0)
        return;

    rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
    if (rc < 0)
        intmask = 0;

    intmask &= ~TPM_GLOBAL_INT_ENABLE;
    rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
    __tpm_tis_disable_interrupts(chip);

    devm_free_irq(chip->dev.parent, priv->irq, chip);
    priv->irq = 0;
    chip->flags &= ~TPM_CHIP_FLAG_IRQ;
}

/*
@@ -552,7 +559,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
    if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
        tpm_msleep(1);
    if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
        disable_interrupts(chip);
        tpm_tis_disable_interrupts(chip);
    set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
    return rc;
}
@@ -752,6 +759,57 @@ static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
    return status == TPM_STS_COMMAND_READY;
}

static irqreturn_t tpm_tis_revert_interrupts(struct tpm_chip *chip)
{
    struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
    const char *product;
    const char *vendor;

    dev_warn(&chip->dev, FW_BUG
         "TPM interrupt storm detected, polling instead\n");

    vendor = dmi_get_system_info(DMI_SYS_VENDOR);
    product = dmi_get_system_info(DMI_PRODUCT_VERSION);

    if (vendor && product) {
        dev_info(&chip->dev,
             "Consider adding the following entry to tpm_tis_dmi_table:\n");
        dev_info(&chip->dev, "\tDMI_SYS_VENDOR: %s\n", vendor);
        dev_info(&chip->dev, "\tDMI_PRODUCT_VERSION: %s\n", product);
    }

    if (tpm_tis_request_locality(chip, 0) != 0)
        return IRQ_NONE;

    __tpm_tis_disable_interrupts(chip);
    tpm_tis_relinquish_locality(chip, 0);

    schedule_work(&priv->free_irq_work);

    return IRQ_HANDLED;
}

static irqreturn_t tpm_tis_update_unhandled_irqs(struct tpm_chip *chip)
{
    struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
    irqreturn_t irqret = IRQ_HANDLED;

    if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
        return IRQ_HANDLED;

    if (time_after(jiffies, priv->last_unhandled_irq + HZ/10))
        priv->unhandled_irqs = 1;
    else
        priv->unhandled_irqs++;

    priv->last_unhandled_irq = jiffies;

    if (priv->unhandled_irqs > TPM_TIS_MAX_UNHANDLED_IRQS)
        irqret = tpm_tis_revert_interrupts(chip);

    return irqret;
}

static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
    struct tpm_chip *chip = dev_id;
@@ -761,10 +819,10 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)

    rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
    if (rc < 0)
        return IRQ_NONE;
        goto err;

    if (interrupt == 0)
        return IRQ_NONE;
        goto err;

    set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
    if (interrupt & TPM_INTF_DATA_AVAIL_INT)
@@ -780,10 +838,13 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
    rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
    tpm_tis_relinquish_locality(chip, 0);
    if (rc < 0)
        return IRQ_NONE;
        goto err;

    tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
    return IRQ_HANDLED;

err:
    return tpm_tis_update_unhandled_irqs(chip);
}

static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
@@ -804,6 +865,15 @@ static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
    chip->flags &= ~TPM_CHIP_FLAG_IRQ;
}

static void tpm_tis_free_irq_func(struct work_struct *work)
{
    struct tpm_tis_data *priv = container_of(work, typeof(*priv), free_irq_work);
    struct tpm_chip *chip = priv->chip;

    devm_free_irq(chip->dev.parent, priv->irq, chip);
    priv->irq = 0;
}

/* Register the IRQ and issue a command that will cause an interrupt. If an
 * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
 * everything and leave in polling mode. Returns 0 on success.
@@ -816,6 +886,7 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
    int rc;
    u32 int_status;

    INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);

    rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
                       tis_int_handler, IRQF_ONESHOT | flags,
@@ -918,6 +989,7 @@ void tpm_tis_remove(struct tpm_chip *chip)
        interrupt = 0;

    tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
    flush_work(&priv->free_irq_work);

    tpm_tis_clkrun_enable(chip, false);

@@ -1021,6 +1093,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
    chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
    chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
    chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
    priv->chip = chip;
    priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
    priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
    priv->phy_ops = phy_ops;
@@ -1179,7 +1252,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
            rc = tpm_tis_request_locality(chip, 0);
            if (rc < 0)
                goto out_err;
            disable_interrupts(chip);
            tpm_tis_disable_interrupts(chip);
            tpm_tis_relinquish_locality(chip, 0);
        }
    }
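The storm detection above is a simple windowed rate limiter: unhandled interrupts are counted, the counter restarts whenever more than 100 ms (HZ/10 jiffies) pass between them, and once more than TPM_TIS_MAX_UNHANDLED_IRQS (1000) accumulate in one window the driver reverts to polling from a workqueue, since an IRQ cannot be freed from its own handler. The counter in isolation, with hypothetical names:

/* Standalone sketch of the windowed counter; not kernel code. */
struct storm_state {
    unsigned long last_event;   /* timestamp of last unhandled IRQ */
    unsigned int  count;        /* events inside the current window */
};

/* Returns true when the caller should fall back to polling. */
static bool storm_detected(struct storm_state *s, unsigned long now,
               unsigned long window, unsigned int limit)
{
    if (now - s->last_event > window)
        s->count = 1;     /* quiet period: restart the window */
    else
        s->count++;

    s->last_event = now;
    return s->count > limit;  /* e.g. window = 100 ms, limit = 1000 */
}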
@@ -91,11 +91,15 @@ enum tpm_tis_flags {
};

struct tpm_tis_data {
    struct tpm_chip *chip;
    u16 manufacturer_id;
    struct mutex locality_count_mutex;
    unsigned int locality_count;
    int locality;
    int irq;
    struct work_struct free_irq_work;
    unsigned long last_unhandled_irq;
    unsigned int unhandled_irqs;
    unsigned int int_mask;
    unsigned long flags;
    void __iomem *ilb_base_addr;
@@ -189,21 +189,28 @@ static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
    int ret;

    for (i = 0; i < TPM_RETRY; i++) {
        /* write register */
        msg.len = sizeof(reg);
        msg.buf = &reg;
        msg.flags = 0;
        ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
        if (ret < 0)
            return ret;
        u16 read = 0;

        /* read data */
        msg.buf = result;
        msg.len = len;
        msg.flags = I2C_M_RD;
        ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
        if (ret < 0)
            return ret;
        while (read < len) {
            /* write register */
            msg.len = sizeof(reg);
            msg.buf = &reg;
            msg.flags = 0;
            ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
            if (ret < 0)
                return ret;

            /* read data */
            msg.buf = result + read;
            msg.len = len - read;
            msg.flags = I2C_M_RD;
            if (msg.len > I2C_SMBUS_BLOCK_MAX)
                msg.len = I2C_SMBUS_BLOCK_MAX;
            ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
            if (ret < 0)
                return ret;
            read += msg.len;
        }

        ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
        if (ret == 0)
@@ -223,19 +230,27 @@ static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
    struct i2c_msg msg = { .addr = phy->i2c_client->addr };
    u8 reg = tpm_tis_i2c_address_to_register(addr);
    int ret;
    u16 wrote = 0;

    if (len > TPM_BUFSIZE - 1)
        return -EIO;

    /* write register and data in one go */
    phy->io_buf[0] = reg;
    memcpy(phy->io_buf + sizeof(reg), value, len);

    msg.len = sizeof(reg) + len;
    msg.buf = phy->io_buf;
    ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
    if (ret < 0)
        return ret;
    while (wrote < len) {
        /* write register and data in one go */
        msg.len = sizeof(reg) + len - wrote;
        if (msg.len > I2C_SMBUS_BLOCK_MAX)
            msg.len = I2C_SMBUS_BLOCK_MAX;

        memcpy(phy->io_buf + sizeof(reg), value + wrote,
               msg.len - sizeof(reg));

        ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
        if (ret < 0)
            return ret;
        wrote += msg.len - sizeof(reg);
    }

    return 0;
}
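Both hunks wrap what used to be a single transfer in a loop so that no i2c_msg ever exceeds I2C_SMBUS_BLOCK_MAX (32 bytes), a limit some I2C controllers cannot exceed per transfer. The chunk arithmetic in isolation, with a hypothetical total size:

/* Standalone illustration of the chunking above; not driver code. */
#include <stdint.h>

#define I2C_SMBUS_BLOCK_MAX 32   /* as in the uapi i2c header */

static uint16_t next_chunk(uint16_t total, uint16_t done)
{
    uint16_t remaining = total - done;

    return remaining > I2C_SMBUS_BLOCK_MAX ? I2C_SMBUS_BLOCK_MAX
                           : remaining;
}
/* For a 70-byte read: chunks of 32, 32 and 6 bytes, with the
 * register address re-written before each chunk as in the loop. */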
@@ -136,6 +136,14 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
    }

exit:
    if (ret < 0) {
        /* Deactivate chip select */
        memset(&spi_xfer, 0, sizeof(spi_xfer));
        spi_message_init(&m);
        spi_message_add_tail(&spi_xfer, &m);
        spi_sync_locked(phy->spi_device, &m);
    }

    spi_bus_unlock(phy->spi_device->master);
    return ret;
}
@@ -683,37 +683,21 @@ static struct miscdevice vtpmx_miscdev = {
    .fops = &vtpmx_fops,
};

static int vtpmx_init(void)
{
    return misc_register(&vtpmx_miscdev);
}

static void vtpmx_cleanup(void)
{
    misc_deregister(&vtpmx_miscdev);
}

static int __init vtpm_module_init(void)
{
    int rc;

    rc = vtpmx_init();
    if (rc) {
        pr_err("couldn't create vtpmx device\n");
        return rc;
    }

    workqueue = create_workqueue("tpm-vtpm");
    if (!workqueue) {
        pr_err("couldn't create workqueue\n");
        rc = -ENOMEM;
        goto err_vtpmx_cleanup;
        return -ENOMEM;
    }

    return 0;

err_vtpmx_cleanup:
    vtpmx_cleanup();
    rc = misc_register(&vtpmx_miscdev);
    if (rc) {
        pr_err("couldn't create vtpmx device\n");
        destroy_workqueue(workqueue);
    }

    return rc;
}
@@ -721,7 +705,7 @@ err_vtpmx_cleanup:
static void __exit vtpm_module_exit(void)
{
    destroy_workqueue(workqueue);
    vtpmx_cleanup();
    misc_deregister(&vtpmx_miscdev);
}

module_init(vtpm_module_init);
@@ -269,7 +269,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
    return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
}

static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
static int us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int cpu = policy->cpu;
    unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;

@@ -117,7 +117,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
    return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
}

static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
static int us3_freq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int cpu = policy->cpu;
    unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
@@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
{
    struct dma_fence_array *result;
    struct dma_fence *tmp, **array;
    ktime_t timestamp;
    unsigned int i;
    size_t count;

    count = 0;
    timestamp = ns_to_ktime(0);
    for (i = 0; i < num_fences; ++i) {
        dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
            if (!dma_fence_is_signaled(tmp))
        dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
            if (!dma_fence_is_signaled(tmp)) {
                ++count;
            } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
                        &tmp->flags)) {
                if (ktime_after(tmp->timestamp, timestamp))
                    timestamp = tmp->timestamp;
            } else {
                /*
                 * Use the current time if the fence is
                 * currently signaling.
                 */
                timestamp = ktime_get();
            }
        }
    }

    /*
     * If we couldn't find a pending fence just return a private signaled
     * fence with the timestamp of the last signaled one.
     */
    if (count == 0)
        return dma_fence_get_stub();
        return dma_fence_allocate_private_stub(timestamp);

    array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
    if (!array)
@@ -138,7 +156,7 @@ restart:
    } while (tmp);

    if (count == 0) {
        tmp = dma_fence_get_stub();
        tmp = dma_fence_allocate_private_stub(ktime_get());
        goto return_tmp;
    }

@@ -150,16 +150,17 @@ EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 * @timestamp: timestamp when the fence was signaled
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(void)
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
    struct dma_fence *fence;

    fence = kzalloc(sizeof(*fence), GFP_KERNEL);
    if (fence == NULL)
        return ERR_PTR(-ENOMEM);
        return NULL;

    dma_fence_init(fence,
               &dma_fence_stub_ops,
@@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
    set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
        &fence->flags);

    dma_fence_signal(fence);
    dma_fence_signal_timestamp(fence, timestamp);

    return fence;
}
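The API change ripples to callers in two ways: the stub now carries a caller-supplied signal timestamp, and allocation failure is reported as NULL rather than ERR_PTR(-ENOMEM). The drm_syncobj hunk further below switches from IS_ERR() to a NULL check for exactly this reason. A minimal calling sketch:

/* Minimal sketch of the new calling convention. */
struct dma_fence *fence;

fence = dma_fence_allocate_private_stub(ktime_get());
if (!fence)             /* was: if (IS_ERR(fence))    */
    return -ENOMEM;     /* was: return PTR_ERR(fence); */

/* fence is already signaled, stamped with the time passed in. */
dma_fence_put(fence);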
@@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);

@@ -2881,6 +2881,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        if (!attachment->is_mapped)
            continue;

        if (attachment->bo_va->base.bo->tbo.pin_count)
            continue;

        kfd_mem_dmaunmap_attachment(mem, attachment);
        ret = update_gpuvm_pte(mem, attachment, &sync_obj);
        if (ret) {

@@ -1458,6 +1458,25 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
    return true;
}

/*
 * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
 * speed switching. Until we have confirmation from Intel that a specific host
 * supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
    struct cpuinfo_x86 *c = &cpu_data(0);

    if (c->x86_vendor == X86_VENDOR_INTEL)
        return false;
#endif
    return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *

@@ -295,5 +295,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
                      uint32_t *size,
                      uint32_t pptable_id);

int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
                     uint32_t pcie_gen_cap,
                     uint32_t pcie_width_cap);

#endif
#endif

@@ -2113,7 +2113,6 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -2130,6 +2129,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}

@@ -3021,7 +3021,6 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -3038,6 +3037,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}
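The same locking fix repeats across the SMU i2c_xfer implementations (arcturus, navi10, and sienna_cichlid, aldebaran, smu_v13_0_0 and smu_v13_0_6 below): the early mutex_unlock() is dropped because the fail label also unlocks, so the error path used to release pm.mutex twice. The corrected pattern, reduced to its shape with placeholder names:

/* Shape of the fix; names are placeholders for the pattern above. */
static int xfer(struct ctx *c)
{
    int r;

    mutex_lock(&c->lock);
    r = do_transfer(c);     /* no unlock here anymore */
    if (r)
        goto fail;

    /* ... process the result while still holding the lock ... */
    r = 0;
fail:
    mutex_unlock(&c->lock); /* single unlock on every path */
    return r;
}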
@@ -2077,89 +2077,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
    return ret;
}

static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
                              uint32_t *gen_speed_override,
                              uint32_t *lane_width_override)
{
    struct amdgpu_device *adev = smu->adev;

    *gen_speed_override = 0xff;
    *lane_width_override = 0xff;

    switch (adev->pdev->device) {
    case 0x73A0:
    case 0x73A1:
    case 0x73A2:
    case 0x73A3:
    case 0x73AB:
    case 0x73AE:
        /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
        *lane_width_override = 6;
        break;
    case 0x73E0:
    case 0x73E1:
    case 0x73E3:
        *lane_width_override = 4;
        break;
    case 0x7420:
    case 0x7421:
    case 0x7422:
    case 0x7423:
    case 0x7424:
        *lane_width_override = 3;
        break;
    default:
        break;
    }
}

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                         uint32_t pcie_gen_cap,
                         uint32_t pcie_width_cap)
{
    struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
    uint32_t gen_speed_override, lane_width_override;
    uint8_t *table_member1, *table_member2;
    uint32_t min_gen_speed, max_gen_speed;
    uint32_t min_lane_width, max_lane_width;
    uint32_t smu_pcie_arg;
    u32 smu_pcie_arg;
    int ret, i;

    GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
    GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
    /* PCIE gen speed and lane width override */
    if (!amdgpu_device_pcie_dynamic_switching_supported()) {
        if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
            pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];

    sienna_cichlid_get_override_pcie_settings(smu,
                          &gen_speed_override,
                          &lane_width_override);
        if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
            pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];

    /* PCIE gen speed override */
    if (gen_speed_override != 0xff) {
        min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
        max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
        /* Force all levels to use the same settings */
        for (i = 0; i < NUM_LINK_LEVELS; i++) {
            pcie_table->pcie_gen[i] = pcie_gen_cap;
            pcie_table->pcie_lane[i] = pcie_width_cap;
        }
    } else {
        min_gen_speed = MAX(0, table_member1[0]);
        max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
        min_gen_speed = min_gen_speed > max_gen_speed ?
                max_gen_speed : min_gen_speed;
        for (i = 0; i < NUM_LINK_LEVELS; i++) {
            if (pcie_table->pcie_gen[i] > pcie_gen_cap)
                pcie_table->pcie_gen[i] = pcie_gen_cap;
            if (pcie_table->pcie_lane[i] > pcie_width_cap)
                pcie_table->pcie_lane[i] = pcie_width_cap;
        }
    }
    pcie_table->pcie_gen[0] = min_gen_speed;
    pcie_table->pcie_gen[1] = max_gen_speed;

    /* PCIE lane width override */
    if (lane_width_override != 0xff) {
        min_lane_width = MIN(pcie_width_cap, lane_width_override);
        max_lane_width = MIN(pcie_width_cap, lane_width_override);
    } else {
        min_lane_width = MAX(1, table_member2[0]);
        max_lane_width = MIN(pcie_width_cap, table_member2[1]);
        min_lane_width = min_lane_width > max_lane_width ?
                 max_lane_width : min_lane_width;
    }
    pcie_table->pcie_lane[0] = min_lane_width;
    pcie_table->pcie_lane[1] = max_lane_width;

    for (i = 0; i < NUM_LINK_LEVELS; i++) {
        smu_pcie_arg = (i << 16 |
@@ -3842,7 +3789,6 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -3859,6 +3805,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}

@@ -1525,7 +1525,6 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -1542,6 +1541,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}
@@ -2424,3 +2424,51 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)

    return ret;
}

int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
                     uint32_t pcie_gen_cap,
                     uint32_t pcie_width_cap)
{
    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    struct smu_13_0_pcie_table *pcie_table =
                &dpm_context->dpm_tables.pcie_table;
    int num_of_levels = pcie_table->num_of_link_levels;
    uint32_t smu_pcie_arg;
    int ret, i;

    if (!amdgpu_device_pcie_dynamic_switching_supported()) {
        if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
            pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];

        if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
            pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];

        /* Force all levels to use the same settings */
        for (i = 0; i < num_of_levels; i++) {
            pcie_table->pcie_gen[i] = pcie_gen_cap;
            pcie_table->pcie_lane[i] = pcie_width_cap;
        }
    } else {
        for (i = 0; i < num_of_levels; i++) {
            if (pcie_table->pcie_gen[i] > pcie_gen_cap)
                pcie_table->pcie_gen[i] = pcie_gen_cap;
            if (pcie_table->pcie_lane[i] > pcie_width_cap)
                pcie_table->pcie_lane[i] = pcie_width_cap;
        }
    }

    for (i = 0; i < num_of_levels; i++) {
        smu_pcie_arg = i << 16;
        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
        smu_pcie_arg |= pcie_table->pcie_lane[i];

        ret = smu_cmn_send_smc_msg_with_param(smu,
                              SMU_MSG_OverridePcieParameters,
                              smu_pcie_arg,
                              NULL);
        if (ret)
            return ret;
    }

    return 0;
}
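The message argument packs three fields into one 32-bit word: bits 23:16 select the DPM link level, bits 15:8 the PCIe generation, and bits 7:0 the lane-width code. A worked example with hypothetical values:

/* Worked example of the encoding above; values are hypothetical. */
uint32_t level = 1;  /* link level 1                -> bits 23:16 */
uint32_t gen   = 3;  /* generation code             -> bits 15:8  */
uint32_t lane  = 6;  /* width code (1..7 = x1..x32) -> bits 7:0   */

uint32_t smu_pcie_arg = (level << 16) | (gen << 8) | lane;
/* == 0x00010306 */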
@@ -1645,37 +1645,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
    return ret;
}

static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
                          uint32_t pcie_gen_cap,
                          uint32_t pcie_width_cap)
{
    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    struct smu_13_0_pcie_table *pcie_table =
                &dpm_context->dpm_tables.pcie_table;
    uint32_t smu_pcie_arg;
    int ret, i;

    for (i = 0; i < pcie_table->num_of_link_levels; i++) {
        if (pcie_table->pcie_gen[i] > pcie_gen_cap)
            pcie_table->pcie_gen[i] = pcie_gen_cap;
        if (pcie_table->pcie_lane[i] > pcie_width_cap)
            pcie_table->pcie_lane[i] = pcie_width_cap;

        smu_pcie_arg = i << 16;
        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
        smu_pcie_arg |= pcie_table->pcie_lane[i];

        ret = smu_cmn_send_smc_msg_with_param(smu,
                              SMU_MSG_OverridePcieParameters,
                              smu_pcie_arg,
                              NULL);
        if (ret)
            return ret;
    }

    return 0;
}

static const struct smu_temperature_range smu13_thermal_policy[] = {
    {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
    { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
@@ -2320,7 +2289,6 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -2337,6 +2305,7 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}
@@ -2654,7 +2623,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
    .feature_is_enabled = smu_cmn_feature_is_enabled,
    .print_clk_levels = smu_v13_0_0_print_clk_levels,
    .force_clk_levels = smu_v13_0_0_force_clk_levels,
    .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
    .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
    .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
    .register_irq_handler = smu_v13_0_register_irq_handler,
    .enable_thermal_alert = smu_v13_0_enable_thermal_alert,

@@ -1763,7 +1763,6 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    mutex_lock(&adev->pm.mutex);
    r = smu_v13_0_6_request_i2c_xfer(smu, req);
    mutex_unlock(&adev->pm.mutex);
    if (r)
        goto fail;

@@ -1780,6 +1779,7 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
    }
    r = num_msgs;
fail:
    mutex_unlock(&adev->pm.mutex);
    kfree(req);
    return r;
}

@@ -1635,37 +1635,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
    return ret;
}

static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
                          uint32_t pcie_gen_cap,
                          uint32_t pcie_width_cap)
{
    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
    struct smu_13_0_pcie_table *pcie_table =
                &dpm_context->dpm_tables.pcie_table;
    uint32_t smu_pcie_arg;
    int ret, i;

    for (i = 0; i < pcie_table->num_of_link_levels; i++) {
        if (pcie_table->pcie_gen[i] > pcie_gen_cap)
            pcie_table->pcie_gen[i] = pcie_gen_cap;
        if (pcie_table->pcie_lane[i] > pcie_width_cap)
            pcie_table->pcie_lane[i] = pcie_width_cap;

        smu_pcie_arg = i << 16;
        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
        smu_pcie_arg |= pcie_table->pcie_lane[i];

        ret = smu_cmn_send_smc_msg_with_param(smu,
                              SMU_MSG_OverridePcieParameters,
                              smu_pcie_arg,
                              NULL);
        if (ret)
            return ret;
    }

    return 0;
}

static const struct smu_temperature_range smu13_thermal_policy[] =
{
    {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@@ -2234,7 +2203,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
    .feature_is_enabled = smu_cmn_feature_is_enabled,
    .print_clk_levels = smu_v13_0_7_print_clk_levels,
    .force_clk_levels = smu_v13_0_7_force_clk_levels,
    .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
    .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
    .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
    .register_irq_handler = smu_v13_0_register_irq_handler,
    .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
|
||||
goto err_drm_client_init;
|
||||
}
|
||||
|
||||
ret = armada_fbdev_client_hotplug(&fbh->client);
|
||||
if (ret)
|
||||
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
|
||||
|
||||
drm_client_register(&fbh->client);
|
||||
|
||||
return;
|
||||
|
@ -1426,9 +1426,9 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
|
||||
/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
|
||||
if (dw_hdmi_support_scdc(hdmi, display)) {
|
||||
if (mtmdsclock > HDMI14_MAX_TMDSCLK)
|
||||
drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
|
||||
drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
|
||||
else
|
||||
drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
|
||||
drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
|
||||
@ -2116,7 +2116,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
|
||||
min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
|
||||
|
||||
/* Enabled Scrambling in the Sink */
|
||||
drm_scdc_set_scrambling(&hdmi->connector, 1);
|
||||
drm_scdc_set_scrambling(hdmi->curr_conn, 1);
|
||||
|
||||
/*
|
||||
* To activate the scrambler feature, you must ensure
|
||||
@ -2132,7 +2132,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
|
||||
hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
|
||||
hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
|
||||
HDMI_MC_SWRSTZ);
|
||||
drm_scdc_set_scrambling(&hdmi->connector, 0);
|
||||
drm_scdc_set_scrambling(hdmi->curr_conn, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3553,6 +3553,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
|
||||
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
|
||||
| DRM_BRIDGE_OP_HPD;
|
||||
hdmi->bridge.interlace_allowed = true;
|
||||
hdmi->bridge.ddc = hdmi->ddc;
|
||||
#ifdef CONFIG_OF
|
||||
hdmi->bridge.of_node = pdev->dev.of_node;
|
||||
#endif
|
||||
|
@ -170,10 +170,10 @@
|
||||
* @pwm_refclk_freq: Cache for the reference clock input to the PWM.
|
||||
*/
|
||||
struct ti_sn65dsi86 {
|
||||
struct auxiliary_device bridge_aux;
|
||||
struct auxiliary_device gpio_aux;
|
||||
struct auxiliary_device aux_aux;
|
||||
struct auxiliary_device pwm_aux;
|
||||
struct auxiliary_device *bridge_aux;
|
||||
struct auxiliary_device *gpio_aux;
|
||||
struct auxiliary_device *aux_aux;
|
||||
struct auxiliary_device *pwm_aux;
|
||||
|
||||
struct device *dev;
|
||||
struct regmap *regmap;
|
||||
@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
|
||||
auxiliary_device_delete(data);
|
||||
}
|
||||
|
||||
/*
|
||||
* AUX bus docs say that a non-NULL release is mandatory, but it makes no
|
||||
* sense for the model used here where all of the aux devices are allocated
|
||||
* in the single shared structure. We'll use this noop as a workaround.
|
||||
*/
|
||||
static void ti_sn65dsi86_noop(struct device *dev) {}
|
||||
static void ti_sn65dsi86_aux_device_release(struct device *dev)
|
||||
{
|
||||
struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
|
||||
|
||||
kfree(aux);
|
||||
}
|
||||
|
||||
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
|
||||
struct auxiliary_device *aux,
|
||||
struct auxiliary_device **aux_out,
|
||||
const char *name)
|
||||
{
|
||||
struct device *dev = pdata->dev;
|
||||
struct auxiliary_device *aux;
|
||||
int ret;
|
||||
|
||||
aux = kzalloc(sizeof(*aux), GFP_KERNEL);
|
||||
if (!aux)
|
||||
return -ENOMEM;
|
||||
|
||||
aux->name = name;
|
||||
aux->dev.parent = dev;
|
||||
aux->dev.release = ti_sn65dsi86_noop;
|
||||
aux->dev.release = ti_sn65dsi86_aux_device_release;
|
||||
device_set_of_node_from_dev(&aux->dev, dev);
|
||||
ret = auxiliary_device_init(aux);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
kfree(aux);
|
||||
return ret;
|
||||
}
|
||||
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
|
||||
if (!ret)
|
||||
*aux_out = aux;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
|
||||
* drm_client_register() it is no longer permissible to call drm_client_release()
|
||||
* directly (outside the unregister callback), instead cleanup will happen
|
||||
* automatically on driver unload.
|
||||
*
|
||||
* Registering a client generates a hotplug event that allows the client
|
||||
* to set up its display from pre-existing outputs. The client must have
|
||||
* initialized its state to able to handle the hotplug event successfully.
|
||||
*/
|
||||
void drm_client_register(struct drm_client_dev *client)
|
||||
{
|
||||
struct drm_device *dev = client->dev;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&dev->clientlist_mutex);
|
||||
list_add(&client->list, &dev->clientlist);
|
||||
|
||||
if (client->funcs && client->funcs->hotplug) {
|
||||
/*
|
||||
* Perform an initial hotplug event to pick up the
|
||||
* display configuration for the client. This step
|
||||
* has to be performed *after* registering the client
|
||||
* in the list of clients, or a concurrent hotplug
|
||||
* event might be lost; leaving the display off.
|
||||
*
|
||||
* Hold the clientlist_mutex as for a regular hotplug
|
||||
* event.
|
||||
*/
|
||||
ret = client->funcs->hotplug(client);
|
||||
if (ret)
|
||||
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
|
||||
}
|
||||
mutex_unlock(&dev->clientlist_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_client_register);
|
||||
|
@ -217,7 +217,7 @@ static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
|
||||
* drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
|
||||
* @dev: DRM device
|
||||
* @preferred_bpp: Preferred bits per pixel for the device.
|
||||
* @dev->mode_config.preferred_depth is used if this is zero.
|
||||
* 32 is used if this is zero.
|
||||
*
|
||||
* This function sets up fbdev emulation for GEM DMA drivers that support
|
||||
* dumb buffers with a virtual address and that can be mmap'ed.
|
||||
@ -252,10 +252,6 @@ void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
|
||||
goto err_drm_client_init;
|
||||
}
|
||||
|
||||
ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
|
||||
if (ret)
|
||||
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
|
||||
|
||||
drm_client_register(&fb_helper->client);
|
||||
|
||||
return;
|
||||
|
@ -339,10 +339,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
|
||||
goto err_drm_client_init;
|
||||
}
|
||||
|
||||
ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
|
||||
if (ret)
|
||||
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
|
||||
|
||||
drm_client_register(&fb_helper->client);
|
||||
|
||||
return;
|
||||
|
@ -353,10 +353,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
|
||||
*/
|
||||
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
|
||||
{
|
||||
struct dma_fence *fence = dma_fence_allocate_private_stub();
|
||||
struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
|
||||
|
||||
if (IS_ERR(fence))
|
||||
return PTR_ERR(fence);
|
||||
if (!fence)
|
||||
return -ENOMEM;
|
||||
|
||||
drm_syncobj_replace_fence(syncobj, fence);
|
||||
dma_fence_put(fence);
|
||||
|
@ -215,10 +215,6 @@ void exynos_drm_fbdev_setup(struct drm_device *dev)
|
||||
if (ret)
|
||||
goto err_drm_client_init;
|
||||
|
||||
ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
|
||||
if (ret)
|
||||
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
|
||||
|
||||
drm_client_register(&fb_helper->client);
|
||||
|
||||
return;
|
||||
|
@ -328,10 +328,6 @@ void psb_fbdev_setup(struct drm_psb_private *dev_priv)
|
||||
goto err_drm_fb_helper_unprepare;
|
||||
}
|
||||
|
||||
ret = psb_fbdev_client_hotplug(&fb_helper->client);
|
||||
if (ret)
|
||||
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
|
||||
|
||||
drm_client_register(&fb_helper->client);
|
||||
|
||||
return;
|
||||
|
@ -4564,7 +4564,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
|
||||
saved_state->uapi = slave_crtc_state->uapi;
|
||||
saved_state->scaler_state = slave_crtc_state->scaler_state;
|
||||
saved_state->shared_dpll = slave_crtc_state->shared_dpll;
|
||||
saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
|
||||
saved_state->crc_enabled = slave_crtc_state->crc_enabled;
|
||||
|
||||
intel_crtc_free_hw_state(slave_crtc_state);
|
||||
|
@ -37,9 +37,6 @@ static u64 gen8_pte_encode(dma_addr_t addr,
|
||||
if (unlikely(flags & PTE_READ_ONLY))
|
||||
pte &= ~GEN8_PAGE_RW;
|
||||
|
||||
if (flags & PTE_LM)
|
||||
pte |= GEN12_PPGTT_PTE_LM;
|
||||
|
||||
/*
|
||||
* For pre-gen12 platforms pat_index is the same as enum
|
||||
* i915_cache_level, so the switch-case here is still valid.
|
||||
|
@ -670,7 +670,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
|
||||
if (IS_ERR(obj))
|
||||
return ERR_CAST(obj);
|
||||
|
||||
i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
|
||||
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
|
||||
|
||||
vma = i915_vma_instance(obj, vm, NULL);
|
||||
if (IS_ERR(vma)) {
|
||||
|
@ -868,8 +868,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
|
||||
oa_report_id_clear(stream, report32);
|
||||
oa_timestamp_clear(stream, report32);
|
||||
} else {
|
||||
u8 *oa_buf_end = stream->oa_buffer.vaddr +
|
||||
OA_BUFFER_SIZE;
|
||||
u32 part = oa_buf_end - (u8 *)report32;
|
||||
|
||||
/* Zero out the entire report */
|
||||
memset(report32, 0, report_size);
|
||||
if (report_size <= part) {
|
||||
memset(report32, 0, report_size);
|
||||
} else {
|
||||
memset(report32, 0, part);
|
||||
memset(oa_buf_base, 0, report_size - part);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -246,10 +246,6 @@ void msm_fbdev_setup(struct drm_device *dev)
|
||||
goto err_drm_fb_helper_unprepare;
|
||||
}
|
||||
|
||||
ret = msm_fbdev_client_hotplug(&helper->client);
|
||||
if (ret)
|
||||
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
|
||||
|
||||
drm_client_register(&helper->client);
|
||||
|
||||
return;
|
||||
|
@ -910,15 +910,19 @@ nv50_msto_prepare(struct drm_atomic_state *state,
|
||||
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
|
||||
struct nv50_mstc *mstc = msto->mstc;
|
||||
struct nv50_mstm *mstm = mstc->mstm;
|
||||
struct drm_dp_mst_atomic_payload *payload;
|
||||
struct drm_dp_mst_topology_state *old_mst_state;
|
||||
struct drm_dp_mst_atomic_payload *payload, *old_payload;
|
||||
|
||||
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
|
||||
|
||||
old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
|
||||
|
||||
payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
|
||||
old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
|
||||
|
||||
// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
|
||||
if (msto->disabled) {
|
||||
drm_dp_remove_payload(mgr, mst_state, payload, payload);
|
||||
drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
|
||||
|
||||
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
|
||||
} else {
|
||||
|
@@ -90,6 +90,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
         if (cli)
             nouveau_svmm_part(chan->vmm->svmm, chan->inst);
 
+        nvif_object_dtor(&chan->blit);
         nvif_object_dtor(&chan->nvsw);
         nvif_object_dtor(&chan->gart);
         nvif_object_dtor(&chan->vram);

@@ -53,6 +53,7 @@ struct nouveau_channel {
     u32 user_put;
 
     struct nvif_object user;
+    struct nvif_object blit;
 
     struct nvif_event kill;
     atomic_t killed;

@@ -375,15 +375,29 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
     ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
                            NVDRM_NVSW, nouveau_abi16_swclass(drm),
                            NULL, 0, &drm->channel->nvsw);
+
+    if (ret == 0 && device->info.chipset >= 0x11) {
+        ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
+                               0x005f, 0x009f,
+                               NULL, 0, &drm->channel->blit);
+    }
+
     if (ret == 0) {
         struct nvif_push *push = drm->channel->chan.push;
-        ret = PUSH_WAIT(push, 2);
-        if (ret == 0)
+        ret = PUSH_WAIT(push, 8);
+        if (ret == 0) {
+            if (device->info.chipset >= 0x11) {
+                PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
+                PUSH_NVSQ(push, NV09F, 0x0120, 0,
+                                       0x0124, 1,
+                                       0x0128, 2);
+            }
             PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
+        }
     }
 
     if (ret) {
-        NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
+        NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
         nouveau_accel_gr_fini(drm);
         return;
     }

@@ -295,6 +295,7 @@ g94_sor = {
     .clock = nv50_sor_clock,
     .war_2 = g94_sor_war_2,
     .war_3 = g94_sor_war_3,
+    .hdmi = &g84_sor_hdmi,
     .dp = &g94_sor_dp,
 };

@@ -125,7 +125,7 @@ gt215_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
     pack_hdmi_infoframe(&avi, data, size);
 
     nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
-    if (size)
+    if (!size)
         return;
 
     nvkm_wr32(device, 0x61c528 + soff, avi.header);

@@ -224,7 +224,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
     u64 falcons;
     int ret, i;
 
-    if (list_empty(&acr->hsfw)) {
+    if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
         nvkm_debug(subdev, "No HSFW(s)\n");
         nvkm_acr_cleanup(acr);
         return 0;

@@ -318,10 +318,6 @@ void omap_fbdev_setup(struct drm_device *dev)
 
     INIT_WORK(&fbdev->work, pan_worker);
 
-    ret = omap_fbdev_client_hotplug(&helper->client);
-    if (ret)
-        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
     drm_client_register(&helper->client);
 
     return;

@@ -2178,6 +2178,7 @@ static const struct panel_desc innolux_at043tn24 = {
         .height = 54,
     },
     .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+    .connector_type = DRM_MODE_CONNECTOR_DPI,
     .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
 };

@@ -3202,6 +3203,7 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
     .vsync_start = 480 + 49,
     .vsync_end = 480 + 49 + 2,
     .vtotal = 480 + 49 + 2 + 22,
+    .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
 };
 
 static const struct panel_desc powertip_ph800480t013_idf02 = {

@@ -383,10 +383,6 @@ void radeon_fbdev_setup(struct radeon_device *rdev)
         goto err_drm_client_init;
     }
 
-    ret = radeon_fbdev_client_hotplug(&fb_helper->client);
-    if (ret)
-        drm_dbg_kms(rdev->ddev, "client hotplug ret=%d\n", ret);
-
     drm_client_register(&fb_helper->client);
 
     return;

@@ -176,16 +176,32 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 {
     struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                              finish_cb);
-    int r;
+    unsigned long index;
 
     dma_fence_put(f);
 
     /* Wait for all dependencies to avoid data corruptions */
-    while (!xa_empty(&job->dependencies)) {
-        f = xa_erase(&job->dependencies, job->last_dependency++);
-        r = dma_fence_add_callback(f, &job->finish_cb,
-                                   drm_sched_entity_kill_jobs_cb);
-        if (!r)
+    xa_for_each(&job->dependencies, index, f) {
+        struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+        if (s_fence && f == &s_fence->scheduled) {
+            /* The dependencies array had a reference on the scheduled
+             * fence, and the finished fence refcount might have
+             * dropped to zero. Use dma_fence_get_rcu() so we get
+             * a NULL fence in that case.
+             */
+            f = dma_fence_get_rcu(&s_fence->finished);
+
+            /* Now that we have a reference on the finished fence,
+             * we can release the reference the dependencies array
+             * had on the scheduled fence.
+             */
+            dma_fence_put(&s_fence->scheduled);
+        }
+
+        xa_erase(&job->dependencies, index);
+        if (f && !dma_fence_add_callback(f, &job->finish_cb,
+                                         drm_sched_entity_kill_jobs_cb))
             return;
 
         dma_fence_put(f);

@@ -415,8 +431,17 @@ static struct dma_fence *
 drm_sched_job_dependency(struct drm_sched_job *job,
                          struct drm_sched_entity *entity)
 {
-    if (!xa_empty(&job->dependencies))
-        return xa_erase(&job->dependencies, job->last_dependency++);
+    struct dma_fence *f;
+
+    /* We keep the fence around, so we can iterate over all dependencies
+     * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
+     * before killing the job.
+     */
+    f = xa_load(&job->dependencies, job->last_dependency);
+    if (f) {
+        job->last_dependency++;
+        return dma_fence_get(f);
+    }
 
     if (job->sched->ops->prepare_job)
         return job->sched->ops->prepare_job(job, entity);

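Note: drm_sched_job_dependency() above now peeks at the next dependency with xa_load() and only advances a cursor, instead of erasing entries, so drm_sched_entity_kill_jobs_cb() can still walk the full list. A rough userspace analogue of that peek-then-advance pattern; a plain array stands in for the kernel xarray and all names are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_DEPS 4

    struct job {
        const char *deps[MAX_DEPS]; /* stand-in for the fence xarray */
        size_t last_dependency;     /* cursor, as in drm_sched_job */
    };

    /* Return the next dependency without removing it, so a later
     * teardown pass can still visit every entry. */
    static const char *next_dependency(struct job *job)
    {
        if (job->last_dependency < MAX_DEPS && job->deps[job->last_dependency])
            return job->deps[job->last_dependency++];
        return NULL;
    }

    int main(void)
    {
        struct job job = { .deps = { "fence-a", "fence-b" } };
        const char *d;

        while ((d = next_dependency(&job)))
            printf("wait on %s\n", d);
        /* job.deps[] is still fully populated for a kill/cleanup pass */
        return 0;
    }
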
@@ -48,8 +48,32 @@ static void __exit drm_sched_fence_slab_fini(void)
     kmem_cache_destroy(sched_fence_slab);
 }
 
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
+static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
+                                       struct dma_fence *fence)
 {
+    /*
+     * smp_store_release() to ensure another thread racing us
+     * in drm_sched_fence_set_deadline_finished() sees the
+     * fence's parent set before test_bit()
+     */
+    smp_store_release(&s_fence->parent, dma_fence_get(fence));
+    if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
+                 &s_fence->finished.flags))
+        dma_fence_set_deadline(fence, s_fence->deadline);
+}
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+                               struct dma_fence *parent)
+{
+    /* Set the parent before signaling the scheduled fence, such that,
+     * any waiter expecting the parent to be filled after the job has
+     * been scheduled (which is the case for drivers delegating waits
+     * to some firmware) doesn't have to busy wait for parent to show
+     * up.
+     */
+    if (!IS_ERR_OR_NULL(parent))
+        drm_sched_fence_set_parent(fence, parent);
+
     dma_fence_signal(&fence->scheduled);
 }

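Note: in the hunk above, the parent fence is published with smp_store_release() and set before the scheduled fence is signaled, so a waiter that sees the signal also sees the parent. A small C11 sketch of that publish-before-signal ordering; the release/acquire atomics below are userspace stand-ins for the kernel primitives, and the names are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic(const char *) parent;   /* published payload */
    static atomic_bool scheduled;          /* signal flag */

    /* Store the payload with release semantics before raising the
     * flag, so a reader that observes the flag with acquire
     * semantics is guaranteed to observe the payload too. */
    static void publish(const char *p)
    {
        atomic_store_explicit(&parent, p, memory_order_release);
        atomic_store_explicit(&scheduled, 1, memory_order_release);
    }

    static void waiter(void)
    {
        if (atomic_load_explicit(&scheduled, memory_order_acquire))
            printf("parent = %s\n",
                   atomic_load_explicit(&parent, memory_order_acquire));
    }

    int main(void)
    {
        publish("hw-fence");
        waiter();
        return 0;
    }
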
@@ -181,20 +205,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
 }
 EXPORT_SYMBOL(to_drm_sched_fence);
 
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
-                                struct dma_fence *fence)
-{
-    /*
-     * smp_store_release() to ensure another thread racing us
-     * in drm_sched_fence_set_deadline_finished() sees the
-     * fence's parent set before test_bit()
-     */
-    smp_store_release(&s_fence->parent, dma_fence_get(fence));
-    if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
-                 &s_fence->finished.flags))
-        dma_fence_set_deadline(fence, s_fence->deadline);
-}
-
 struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
                                               void *owner)
 {

@@ -1043,10 +1043,9 @@ static int drm_sched_main(void *param)
         trace_drm_run_job(sched_job, entity);
         fence = sched->ops->run_job(sched_job);
         complete_all(&entity->entity_idle);
-        drm_sched_fence_scheduled(s_fence);
+        drm_sched_fence_scheduled(s_fence, fence);
 
         if (!IS_ERR_OR_NULL(fence)) {
-            drm_sched_fence_set_parent(s_fence, fence);
             /* Drop for original kref_init of the fence */
             dma_fence_put(fence);

@@ -225,10 +225,6 @@ void tegra_fbdev_setup(struct drm_device *dev)
     if (ret)
         goto err_drm_client_init;
 
-    ret = tegra_fbdev_client_hotplug(&helper->client);
-    if (ret)
-        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
     drm_client_register(&helper->client);
 
     return;

@@ -458,18 +458,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
         goto out;
     }
 
-bounce:
-    ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
-    if (ret == -EMULTIHOP) {
+    do {
+        ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+        if (ret != -EMULTIHOP)
+            break;
+
         ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
-        if (ret) {
-            if (ret != -ERESTARTSYS && ret != -EINTR)
-                pr_err("Buffer eviction failed\n");
-            ttm_resource_free(bo, &evict_mem);
-            goto out;
-        }
-        /* try and move to final place now. */
-        goto bounce;
+    } while (!ret);
+
+    if (ret) {
+        ttm_resource_free(bo, &evict_mem);
+        if (ret != -ERESTARTSYS && ret != -EINTR)
+            pr_err("Buffer eviction failed\n");
     }
 out:
     return ret;

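Note: the eviction hunk above folds a "goto bounce" retry label into a do/while loop with a single error path at the bottom. A generic sketch of that loop shape; the error code and helper functions are made up for illustration:

    #include <stdio.h>

    #define EMULTIHOP 72 /* illustrative errno-style code */

    static int attempts;

    /* Stand-ins for the move and bounce steps of the driver. */
    static int try_move(void) { return attempts < 2 ? -EMULTIHOP : 0; }
    static int bounce(void)   { attempts++; return 0; }

    int main(void)
    {
        int ret;

        /* Keep bouncing through a temporary placement until the
         * final move succeeds or the bounce itself fails. */
        do {
            ret = try_move();
            if (ret != -EMULTIHOP)
                break;
            ret = bounce();
        } while (!ret);

        if (ret)
            fprintf(stderr, "eviction failed: %d\n", ret);
        return ret;
    }
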
@@ -517,6 +517,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 {
     bool ret = false;
 
+    if (bo->pin_count) {
+        *locked = false;
+        *busy = false;
+        return false;
+    }
+
     if (bo->base.resv == ctx->resv) {
         dma_resv_assert_held(bo->base.resv);
         if (ctx->allow_res_evict)

@@ -1167,6 +1173,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
         ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
         if (unlikely(ret != 0)) {
+            WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
             ttm_resource_free(bo, &evict_mem);
             goto out;
         }
     }

@@ -86,6 +86,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
                                        struct ttm_resource *res)
 {
     if (pos->last != res) {
+        if (pos->first == res)
+            pos->first = list_next_entry(res, lru);
         list_move(&res->lru, &pos->last->lru);
         pos->last = res;
     }

@@ -111,7 +113,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
 {
     struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
 
-    if (unlikely(pos->first == res && pos->last == res)) {
+    if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+                 (pos->first == res && pos->last == res))) {
         pos->first = NULL;
         pos->last = NULL;
     } else if (pos->first == res) {

@@ -34,8 +34,9 @@ static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
     }
 
     ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
-    if (ret < min)
+    if (ret < 0)
         goto out;
+
     mm->pasid = ret;
     ret = 0;
 out:

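Note: "ret < min" misses errors here because min is an unsigned ioasid_t: in the mixed comparison the negative error code from ida_alloc_range() converts to a huge unsigned value, so the branch never fires; "ret < 0" keeps the comparison signed. A tiny demonstration of the pitfall with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int min = 1; /* like an ioasid_t lower bound */
        int ret = -22;        /* allocator failure, e.g. -EINVAL */

        /* 'ret' is converted to unsigned for the mixed comparison,
         * becoming ~4.29e9, so this check silently passes. */
        if (ret < min)
            printf("caught by 'ret < min'\n");

        /* The all-signed comparison catches it reliably. */
        if (ret < 0)
            printf("caught by 'ret < 0'\n");
        return 0;
    }
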
@@ -2891,14 +2891,11 @@ static int iommu_setup_default_domain(struct iommu_group *group,
         ret = __iommu_group_set_domain_internal(
             group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
         if (WARN_ON(ret))
-            goto out_free;
+            goto out_free_old;
     } else {
         ret = __iommu_group_set_domain(group, dom);
-        if (ret) {
-            iommu_domain_free(dom);
-            group->default_domain = old_dom;
-            return ret;
-        }
+        if (ret)
+            goto err_restore_def_domain;
     }
 
     /*

@@ -2911,21 +2908,25 @@ static int iommu_setup_default_domain(struct iommu_group *group,
         for_each_group_device(group, gdev) {
             ret = iommu_create_device_direct_mappings(dom, gdev->dev);
             if (ret)
-                goto err_restore;
+                goto err_restore_domain;
         }
     }
 
-err_restore:
-    if (old_dom) {
-        __iommu_group_set_domain_internal(
-            group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
-        iommu_domain_free(dom);
-        old_dom = NULL;
-    }
-out_free:
+out_free_old:
     if (old_dom)
         iommu_domain_free(old_dom);
     return ret;
+
+err_restore_domain:
+    if (old_dom)
+        __iommu_group_set_domain_internal(
+            group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
+err_restore_def_domain:
+    if (old_dom) {
+        iommu_domain_free(dom);
+        group->default_domain = old_dom;
+    }
+    return ret;
 }
 
 /*

@@ -227,6 +227,8 @@ static int
 __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
                           const u8 mode_req, bool nowait)
 {
+    const struct can_bittiming *bt = &priv->can.bittiming;
+    unsigned long timeout_us = MCP251XFD_POLL_TIMEOUT_US;
     u32 con = 0, con_reqop, osc = 0;
     u8 mode;
     int err;

@@ -246,12 +248,16 @@ __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
     if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
         return 0;
 
+    if (bt->bitrate)
+        timeout_us = max_t(unsigned long, timeout_us,
+                           MCP251XFD_FRAME_LEN_MAX_BITS * USEC_PER_SEC /
+                           bt->bitrate);
+
     err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
                                    !mcp251xfd_reg_invalid(con) &&
                                    FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
                                              con) == mode_req,
-                                   MCP251XFD_POLL_SLEEP_US,
-                                   MCP251XFD_POLL_TIMEOUT_US);
+                                   MCP251XFD_POLL_SLEEP_US, timeout_us);
     if (err != -ETIMEDOUT && err != -EBADMSG)
         return err;

@@ -387,6 +387,7 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
 #define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
 #define MCP251XFD_POLL_SLEEP_US (10)
 #define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+#define MCP251XFD_FRAME_LEN_MAX_BITS (736)
 
 /* Misc */
 #define MCP251XFD_NAPI_WEIGHT 32

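Note: the mcp251xfd hunks above scale the mode-change poll timeout so it covers at least one worst-case 736-bit frame at the configured nominal bitrate, rather than the fixed 1 ms. A standalone sketch of that calculation; the constants mirror the patch but the function and values are illustrative:

    #include <stdio.h>

    #define FRAME_LEN_MAX_BITS 736UL /* worst-case CAN-FD frame */
    #define USEC_PER_SEC 1000000UL
    #define POLL_TIMEOUT_US 1000UL   /* 1 ms default */

    /* Stretch the poll timeout to at least one worst-case frame
     * time at the given bitrate. */
    static unsigned long mode_poll_timeout_us(unsigned long bitrate)
    {
        unsigned long frame_us = FRAME_LEN_MAX_BITS * USEC_PER_SEC / bitrate;

        return frame_us > POLL_TIMEOUT_US ? frame_us : POLL_TIMEOUT_US;
    }

    int main(void)
    {
        /* At 10 kbit/s a single frame may occupy the bus for
         * 73.6 ms, far longer than the old fixed 1 ms timeout. */
        printf("10 kbit/s  -> %lu us\n", mode_poll_timeout_us(10000));
        printf("500 kbit/s -> %lu us\n", mode_poll_timeout_us(500000));
        return 0;
    }
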
Some files were not shown because too many files have changed in this diff.