commit 6b5567b1b2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -1,4 +1,4 @@
-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/uart*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/uart\*
 Date: September 2021
 Contact: Oskar Senft <osk@google.com>
 	Chia-Wei Wang <chiawei_wang@aspeedtech.com>
@@ -9,7 +9,7 @@ Description: Selects the RX source of the UARTx device.
 	depends on the selected file.

 	e.g.
-	cat /sys/bus/platform/drivers/aspeed-uart-routing/*.uart_routing/uart1
+	cat /sys/bus/platform/drivers/aspeed-uart-routing/\*.uart_routing/uart1
 	[io1] io2 io3 io4 uart2 uart3 uart4 io6

 	In this case, UART1 gets its input from IO1 (physical serial port 1).
@@ -17,7 +17,7 @@ Description: Selects the RX source of the UARTx device.
 Users: OpenBMC. Proposed changes should be mailed to
 	openbmc@lists.ozlabs.org

-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/io*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/io\*
 Date: September 2021
 Contact: Oskar Senft <osk@google.com>
 	Chia-Wei Wang <chiawei_wang@aspeedtech.com>
@@ -119,6 +119,9 @@ Boards (incomplete list of examples):
 - OMAP3 BeagleBoard : Low cost community board
   compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"

+- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk
+  compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+
 - OMAP3 Tobi with Overo : Commercial expansion board with daughter board
   compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
@@ -35,6 +35,10 @@ description:
   contains a specific memory layout, which is documented in chapter 8 of the
   SiFive U5 Coreplex Series Manual <https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf>.
+
+  The thead,c900-plic is different from sifive,plic-1.0.0 in opensbi, the
+  T-HEAD PLIC implementation requires setting a delegation bit to allow access
+  from S-mode. So add thead,c900-plic to distinguish them.

 maintainers:
   - Sagar Kadam <sagar.kadam@sifive.com>
   - Paul Walmsley <paul.walmsley@sifive.com>
@@ -42,12 +46,17 @@ maintainers:

 properties:
   compatible:
-    items:
-      - enum:
-          - sifive,fu540-c000-plic
-          - starfive,jh7100-plic
-          - canaan,k210-plic
-      - const: sifive,plic-1.0.0
+    oneOf:
+      - items:
+          - enum:
+              - sifive,fu540-c000-plic
+              - starfive,jh7100-plic
+              - canaan,k210-plic
+          - const: sifive,plic-1.0.0
+      - items:
+          - enum:
+              - allwinner,sun20i-d1-plic
+          - const: thead,c900-plic

   reg:
     maxItems: 1
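For reference, a device tree node matching the new thead,c900-plic entry might look like the sketch below. This is an illustrative assumption only: the unit address, register size, riscv,ndev count and interrupts-extended wiring are placeholders, not values taken from this commit.

	/* Illustrative sketch, not part of this commit; all property values are assumed. */
	plic: interrupt-controller@10000000 {
		compatible = "allwinner,sun20i-d1-plic", "thead,c900-plic";
		reg = <0x10000000 0x4000000>;
		interrupt-controller;
		#interrupt-cells = <2>;
		#address-cells = <0>;
		riscv,ndev = <175>;
		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
	};

The oneOf split in the schema lets this T-HEAD flavour be matched separately from the sifive,plic-1.0.0 fallback, mirroring the delegation-bit difference described in the binding text.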
MAINTAINERS | 69
@@ -1620,6 +1620,7 @@ M: Olof Johansson <olof@lixom.net>
M: soc@kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/boot/dts/Makefile
F: arch/arm64/boot/dts/Makefile
@@ -1627,6 +1628,7 @@ F: arch/arm64/boot/dts/Makefile
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/mach-*/
F: arch/arm/plat-*/
@@ -1780,6 +1782,7 @@ F: drivers/irqchip/irq-apple-aic.c
F: drivers/mailbox/apple-mailbox.c
F: drivers/pinctrl/pinctrl-apple-gpio.c
F: drivers/soc/apple/*
F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
F: include/linux/apple-mailbox.h
@@ -2570,10 +2573,13 @@ N: rockchip

ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
F: Documentation/arm/samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/power/pd-samsung.yaml
@@ -3133,11 +3139,9 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
F: drivers/net/wireless/ath/ath5k/

ATHEROS ATH6KL WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath6kl/

ATI_REMOTE2 DRIVER
@@ -5774,7 +5778,7 @@ F: tools/testing/selftests/dma/

DMA-BUF HEAPS FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
R: Benjamin Gaignard <benjamin.gaignard@linaro.org>
R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
@@ -6504,7 +6508,7 @@ F: Documentation/devicetree/bindings/display/rockchip/
F: drivers/gpu/drm/rockchip/

DRM DRIVERS FOR STI
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
M: Alain Volmat <alain.volmat@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6513,8 +6517,8 @@ F: drivers/gpu/drm/sti

DRM DRIVERS FOR STM
M: Yannick Fertre <yannick.fertre@foss.st.com>
M: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
M: Philippe Cornu <philippe.cornu@foss.st.com>
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7184,7 +7188,7 @@ F: drivers/net/can/usb/etas_es58x/

ETHERNET BRIDGE
M: Roopa Prabhu <roopa@nvidia.com>
M: Nikolay Aleksandrov <nikolay@nvidia.com>
M: Nikolay Aleksandrov <razor@blackwall.org>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
@@ -7577,6 +7581,12 @@ S: Maintained
W: http://floatingpoint.sourceforge.net/emulator/index.html
F: arch/x86/math-emu/

FRAMEBUFFER CORE
M: Daniel Vetter <daniel@ffwll.ch>
F: drivers/video/fbdev/core/
S: Odd Fixes
T: git git://anongit.freedesktop.org/drm/drm-misc

FRAMEBUFFER LAYER
M: Helge Deller <deller@gmx.de>
L: linux-fbdev@vger.kernel.org
@@ -14391,6 +14401,7 @@ M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
C: irc://irc.libera.chat/devicetree
W: http://www.devicetree.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/ABI/testing/sysfs-firmware-ofw
@@ -14402,6 +14413,7 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
S: Maintained
C: irc://irc.libera.chat/devicetree
Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/devicetree/
@@ -15135,7 +15147,7 @@ M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Mark Rutland <mark.rutland@arm.com>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
R: Jiri Olsa <jolsa@redhat.com>
R: Jiri Olsa <jolsa@kernel.org>
R: Namhyung Kim <namhyung@kernel.org>
L: linux-perf-users@vger.kernel.org
L: linux-kernel@vger.kernel.org
@@ -15292,9 +15304,11 @@ PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
F: Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -15898,6 +15912,7 @@ S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
F: Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt

QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
@@ -15905,11 +15920,12 @@ L: ath11k@lists.infradead.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath11k/
F: Documentation/devicetree/bindings/net/wireless/qcom,ath11k.txt

QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
M: ath9k-devel@qca.qualcomm.com
M: Toke Høiland-Jørgensen <toke@toke.dk>
L: linux-wireless@vger.kernel.org
S: Supported
S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
@@ -16090,11 +16106,10 @@ F: Documentation/devicetree/bindings/media/*venus*
F: drivers/media/platform/qcom/venus/

QUALCOMM WCN36XX WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
M: Loic Poulain <loic.poulain@linaro.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
T: git git://github.com/KrasnikovEugene/wcn36xx.git
F: drivers/net/wireless/ath/wcn36xx/

QUANTENNA QTNFMAC WIRELESS DRIVER
@@ -16818,8 +16833,8 @@ F: drivers/video/fbdev/savage/
S390
M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
R: Alexander Gordeev <agordeev@linux.ibm.com>
M: Alexander Gordeev <agordeev@linux.ibm.com>
R: Christian Borntraeger <borntraeger@linux.ibm.com>
R: Sven Schnelle <svens@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
@@ -17090,6 +17105,7 @@ SAMSUNG SOC CLOCK DRIVERS
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-samsung-soc@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
@@ -17726,6 +17742,21 @@ S: Maintained
W: http://www.winischhofer.at/linuxsisusbvga.shtml
F: drivers/usb/misc/sisusbvga/

SL28 CPLD MFD DRIVER
M: Michael Walle <michael@walle.cc>
S: Maintained
F: Documentation/devicetree/bindings/gpio/kontron,sl28cpld-gpio.yaml
F: Documentation/devicetree/bindings/hwmon/kontron,sl28cpld-hwmon.yaml
F: Documentation/devicetree/bindings/interrupt-controller/kontron,sl28cpld-intc.yaml
F: Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
F: Documentation/devicetree/bindings/pwm/kontron,sl28cpld-pwm.yaml
F: Documentation/devicetree/bindings/watchdog/kontron,sl28cpld-wdt.yaml
F: drivers/gpio/gpio-sl28cpld.c
F: drivers/hwmon/sl28cpld-hwmon.c
F: drivers/irqchip/irq-sl28cpld.c
F: drivers/pwm/pwm-sl28cpld.c
F: drivers/watchdog/sl28cpld_wdt.c

SLAB ALLOCATOR
M: Christoph Lameter <cl@linux.com>
M: Pekka Enberg <penberg@kernel.org>
@@ -18442,7 +18473,7 @@ F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
F: sound/soc/sti/

STI CEC DRIVER
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
M: Alain Volmat <alain.volmat@foss.st.com>
S: Maintained
F: Documentation/devicetree/bindings/media/stih-cec.txt
F: drivers/media/cec/platform/sti/
@@ -19596,6 +19627,14 @@ F: Documentation/trace/timerlat-tracer.rst
F: Documentation/trace/hwlat_detector.rst
F: arch/*/kernel/trace.c

Real-time Linux Analysis (RTLA) tools
M: Daniel Bristot de Oliveira <bristot@kernel.org>
M: Steven Rostedt <rostedt@goodmis.org>
L: linux-trace-devel@vger.kernel.org
S: Maintained
F: Documentation/tools/rtla/
F: tools/tracing/rtla/

TRADITIONAL CHINESE DOCUMENTATION
M: Hu Haowen <src.res@email.cn>
L: linux-doc-tw-discuss@lists.sourceforge.net
Makefile | 4
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Gobble Gobble
+EXTRAVERSION = -rc4
+NAME = Superb Owl

 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@ -806,6 +806,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
|
||||
logicpd-som-lv-37xx-devkit.dtb \
|
||||
omap3430-sdp.dtb \
|
||||
omap3-beagle.dtb \
|
||||
omap3-beagle-ab4.dtb \
|
||||
omap3-beagle-xm.dtb \
|
||||
omap3-beagle-xm-ab.dtb \
|
||||
omap3-cm-t3517.dtb \
|
||||
|
@ -55,7 +55,7 @@
|
||||
2 1 0 0 /* # 0: INACTIVE, 1: TX, 2: RX */
|
||||
>;
|
||||
tx-num-evt = <16>;
|
||||
rt-num-evt = <16>;
|
||||
rx-num-evt = <16>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
|
@ -160,7 +160,7 @@
|
||||
target-module@48210000 {
|
||||
compatible = "ti,sysc-omap4-simple", "ti,sysc";
|
||||
power-domains = <&prm_mpu>;
|
||||
clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>;
|
||||
clocks = <&mpu_clkctrl DRA7_MPU_MPU_CLKCTRL 0>;
|
||||
clock-names = "fck";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
@ -875,10 +875,10 @@
|
||||
<0x58000014 4>;
|
||||
reg-names = "rev", "syss";
|
||||
ti,syss-mask = <1>;
|
||||
clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 0>,
|
||||
<&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
|
||||
<&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 10>,
|
||||
<&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 11>;
|
||||
clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 0>,
|
||||
<&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
|
||||
<&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>,
|
||||
<&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 11>;
|
||||
clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
@ -912,7 +912,7 @@
|
||||
SYSC_OMAP2_SOFTRESET |
|
||||
SYSC_OMAP2_AUTOIDLE)>;
|
||||
ti,syss-mask = <1>;
|
||||
clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
|
||||
clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
|
||||
clock-names = "fck";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
@ -939,8 +939,8 @@
|
||||
<SYSC_IDLE_SMART>,
|
||||
<SYSC_IDLE_SMART_WKUP>;
|
||||
ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
|
||||
clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
|
||||
<&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
|
||||
clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
|
||||
<&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
|
||||
clock-names = "fck", "dss_clk";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
@ -979,7 +979,7 @@
|
||||
compatible = "vivante,gc";
|
||||
reg = <0x0 0x700>;
|
||||
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&dss_clkctrl DRA7_BB2D_CLKCTRL 0>;
|
||||
clocks = <&dss_clkctrl DRA7_DSS_BB2D_CLKCTRL 0>;
|
||||
clock-names = "core";
|
||||
};
|
||||
};
|
||||
@ -1333,7 +1333,7 @@
|
||||
ti,no-reset-on-init;
|
||||
ti,no-idle;
|
||||
timer@0 {
|
||||
assigned-clocks = <&wkupaon_clkctrl DRA7_TIMER1_CLKCTRL 24>;
|
||||
assigned-clocks = <&wkupaon_clkctrl DRA7_WKUPAON_TIMER1_CLKCTRL 24>;
|
||||
assigned-clock-parents = <&sys_32k_ck>;
|
||||
};
|
||||
};
|
||||
|
@ -79,7 +79,6 @@
|
||||
MX23_PAD_LCD_RESET__GPIO_1_18
|
||||
MX23_PAD_PWM3__GPIO_1_29
|
||||
MX23_PAD_PWM4__GPIO_1_30
|
||||
MX23_PAD_SSP1_DETECT__SSP1_DETECT
|
||||
>;
|
||||
fsl,drive-strength = <MXS_DRIVE_4mA>;
|
||||
fsl,voltage = <MXS_VOLTAGE_HIGH>;
|
||||
|
@ -5,6 +5,8 @@
|
||||
* Author: Fabio Estevam <fabio.estevam@freescale.com>
|
||||
*/
|
||||
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
/ {
|
||||
aliases {
|
||||
backlight = &backlight;
|
||||
@ -226,6 +228,7 @@
|
||||
MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
|
||||
MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
|
||||
MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
|
||||
MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0
|
||||
>;
|
||||
};
|
||||
|
||||
@ -304,7 +307,7 @@
|
||||
&usdhc3 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_usdhc3>;
|
||||
non-removable;
|
||||
cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
|
@ -259,7 +259,7 @@
|
||||
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
|
||||
assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
|
||||
assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
|
||||
assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
|
||||
timeout-sec = <40>;
|
||||
};
|
||||
|
||||
|
@ -59,7 +59,7 @@
|
||||
};
|
||||
|
||||
uart_A: serial@84c0 {
|
||||
compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
|
||||
compatible = "amlogic,meson6-uart";
|
||||
reg = <0x84c0 0x18>;
|
||||
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
|
||||
fifo-size = <128>;
|
||||
@ -67,7 +67,7 @@
|
||||
};
|
||||
|
||||
uart_B: serial@84dc {
|
||||
compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
|
||||
compatible = "amlogic,meson6-uart";
|
||||
reg = <0x84dc 0x18>;
|
||||
interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
|
||||
status = "disabled";
|
||||
@ -105,7 +105,7 @@
|
||||
};
|
||||
|
||||
uart_C: serial@8700 {
|
||||
compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
|
||||
compatible = "amlogic,meson6-uart";
|
||||
reg = <0x8700 0x18>;
|
||||
interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
|
||||
status = "disabled";
|
||||
@ -228,7 +228,7 @@
|
||||
};
|
||||
|
||||
uart_AO: serial@4c0 {
|
||||
compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart";
|
||||
compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart";
|
||||
reg = <0x4c0 0x18>;
|
||||
interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
|
||||
status = "disabled";
|
||||
|
@ -736,27 +736,27 @@
|
||||
};
|
||||
|
||||
&uart_AO {
|
||||
compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&uart_A {
|
||||
compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&uart_B {
|
||||
compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&uart_C {
|
||||
compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&usb0 {
|
||||
|
@ -724,27 +724,27 @@
|
||||
};
|
||||
|
||||
&uart_AO {
|
||||
compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&uart_A {
|
||||
compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8b-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&uart_B {
|
||||
compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8b-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&uart_C {
|
||||
compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
|
||||
clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
|
||||
clock-names = "baud", "xtal", "pclk";
|
||||
compatible = "amlogic,meson8b-uart";
|
||||
clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
|
||||
clock-names = "xtal", "pclk", "baud";
|
||||
};
|
||||
|
||||
&usb0 {
|
||||
|
arch/arm/boot/dts/omap3-beagle-ab4.dts | 47 (new file)
@ -0,0 +1,47 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/dts-v1/;
|
||||
|
||||
#include "omap3-beagle.dts"
|
||||
|
||||
/ {
|
||||
model = "TI OMAP3 BeagleBoard A to B4";
|
||||
compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
|
||||
};
|
||||
|
||||
/*
|
||||
* Workaround for capacitor C70 issue, see "Boards revision A and < B5"
|
||||
* section at https://elinux.org/BeagleBoard_Community
|
||||
*/
|
||||
|
||||
/* Unusable as clocksource because of unreliable oscillator */
|
||||
&counter32k {
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
/* Unusable as clockevent because of unreliable oscillator, allow to idle */
|
||||
&timer1_target {
|
||||
/delete-property/ti,no-reset-on-init;
|
||||
/delete-property/ti,no-idle;
|
||||
timer@0 {
|
||||
/delete-property/ti,timer-alwon;
|
||||
};
|
||||
};
|
||||
|
||||
/* Preferred always-on timer for clocksource */
|
||||
&timer12_target {
|
||||
ti,no-reset-on-init;
|
||||
ti,no-idle;
|
||||
timer@0 {
|
||||
/* Always clocked by secure_32k_fck */
|
||||
};
|
||||
};
|
||||
|
||||
/* Preferred timer for clockevent */
|
||||
&timer2_target {
|
||||
ti,no-reset-on-init;
|
||||
ti,no-idle;
|
||||
timer@0 {
|
||||
assigned-clocks = <&gpt2_fck>;
|
||||
assigned-clock-parents = <&sys_ck>;
|
||||
};
|
||||
};
|
@ -304,39 +304,6 @@
|
||||
phys = <0 &hsusb2_phy>;
|
||||
};
|
||||
|
||||
/* Unusable as clocksource because of unreliable oscillator */
|
||||
&counter32k {
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
/* Unusable as clockevent because if unreliable oscillator, allow to idle */
|
||||
&timer1_target {
|
||||
/delete-property/ti,no-reset-on-init;
|
||||
/delete-property/ti,no-idle;
|
||||
timer@0 {
|
||||
/delete-property/ti,timer-alwon;
|
||||
};
|
||||
};
|
||||
|
||||
/* Preferred always-on timer for clocksource */
|
||||
&timer12_target {
|
||||
ti,no-reset-on-init;
|
||||
ti,no-idle;
|
||||
timer@0 {
|
||||
/* Always clocked by secure_32k_fck */
|
||||
};
|
||||
};
|
||||
|
||||
/* Preferred timer for clockevent */
|
||||
&timer2_target {
|
||||
ti,no-reset-on-init;
|
||||
ti,no-idle;
|
||||
timer@0 {
|
||||
assigned-clocks = <&gpt2_fck>;
|
||||
assigned-clock-parents = <&sys_ck>;
|
||||
};
|
||||
};
|
||||
|
||||
&twl_gpio {
|
||||
ti,use-leds;
|
||||
/* pullups: BIT(1) */
|
||||
|
@ -235,7 +235,6 @@
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
reg = <0x41>;
|
||||
irq-over-gpio;
|
||||
irq-gpios = <&gpiopinctrl 29 0x4>;
|
||||
id = <0>;
|
||||
blocks = <0x5>;
|
||||
|
@ -185,10 +185,6 @@
|
||||
cap-sd-highspeed;
|
||||
cap-mmc-highspeed;
|
||||
/* All direction control is used */
|
||||
st,sig-dir-cmd;
|
||||
st,sig-dir-dat0;
|
||||
st,sig-dir-dat2;
|
||||
st,sig-dir-dat31;
|
||||
st,sig-pin-fbclk;
|
||||
full-pwr-cycle;
|
||||
vmmc-supply = <&ab8500_ldo_aux3_reg>;
|
||||
|
@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
|
||||
}
|
||||
|
||||
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
|
||||
put_device(&pdev->dev);
|
||||
if (r) {
|
||||
pr_err("Unable to populate DSS submodule devices\n");
|
||||
put_device(&pdev->dev);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void)
|
||||
|
||||
for_each_matching_node(np, ti_clkctrl_match_table) {
|
||||
ret = _setup_clkctrl_provider(np);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
of_node_put(np);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -2,6 +2,7 @@
|
||||
menuconfig ARCH_INTEL_SOCFPGA
|
||||
bool "Altera SOCFPGA family"
|
||||
depends on ARCH_MULTI_V7
|
||||
select ARCH_HAS_RESET_CONTROLLER
|
||||
select ARCH_SUPPORTS_BIG_ENDIAN
|
||||
select ARM_AMBA
|
||||
select ARM_GIC
|
||||
@ -18,6 +19,7 @@ menuconfig ARCH_INTEL_SOCFPGA
|
||||
select PL310_ERRATA_727915
|
||||
select PL310_ERRATA_753970 if PL310
|
||||
select PL310_ERRATA_769419
|
||||
select RESET_CONTROLLER
|
||||
|
||||
if ARCH_INTEL_SOCFPGA
|
||||
config SOCFPGA_SUSPEND
|
||||
|
@ -672,6 +672,7 @@ config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
|
||||
|
||||
config ARM64_ERRATUM_2051678
|
||||
bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
|
||||
default y
|
||||
help
|
||||
This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
|
||||
Affected Coretex-A510 might not respect the ordering rules for
|
||||
|
@ -309,9 +309,6 @@ config ARCH_VISCONTI
|
||||
help
|
||||
This enables support for Toshiba Visconti SoCs Family.
|
||||
|
||||
config ARCH_VULCAN
|
||||
def_bool n
|
||||
|
||||
config ARCH_XGENE
|
||||
bool "AppliedMicro X-Gene SOC Family"
|
||||
help
|
||||
|
@ -107,6 +107,12 @@
|
||||
no-map;
|
||||
};
|
||||
|
||||
/* 32 MiB reserved for ARM Trusted Firmware (BL32) */
|
||||
secmon_reserved_bl32: secmon@5300000 {
|
||||
reg = <0x0 0x05300000 0x0 0x2000000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
linux,cma {
|
||||
compatible = "shared-dma-pool";
|
||||
reusable;
|
||||
|
@ -157,14 +157,6 @@
|
||||
regulator-always-on;
|
||||
};
|
||||
|
||||
reserved-memory {
|
||||
/* TEE Reserved Memory */
|
||||
bl32_reserved: bl32@5000000 {
|
||||
reg = <0x0 0x05300000 0x0 0x2000000>;
|
||||
no-map;
|
||||
};
|
||||
};
|
||||
|
||||
sdio_pwrseq: sdio-pwrseq {
|
||||
compatible = "mmc-pwrseq-simple";
|
||||
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
|
||||
|
@ -17,7 +17,7 @@
|
||||
rtc1 = &vrtc;
|
||||
};
|
||||
|
||||
dioo2133: audio-amplifier-0 {
|
||||
dio2133: audio-amplifier-0 {
|
||||
compatible = "simple-audio-amplifier";
|
||||
enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
|
||||
VCC-supply = <&vcc_5v>;
|
||||
@ -219,7 +219,7 @@
|
||||
audio-widgets = "Line", "Lineout";
|
||||
audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>,
|
||||
<&tdmin_b>, <&tdmin_c>, <&tdmin_lb>,
|
||||
<&dioo2133>;
|
||||
<&dio2133>;
|
||||
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
|
||||
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
|
||||
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
|
||||
|
@ -49,6 +49,12 @@
|
||||
no-map;
|
||||
};
|
||||
|
||||
/* 32 MiB reserved for ARM Trusted Firmware (BL32) */
|
||||
secmon_reserved_bl32: secmon@5300000 {
|
||||
reg = <0x0 0x05300000 0x0 0x2000000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
linux,cma {
|
||||
compatible = "shared-dma-pool";
|
||||
reusable;
|
||||
|
@ -123,7 +123,7 @@
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
|
||||
enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
|
||||
enable-gpio = <&gpio_ao GPIOE_2 GPIO_ACTIVE_HIGH>;
|
||||
enable-active-high;
|
||||
regulator-always-on;
|
||||
|
||||
|
@ -48,7 +48,7 @@
|
||||
regulator-max-microvolt = <3300000>;
|
||||
vin-supply = <&vcc_5v>;
|
||||
|
||||
enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
|
||||
enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>;
|
||||
enable-active-high;
|
||||
regulator-always-on;
|
||||
|
||||
|
@ -203,14 +203,6 @@
|
||||
regulator-always-on;
|
||||
};
|
||||
|
||||
reserved-memory {
|
||||
/* TEE Reserved Memory */
|
||||
bl32_reserved: bl32@5000000 {
|
||||
reg = <0x0 0x05300000 0x0 0x2000000>;
|
||||
no-map;
|
||||
};
|
||||
};
|
||||
|
||||
sdio_pwrseq: sdio-pwrseq {
|
||||
compatible = "mmc-pwrseq-simple";
|
||||
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
|
||||
|
@ -157,6 +157,10 @@
|
||||
};
|
||||
};
|
||||
|
||||
&ftm_alarm0 {
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&gpio1 {
|
||||
gpio-line-names =
|
||||
"", "", "", "", "", "", "", "",
|
||||
|
@ -1115,8 +1115,8 @@
|
||||
status = "okay";
|
||||
|
||||
ports {
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
|
||||
mipi1_sensor_ep: endpoint {
|
||||
remote-endpoint = <&camera1_ep>;
|
||||
|
@ -554,7 +554,7 @@
|
||||
assigned-clock-rates = <0>, <0>, <0>, <594000000>;
|
||||
status = "disabled";
|
||||
|
||||
port@0 {
|
||||
port {
|
||||
lcdif_mipi_dsi: endpoint {
|
||||
remote-endpoint = <&mipi_dsi_lcdif_in>;
|
||||
};
|
||||
@ -1151,8 +1151,8 @@
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
|
||||
csi1_mipi_ep: endpoint {
|
||||
remote-endpoint = <&csi1_ep>;
|
||||
@ -1203,8 +1203,8 @@
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
|
||||
csi2_mipi_ep: endpoint {
|
||||
remote-endpoint = <&csi2_ep>;
|
||||
|
@ -91,7 +91,7 @@
|
||||
|
||||
sound {
|
||||
compatible = "fsl,imx-audio-tlv320aic32x4";
|
||||
model = "tqm-tlv320aic32";
|
||||
model = "imx-audio-tlv320aic32x4";
|
||||
ssi-controller = <&sai3>;
|
||||
audio-codec = <&tlv320aic3x04>;
|
||||
};
|
||||
|
@ -15,8 +15,18 @@
|
||||
model = "Texas Instruments J721S2 EVM";
|
||||
|
||||
chosen {
|
||||
stdout-path = "serial10:115200n8";
|
||||
bootargs = "console=ttyS10,115200n8 earlycon=ns16550a,mmio32,2880000";
|
||||
stdout-path = "serial2:115200n8";
|
||||
bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,2880000";
|
||||
};
|
||||
|
||||
aliases {
|
||||
serial1 = &mcu_uart0;
|
||||
serial2 = &main_uart8;
|
||||
mmc0 = &main_sdhci0;
|
||||
mmc1 = &main_sdhci1;
|
||||
can0 = &main_mcan16;
|
||||
can1 = &mcu_mcan0;
|
||||
can2 = &mcu_mcan1;
|
||||
};
|
||||
|
||||
evm_12v0: fixedregulator-evm12v0 {
|
||||
|
@ -21,28 +21,6 @@
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
|
||||
aliases {
|
||||
serial0 = &wkup_uart0;
|
||||
serial1 = &mcu_uart0;
|
||||
serial2 = &main_uart0;
|
||||
serial3 = &main_uart1;
|
||||
serial4 = &main_uart2;
|
||||
serial5 = &main_uart3;
|
||||
serial6 = &main_uart4;
|
||||
serial7 = &main_uart5;
|
||||
serial8 = &main_uart6;
|
||||
serial9 = &main_uart7;
|
||||
serial10 = &main_uart8;
|
||||
serial11 = &main_uart9;
|
||||
mmc0 = &main_sdhci0;
|
||||
mmc1 = &main_sdhci1;
|
||||
can0 = &main_mcan16;
|
||||
can1 = &mcu_mcan0;
|
||||
can2 = &mcu_mcan1;
|
||||
can3 = &main_mcan3;
|
||||
can4 = &main_mcan5;
|
||||
};
|
||||
|
||||
chosen { };
|
||||
|
||||
cpus {
|
||||
|
@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
|
||||
IRQCHIP_STATE_PENDING,
|
||||
&val);
|
||||
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
|
||||
} else if (vgic_irq_is_mapped_level(irq)) {
|
||||
val = vgic_get_phys_line_level(irq);
|
||||
} else {
|
||||
val = irq_is_pending(irq);
|
||||
}
|
||||
|
@ -12,6 +12,14 @@
|
||||
#include <asm/barrier.h>
|
||||
#include <linux/atomic.h>
|
||||
|
||||
/* compiler build environment sanity checks: */
|
||||
#if !defined(CONFIG_64BIT) && defined(__LP64__)
|
||||
#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
|
||||
#endif
|
||||
#if defined(CONFIG_64BIT) && !defined(__LP64__)
|
||||
#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
|
||||
#endif
|
||||
|
||||
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
|
||||
* on use of volatile and __*_bit() (set/clear/change):
|
||||
* *_bit() want use of volatile.
|
||||
|
@ -89,8 +89,8 @@ struct exception_table_entry {
|
||||
__asm__("1: " ldx " 0(" sr "%2),%0\n" \
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||
: "=r"(__gu_val), "=r"(__gu_err) \
|
||||
: "r"(ptr), "1"(__gu_err)); \
|
||||
: "=r"(__gu_val), "+r"(__gu_err) \
|
||||
: "r"(ptr)); \
|
||||
\
|
||||
(val) = (__force __typeof__(*(ptr))) __gu_val; \
|
||||
}
|
||||
@ -123,8 +123,8 @@ struct exception_table_entry {
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
|
||||
: "=&r"(__gu_tmp.l), "=r"(__gu_err) \
|
||||
: "r"(ptr), "1"(__gu_err)); \
|
||||
: "=&r"(__gu_tmp.l), "+r"(__gu_err) \
|
||||
: "r"(ptr)); \
|
||||
\
|
||||
(val) = __gu_tmp.t; \
|
||||
}
|
||||
@ -135,13 +135,12 @@ struct exception_table_entry {
|
||||
#define __put_user_internal(sr, x, ptr) \
|
||||
({ \
|
||||
ASM_EXCEPTIONTABLE_VAR(__pu_err); \
|
||||
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
|
||||
\
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
|
||||
case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
|
||||
case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
|
||||
case 8: STD_USER(sr, __x, ptr); break; \
|
||||
case 1: __put_user_asm(sr, "stb", x, ptr); break; \
|
||||
case 2: __put_user_asm(sr, "sth", x, ptr); break; \
|
||||
case 4: __put_user_asm(sr, "stw", x, ptr); break; \
|
||||
case 8: STD_USER(sr, x, ptr); break; \
|
||||
default: BUILD_BUG(); \
|
||||
} \
|
||||
\
|
||||
@ -150,7 +149,9 @@ struct exception_table_entry {
|
||||
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
__put_user_internal("%%sr3,", x, ptr); \
|
||||
__typeof__(&*(ptr)) __ptr = ptr; \
|
||||
__typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \
|
||||
__put_user_internal("%%sr3,", __x, __ptr); \
|
||||
})
|
||||
|
||||
#define __put_kernel_nofault(dst, src, type, err_label) \
|
||||
@ -180,8 +181,8 @@ struct exception_table_entry {
|
||||
"1: " stx " %2,0(" sr "%1)\n" \
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||
: "=r"(__pu_err) \
|
||||
: "r"(ptr), "r"(x), "0"(__pu_err))
|
||||
: "+r"(__pu_err) \
|
||||
: "r"(ptr), "r"(x))
|
||||
|
||||
|
||||
#if !defined(CONFIG_64BIT)
|
||||
@ -193,8 +194,8 @@ struct exception_table_entry {
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
|
||||
: "=r"(__pu_err) \
|
||||
: "r"(ptr), "r"(__val), "0"(__pu_err)); \
|
||||
: "+r"(__pu_err) \
|
||||
: "r"(ptr), "r"(__val)); \
|
||||
} while (0)
|
||||
|
||||
#endif /* !defined(CONFIG_64BIT) */
|
||||
|
@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
|
||||
return *((u64 *)addr);
|
||||
}
|
||||
|
||||
u64 ioread64_lo_hi(const void __iomem *addr)
|
||||
{
|
||||
u32 low, high;
|
||||
|
||||
low = ioread32(addr);
|
||||
high = ioread32(addr + sizeof(u32));
|
||||
|
||||
return low + ((u64)high << 32);
|
||||
}
|
||||
|
||||
u64 ioread64_hi_lo(const void __iomem *addr)
|
||||
{
|
||||
u32 low, high;
|
||||
@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
|
||||
}
|
||||
}
|
||||
|
||||
void iowrite64_lo_hi(u64 val, void __iomem *addr)
|
||||
{
|
||||
iowrite32(val, addr);
|
||||
iowrite32(val >> 32, addr + sizeof(u32));
|
||||
}
|
||||
|
||||
void iowrite64_hi_lo(u64 val, void __iomem *addr)
|
||||
{
|
||||
iowrite32(val >> 32, addr + sizeof(u32));
|
||||
@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
|
||||
EXPORT_SYMBOL(ioread32be);
|
||||
EXPORT_SYMBOL(ioread64);
|
||||
EXPORT_SYMBOL(ioread64be);
|
||||
EXPORT_SYMBOL(ioread64_lo_hi);
|
||||
EXPORT_SYMBOL(ioread64_hi_lo);
|
||||
EXPORT_SYMBOL(iowrite8);
|
||||
EXPORT_SYMBOL(iowrite16);
|
||||
@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
|
||||
EXPORT_SYMBOL(iowrite32be);
|
||||
EXPORT_SYMBOL(iowrite64);
|
||||
EXPORT_SYMBOL(iowrite64be);
|
||||
EXPORT_SYMBOL(iowrite64_lo_hi);
|
||||
EXPORT_SYMBOL(iowrite64_hi_lo);
|
||||
EXPORT_SYMBOL(ioread8_rep);
|
||||
EXPORT_SYMBOL(ioread16_rep);
|
||||
|
@ -337,9 +337,9 @@ static void __init setup_bootmem(void)
|
||||
|
||||
static bool kernel_set_to_readonly;
|
||||
|
||||
static void __init map_pages(unsigned long start_vaddr,
|
||||
unsigned long start_paddr, unsigned long size,
|
||||
pgprot_t pgprot, int force)
|
||||
static void __ref map_pages(unsigned long start_vaddr,
|
||||
unsigned long start_paddr, unsigned long size,
|
||||
pgprot_t pgprot, int force)
|
||||
{
|
||||
pmd_t *pmd;
|
||||
pte_t *pg_table;
|
||||
@ -449,7 +449,7 @@ void __init set_kernel_text_rw(int enable_read_write)
|
||||
flush_tlb_all();
|
||||
}
|
||||
|
||||
void __ref free_initmem(void)
|
||||
void free_initmem(void)
|
||||
{
|
||||
unsigned long init_begin = (unsigned long)__init_begin;
|
||||
unsigned long init_end = (unsigned long)__init_end;
|
||||
@ -463,7 +463,6 @@ void __ref free_initmem(void)
|
||||
/* The init text pages are marked R-X. We have to
|
||||
* flush the icache and mark them RW-
|
||||
*
|
||||
* This is tricky, because map_pages is in the init section.
|
||||
* Do a dummy remap of the data section first (the data
|
||||
* section is already PAGE_KERNEL) to pull in the TLB entries
|
||||
* for map_kernel */
|
||||
|
@ -50,6 +50,12 @@ riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
|
||||
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
|
||||
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
|
||||
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
|
||||
|
||||
# Newer binutils versions default to ISA spec version 20191213 which moves some
|
||||
# instructions from the I extension to the Zicsr and Zifencei extensions.
|
||||
toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
|
||||
riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
|
||||
|
||||
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
|
||||
KBUILD_AFLAGS += -march=$(riscv-march-y)
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/sched/hotplug.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/cpu_ops.h>
|
||||
#include <asm/numa.h>
|
||||
#include <asm/sbi.h>
|
||||
|
||||
bool cpu_has_hotplug(unsigned int cpu)
|
||||
@ -40,6 +41,7 @@ int __cpu_disable(void)
|
||||
return ret;
|
||||
|
||||
remove_cpu_topology(cpu);
|
||||
numa_remove_cpu(cpu);
|
||||
set_cpu_online(cpu, false);
|
||||
irq_migrate_all_off_this_cpu();
|
||||
|
||||
|
@ -22,14 +22,13 @@
|
||||
add \reg, \reg, t0
|
||||
.endm
|
||||
.macro XIP_FIXUP_FLASH_OFFSET reg
|
||||
la t1, __data_loc
|
||||
li t0, XIP_OFFSET_MASK
|
||||
and t1, t1, t0
|
||||
li t1, XIP_OFFSET
|
||||
sub t0, t0, t1
|
||||
sub \reg, \reg, t0
|
||||
la t0, __data_loc
|
||||
REG_L t1, _xip_phys_offset
|
||||
sub \reg, \reg, t1
|
||||
add \reg, \reg, t0
|
||||
.endm
|
||||
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
|
||||
_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
|
||||
#else
|
||||
.macro XIP_FIXUP_OFFSET reg
|
||||
.endm
|
||||
|
@ -22,15 +22,16 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
|
||||
bool (*fn)(void *, unsigned long), void *arg)
|
||||
{
|
||||
unsigned long fp, sp, pc;
|
||||
int level = 0;
|
||||
|
||||
if (regs) {
|
||||
fp = frame_pointer(regs);
|
||||
sp = user_stack_pointer(regs);
|
||||
pc = instruction_pointer(regs);
|
||||
} else if (task == NULL || task == current) {
|
||||
fp = (unsigned long)__builtin_frame_address(1);
|
||||
sp = (unsigned long)__builtin_frame_address(0);
|
||||
pc = (unsigned long)__builtin_return_address(0);
|
||||
fp = (unsigned long)__builtin_frame_address(0);
|
||||
sp = sp_in_global;
|
||||
pc = (unsigned long)walk_stackframe;
|
||||
} else {
|
||||
/* task blocked in __switch_to */
|
||||
fp = task->thread.s[0];
|
||||
@ -42,7 +43,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned long low, high;
|
||||
struct stackframe *frame;
|
||||
|
||||
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
|
||||
if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
|
||||
break;
|
||||
|
||||
/* Validate frame pointer */
|
||||
|
@ -33,7 +33,7 @@ static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
|
||||
if (unlikely(offset > MAX_REG_OFFSET))
|
||||
return;
|
||||
|
||||
if (!offset)
|
||||
if (offset)
|
||||
*(unsigned long *)((unsigned long)regs + offset) = val;
|
||||
}
|
||||
|
||||
@ -43,8 +43,8 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
|
||||
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
|
||||
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
|
||||
|
||||
regs_set_gpr(regs, reg_err, -EFAULT);
|
||||
regs_set_gpr(regs, reg_zero, 0);
|
||||
regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
|
||||
regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
|
||||
|
||||
regs->epc = get_ex_fixup(ex);
|
||||
return true;
|
||||
|
@ -232,6 +232,7 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG
|
||||
|
||||
#ifdef CONFIG_XIP_KERNEL
|
||||
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
|
||||
#define riscv_pfn_base (*(unsigned long *)XIP_FIXUP(&riscv_pfn_base))
|
||||
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
|
||||
#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
|
||||
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
|
||||
@ -522,6 +523,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XIP_KERNEL
|
||||
#define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
|
||||
extern char _xiprom[], _exiprom[], __data_loc;
|
||||
|
||||
/* called from head.S with MMU off */
|
||||
|
@ -5,9 +5,6 @@
|
||||
|
||||
#include "test_modules.h"
|
||||
|
||||
#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
|
||||
REPEAT_10000(DECLARE_RETURN);
|
||||
|
||||
/*
|
||||
* Test that modules with many relocations are loaded properly.
|
||||
*/
|
||||
|
@ -47,4 +47,7 @@
|
||||
__REPEAT_10000_1(f, 8); \
|
||||
__REPEAT_10000_1(f, 9)
|
||||
|
||||
#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
|
||||
REPEAT_10000(DECLARE_RETURN);
|
||||
|
||||
#endif
|
||||
|
@ -22,7 +22,7 @@
|
||||
|
||||
#ifdef CONFIG_DEBUG_BUGVERBOSE
|
||||
|
||||
#define _BUG_FLAGS(ins, flags) \
|
||||
#define _BUG_FLAGS(ins, flags, extra) \
|
||||
do { \
|
||||
asm_inline volatile("1:\t" ins "\n" \
|
||||
".pushsection __bug_table,\"aw\"\n" \
|
||||
@ -31,7 +31,8 @@ do { \
|
||||
"\t.word %c1" "\t# bug_entry::line\n" \
|
||||
"\t.word %c2" "\t# bug_entry::flags\n" \
|
||||
"\t.org 2b+%c3\n" \
|
||||
".popsection" \
|
||||
".popsection\n" \
|
||||
extra \
|
||||
: : "i" (__FILE__), "i" (__LINE__), \
|
||||
"i" (flags), \
|
||||
"i" (sizeof(struct bug_entry))); \
|
||||
@ -39,14 +40,15 @@ do { \
|
||||
|
||||
#else /* !CONFIG_DEBUG_BUGVERBOSE */
|
||||
|
||||
#define _BUG_FLAGS(ins, flags) \
|
||||
#define _BUG_FLAGS(ins, flags, extra) \
|
||||
do { \
|
||||
asm_inline volatile("1:\t" ins "\n" \
|
||||
".pushsection __bug_table,\"aw\"\n" \
|
||||
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
|
||||
"\t.word %c0" "\t# bug_entry::flags\n" \
|
||||
"\t.org 2b+%c1\n" \
|
||||
".popsection" \
|
||||
".popsection\n" \
|
||||
extra \
|
||||
: : "i" (flags), \
|
||||
"i" (sizeof(struct bug_entry))); \
|
||||
} while (0)
|
||||
@ -55,7 +57,7 @@ do { \
|
||||
|
||||
#else
|
||||
|
||||
#define _BUG_FLAGS(ins, flags) asm volatile(ins)
|
||||
#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins)
|
||||
|
||||
#endif /* CONFIG_GENERIC_BUG */
|
||||
|
||||
@ -63,8 +65,8 @@ do { \
|
||||
#define BUG() \
|
||||
do { \
|
||||
instrumentation_begin(); \
|
||||
_BUG_FLAGS(ASM_UD2, 0); \
|
||||
unreachable(); \
|
||||
_BUG_FLAGS(ASM_UD2, 0, ""); \
|
||||
__builtin_unreachable(); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
@ -75,9 +77,9 @@ do { \
|
||||
*/
|
||||
#define __WARN_FLAGS(flags) \
|
||||
do { \
|
||||
__auto_type f = BUGFLAG_WARNING|(flags); \
|
||||
instrumentation_begin(); \
|
||||
_BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
|
||||
annotate_reachable(); \
|
||||
_BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
|
||||
instrumentation_end(); \
|
||||
} while (0)
|
||||
|
||||
|
@ -476,6 +476,7 @@
|
||||
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
|
||||
#define MSR_AMD64_IBSOPDATA4 0xc001103d
|
||||
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
|
||||
#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
|
||||
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
|
||||
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
|
||||
#define MSR_AMD64_SEV 0xc0010131
|
||||
|
@ -220,6 +220,42 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
|
||||
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
|
||||
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
|
||||
|
||||
|
||||
/* AVIC */
|
||||
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
|
||||
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
|
||||
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
|
||||
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
|
||||
#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
|
||||
|
||||
#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
|
||||
|
||||
#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
|
||||
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
|
||||
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
|
||||
|
||||
enum avic_ipi_failure_cause {
|
||||
AVIC_IPI_FAILURE_INVALID_INT_TYPE,
|
||||
AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
|
||||
AVIC_IPI_FAILURE_INVALID_TARGET,
|
||||
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* 0xff is broadcast, so the max index allowed for physical APIC ID
|
||||
* table is 0xfe. APIC IDs above 0xff are reserved.
|
||||
*/
|
||||
#define AVIC_MAX_PHYSICAL_ID_COUNT 0xff
|
||||
|
||||
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
|
||||
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
|
||||
|
||||
|
||||
struct vmcb_seg {
|
||||
u16 selector;
|
||||
u16 attrib;
|
||||
|
@ -100,6 +100,13 @@
|
||||
/* Memory mapped from other domains has valid IOMMU entries */
|
||||
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
|
||||
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
|
||||
#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
|
||||
/*
|
||||
* Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
|
||||
* used to store high bits for the Destination ID. This expands the Destination
|
||||
* ID field from 8 to 15 bits, allowing to target APIC IDs up 32768.
|
||||
*/
|
||||
#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
|
||||
|
||||
/*
|
||||
* Leaf 6 (0x40000x05)
|
||||
|
@ -410,6 +410,8 @@ void sgx_encl_release(struct kref *ref)
|
||||
}
|
||||
|
||||
kfree(entry);
|
||||
/* Invoke scheduler to prevent soft lockups. */
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
xa_destroy(&encl->page_array);
|
||||
|
@ -1,5 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <asm/e820/api.h>
|
||||
|
||||
@ -24,31 +23,11 @@ static void resource_clip(struct resource *res, resource_size_t start,
|
||||
res->start = end + 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some BIOS-es contain a bug where they add addresses which map to
|
||||
* system RAM in the PCI host bridge window returned by the ACPI _CRS
|
||||
* method, see commit 4dc2287c1805 ("x86: avoid E820 regions when
|
||||
* allocating address space"). To avoid this Linux by default excludes
|
||||
* E820 reservations when allocating addresses since 2010.
|
||||
* In 2019 some systems have shown-up with E820 reservations which cover
|
||||
* the entire _CRS returned PCI host bridge window, causing all attempts
|
||||
* to assign memory to PCI BARs to fail if Linux uses E820 reservations.
|
||||
*
|
||||
* Ideally Linux would fully stop using E820 reservations, but then
|
||||
* the old systems this was added for will regress.
|
||||
* Instead keep the old behavior for old systems, while ignoring the
|
||||
* E820 reservations for any systems from now on.
|
||||
*/
|
||||
static void remove_e820_regions(struct resource *avail)
|
||||
{
|
||||
int i, year = dmi_get_bios_year();
|
||||
int i;
|
||||
struct e820_entry *entry;
|
||||
|
||||
if (year >= 2018)
|
||||
return;
|
||||
|
||||
pr_info_once("PCI: Removing E820 reservations from host bridge windows\n");
|
||||
|
||||
for (i = 0; i < e820_table->nr_entries; i++) {
|
||||
entry = &e820_table->entries[i];
|
||||
|
||||
|
@ -2306,7 +2306,12 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
|
||||
apic->irr_pending = true;
|
||||
apic->isr_count = 1;
|
||||
} else {
|
||||
apic->irr_pending = (apic_search_irr(apic) != -1);
|
||||
/*
|
||||
* Don't clear irr_pending, searching the IRR can race with
|
||||
* updates from the CPU as APICv is still active from hardware's
|
||||
* perspective. The flag will be cleared as appropriate when
|
||||
* KVM injects the interrupt.
|
||||
*/
|
||||
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
|
||||
}
|
||||
}
|
||||
|
@ -95,7 +95,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
|
||||
}
|
||||
|
||||
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
|
||||
unsigned config, bool exclude_user,
|
||||
u64 config, bool exclude_user,
|
||||
bool exclude_kernel, bool intr,
|
||||
bool in_tx, bool in_tx_cp)
|
||||
{
|
||||
@ -181,7 +181,8 @@ static int cmp_u64(const void *a, const void *b)
|
||||
|
||||
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
|
||||
{
|
||||
unsigned config, type = PERF_TYPE_RAW;
|
||||
u64 config;
|
||||
u32 type = PERF_TYPE_RAW;
|
||||
struct kvm *kvm = pmc->vcpu->kvm;
|
||||
struct kvm_pmu_event_filter *filter;
|
||||
bool allow_event = true;
|
||||
@ -220,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
|
||||
}
|
||||
|
||||
if (type == PERF_TYPE_RAW)
|
||||
config = eventsel & X86_RAW_EVENT_MASK;
|
||||
config = eventsel & AMD64_RAW_EVENT_MASK;
|
||||
|
||||
if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
|
||||
return;
|
||||
|
@ -27,20 +27,6 @@
|
||||
#include "irq.h"
|
||||
#include "svm.h"
|
||||
|
||||
#define SVM_AVIC_DOORBELL 0xc001011b
|
||||
|
||||
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
|
||||
|
||||
/*
|
||||
* 0xff is broadcast, so the max index allowed for physical APIC ID
|
||||
* table is 0xfe. APIC IDs above 0xff are reserved.
|
||||
*/
|
||||
#define AVIC_MAX_PHYSICAL_ID_COUNT 255
|
||||
|
||||
#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
|
||||
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
|
||||
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
|
||||
|
||||
/* AVIC GATAG is encoded using VM and VCPU IDs */
|
||||
#define AVIC_VCPU_ID_BITS 8
|
||||
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
|
||||
@ -73,12 +59,6 @@ struct amd_svm_iommu_ir {
|
||||
void *data; /* Storing pointer to struct amd_ir_data */
|
||||
};
|
||||
|
||||
enum avic_ipi_failure_cause {
|
||||
AVIC_IPI_FAILURE_INVALID_INT_TYPE,
|
||||
AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
|
||||
AVIC_IPI_FAILURE_INVALID_TARGET,
|
||||
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
|
||||
};
|
||||
|
||||
/* Note:
|
||||
* This function is called from IOMMU driver to notify
|
||||
@ -289,6 +269,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void avic_ring_doorbell(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/*
|
||||
* Note, the vCPU could get migrated to a different pCPU at any point,
|
||||
* which could result in signalling the wrong/previous pCPU. But if
|
||||
* that happens the vCPU is guaranteed to do a VMRUN (after being
|
||||
* migrated) and thus will process pending interrupts, i.e. a doorbell
|
||||
* is not needed (and the spurious one is harmless).
|
||||
*/
|
||||
int cpu = READ_ONCE(vcpu->cpu);
|
||||
|
||||
if (cpu != get_cpu())
|
||||
wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
|
||||
u32 icrl, u32 icrh)
|
||||
{
|
||||
@ -304,8 +300,13 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
|
||||
GET_APIC_DEST_FIELD(icrh),
|
||||
icrl & APIC_DEST_MASK))
|
||||
kvm_vcpu_wake_up(vcpu);
|
||||
icrl & APIC_DEST_MASK)) {
|
||||
vcpu->arch.apic->irr_pending = true;
|
||||
svm_complete_interrupt_delivery(vcpu,
|
||||
icrl & APIC_MODE_MASK,
|
||||
icrl & APIC_INT_LEVELTRIG,
|
||||
icrl & APIC_VECTOR_MASK);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -345,8 +346,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
|
||||
avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
|
||||
break;
|
||||
case AVIC_IPI_FAILURE_INVALID_TARGET:
|
||||
WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
|
||||
index, vcpu->vcpu_id, icrh, icrl);
|
||||
break;
|
||||
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
|
||||
WARN_ONCE(1, "Invalid backing page\n");
|
||||
@ -669,52 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
|
||||
return;
|
||||
}
|
||||
|
||||
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
|
||||
{
|
||||
if (!vcpu->arch.apicv_active)
|
||||
return -1;
|
||||
|
||||
kvm_lapic_set_irr(vec, vcpu->arch.apic);
|
||||
|
||||
/*
|
||||
* Pairs with the smp_mb_*() after setting vcpu->guest_mode in
|
||||
* vcpu_enter_guest() to ensure the write to the vIRR is ordered before
|
||||
* the read of guest_mode, which guarantees that either VMRUN will see
|
||||
* and process the new vIRR entry, or that the below code will signal
|
||||
* the doorbell if the vCPU is already running in the guest.
|
||||
*/
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/*
|
||||
* Signal the doorbell to tell hardware to inject the IRQ if the vCPU
|
||||
* is in the guest. If the vCPU is not in the guest, hardware will
|
||||
* automatically process AVIC interrupts at VMRUN.
|
||||
*/
|
||||
if (vcpu->mode == IN_GUEST_MODE) {
|
||||
int cpu = READ_ONCE(vcpu->cpu);
|
||||
|
||||
/*
|
||||
* Note, the vCPU could get migrated to a different pCPU at any
|
||||
* point, which could result in signalling the wrong/previous
|
||||
* pCPU. But if that happens the vCPU is guaranteed to do a
|
||||
* VMRUN (after being migrated) and thus will process pending
|
||||
* interrupts, i.e. a doorbell is not needed (and the spurious
|
||||
* one is harmless).
|
||||
*/
|
||||
if (cpu != get_cpu())
|
||||
wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
|
||||
put_cpu();
|
||||
} else {
|
||||
/*
|
||||
* Wake the vCPU if it was blocking. KVM will then detect the
|
||||
* pending IRQ when checking if the vCPU has a wake event.
|
||||
*/
|
||||
kvm_vcpu_wake_up(vcpu);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return false;
|
||||
|
@ -1457,18 +1457,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
!__nested_vmcb_check_save(vcpu, &save_cached))
goto out_free;

/*
* While the nested guest CR3 is already checked and set by
* KVM_SET_SREGS, it was set when nested state was yet loaded,
* thus MMU might not be initialized correctly.
* Set it again to fix this.
*/

ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
nested_npt_enabled(svm), false);
if (WARN_ON_ONCE(ret))
goto out_free;

/*
* All checks done, we can enter guest mode. Userspace provides
@ -1494,6 +1482,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,

svm_switch_vmcb(svm, &svm->nested.vmcb02);
nested_vmcb02_prepare_control(svm);

/*
* While the nested guest CR3 is already checked and set by
* KVM_SET_SREGS, it was set when nested state was yet loaded,
* thus MMU might not be initialized correctly.
* Set it again to fix this.
*/

ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
nested_npt_enabled(svm), false);
if (WARN_ON_ONCE(ret))
goto out_free;

kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
@ -1585,6 +1585,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 hcr0 = cr0;
bool old_paging = is_paging(vcpu);

#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@ -1601,8 +1602,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
#endif
vcpu->arch.cr0 = cr0;

if (!npt_enabled)
if (!npt_enabled) {
hcr0 |= X86_CR0_PG | X86_CR0_WP;
if (old_paging != is_paging(vcpu))
svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
}

/*
* re-enable caching here because the QEMU bios
@ -1646,8 +1650,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
svm_flush_tlb(vcpu);

vcpu->arch.cr4 = cr4;
if (!npt_enabled)
if (!npt_enabled) {
cr4 |= X86_CR4_PAE;

if (!is_paging(vcpu))
cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
}
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@ -3291,19 +3299,53 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector)
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
int trig_mode, int vector)
{
struct kvm_vcpu *vcpu = apic->vcpu;
/*
* vcpu->arch.apicv_active must be read after vcpu->mode.
* Pairs with smp_store_release in vcpu_enter_guest.
*/
bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);

if (svm_deliver_avic_intr(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
if (!READ_ONCE(vcpu->arch.apicv_active)) {
/* Process the interrupt via inject_pending_event */
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
} else {
trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
trig_mode, vector);
return;
}

trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
if (in_guest_mode) {
/*
* Signal the doorbell to tell hardware to inject the IRQ. If
* the vCPU exits the guest before the doorbell chimes, hardware
* will automatically process AVIC interrupts at the next VMRUN.
*/
avic_ring_doorbell(vcpu);
} else {
/*
* Wake the vCPU if it was blocking. KVM will then detect the
* pending IRQ when checking if the vCPU has a wake event.
*/
kvm_vcpu_wake_up(vcpu);
}
}

static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector)
{
kvm_lapic_set_irr(vector, apic);

/*
* Pairs with the smp_mb_*() after setting vcpu->guest_mode in
* vcpu_enter_guest() to ensure the write to the vIRR is ordered before
* the read of guest_mode. This guarantees that either VMRUN will see
* and process the new vIRR entry, or that svm_complete_interrupt_delivery
* will signal the doorbell if the CPU has already entered the guest.
*/
smp_mb__after_atomic();
svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
}

static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
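The hunk above relies on a full-barrier handshake: the delivery path orders its vIRR write before it reads vcpu->mode, while vcpu_enter_guest() orders its mode update before VMRUN scans the vIRR, so at least one side always observes the other and the interrupt is never lost. The stand-alone C11 sketch below illustrates that handshake in userspace; deliver(), enter_guest(), mode and irr_pending are illustrative stand-ins rather than KVM symbols, and the seq_cst fences play the role of the kernel's smp_mb_*() barriers.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int mode;         /* 0 = outside guest, 1 = "IN_GUEST_MODE" */
static _Atomic int irr_pending;  /* stand-in for the vIRR bit */

/* Sender side: set the pending bit, then decide doorbell vs. wakeup. */
static void deliver(void)
{
	atomic_store_explicit(&irr_pending, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* like smp_mb__after_atomic() */
	if (atomic_load_explicit(&mode, memory_order_relaxed))
		puts("ring doorbell: vCPU is running in guest mode");
	else
		puts("wake vCPU: it will see the pending bit before VMRUN");
}

/* vCPU side: publish guest mode, then scan for pending interrupts. */
static void enter_guest(void)
{
	atomic_store_explicit(&mode, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* like the barrier after setting guest_mode */
	if (atomic_load_explicit(&irr_pending, memory_order_relaxed))
		puts("VMRUN will process the pending vIRR entry");
}

int main(void)
{
	enter_guest();
	deliver();
	return 0;
}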
@ -3353,11 +3395,13 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;

if (svm_nmi_blocked(vcpu))
return 0;

/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
return -EBUSY;

return !svm_nmi_blocked(vcpu);
return 1;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@ -3409,9 +3453,13 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
struct vcpu_svm *svm = to_svm(vcpu);

if (svm->nested.nested_run_pending)
return -EBUSY;

if (svm_interrupt_blocked(vcpu))
return 0;

/*
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
* e.g. if the IRQ arrived asynchronously after checking nested events.
@ -3419,7 +3467,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
return -EBUSY;

return !svm_interrupt_blocked(vcpu);
return 1;
}

static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
@ -4150,11 +4198,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;

if (svm_smi_blocked(vcpu))
return 0;

/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
return -EBUSY;

return !svm_smi_blocked(vcpu);
return 1;
}

static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@ -4248,11 +4299,18 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
|
||||
* Enter the nested guest now
|
||||
*/
|
||||
|
||||
vmcb_mark_all_dirty(svm->vmcb01.ptr);
|
||||
|
||||
vmcb12 = map.hva;
|
||||
nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
|
||||
nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
|
||||
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
|
||||
|
||||
if (ret)
|
||||
goto unmap_save;
|
||||
|
||||
svm->nested.nested_run_pending = 1;
|
||||
|
||||
unmap_save:
|
||||
kvm_vcpu_unmap(vcpu, &map_save, true);
|
||||
unmap_map:
|
||||
@ -4637,6 +4695,7 @@ static __init void svm_set_cpu_caps(void)
|
||||
/* CPUID 0x80000001 and 0x8000000A (SVM features) */
|
||||
if (nested) {
|
||||
kvm_cpu_cap_set(X86_FEATURE_SVM);
|
||||
kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
|
||||
|
||||
if (nrips)
|
||||
kvm_cpu_cap_set(X86_FEATURE_NRIPS);
|
||||
|
@ -489,6 +489,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
|
||||
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
|
||||
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
|
||||
int read, int write);
|
||||
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
|
||||
int trig_mode, int vec);
|
||||
|
||||
/* nested.c */
|
||||
|
||||
@ -556,17 +558,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
|
||||
|
||||
/* avic.c */
|
||||
|
||||
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
|
||||
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
|
||||
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
|
||||
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
|
||||
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
|
||||
|
||||
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
|
||||
|
||||
int avic_ga_log_notifier(u32 ga_tag);
|
||||
void avic_vm_destroy(struct kvm *kvm);
|
||||
int avic_vm_init(struct kvm *kvm);
|
||||
@ -583,12 +574,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
|
||||
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
|
||||
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
|
||||
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
|
||||
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
|
||||
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
|
||||
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
|
||||
uint32_t guest_irq, bool set);
|
||||
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
|
||||
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
|
||||
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* sev.c */
|
||||
|
||||
|
@ -7659,6 +7659,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
vmx->nested.nested_run_pending = 1;
|
||||
vmx->nested.smm.guest_mode = false;
|
||||
}
|
||||
return 0;
|
||||
|
@ -9983,7 +9983,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
||||
* result in virtual interrupt delivery.
|
||||
*/
|
||||
local_irq_disable();
|
||||
vcpu->mode = IN_GUEST_MODE;
|
||||
|
||||
/* Store vcpu->apicv_active before vcpu->mode. */
|
||||
smp_store_release(&vcpu->mode, IN_GUEST_MODE);
|
||||
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
|
||||
|
||||
|
@ -133,32 +133,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
|
||||
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
|
||||
{
|
||||
struct kvm_vcpu_xen *vx = &v->arch.xen;
|
||||
struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
|
||||
struct kvm_memslots *slots = kvm_memslots(v->kvm);
|
||||
bool atomic = (state == RUNSTATE_runnable);
|
||||
uint64_t state_entry_time;
|
||||
unsigned int offset;
|
||||
int __user *user_state;
|
||||
uint64_t __user *user_times;
|
||||
|
||||
kvm_xen_update_runstate(v, state);
|
||||
|
||||
if (!vx->runstate_set)
|
||||
return;
|
||||
|
||||
if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
|
||||
kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
|
||||
return;
|
||||
|
||||
/* We made sure it fits in a single page */
|
||||
BUG_ON(!ghc->memslot);
|
||||
|
||||
if (atomic)
|
||||
pagefault_disable();
|
||||
|
||||
/*
|
||||
* The only difference between 32-bit and 64-bit versions of the
|
||||
* runstate struct us the alignment of uint64_t in 32-bit, which
|
||||
* means that the 64-bit version has an additional 4 bytes of
|
||||
* padding after the first field 'state'.
|
||||
*
|
||||
* So we use 'int __user *user_state' to point to the state field,
|
||||
* and 'uint64_t __user *user_times' for runstate_entry_time. So
|
||||
* the actual array of time[] in each state starts at user_times[1].
|
||||
*/
|
||||
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
|
||||
BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
|
||||
user_state = (int __user *)ghc->hva;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
|
||||
|
||||
offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
|
||||
user_times = (uint64_t __user *)(ghc->hva +
|
||||
offsetof(struct compat_vcpu_runstate_info,
|
||||
state_entry_time));
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
* The only difference is alignment of uint64_t in 32-bit.
|
||||
* So the first field 'state' is accessed directly using
|
||||
* offsetof() (where its offset happens to be zero), while the
|
||||
* remaining fields which are all uint64_t, start at 'offset'
|
||||
* which we tweak here by adding 4.
|
||||
*/
|
||||
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
|
||||
offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
|
||||
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
|
||||
offsetof(struct compat_vcpu_runstate_info, time) + 4);
|
||||
|
||||
if (v->kvm->arch.xen.long_mode)
|
||||
offset = offsetof(struct vcpu_runstate_info, state_entry_time);
|
||||
user_times = (uint64_t __user *)(ghc->hva +
|
||||
offsetof(struct vcpu_runstate_info,
|
||||
state_entry_time));
|
||||
#endif
|
||||
/*
|
||||
* First write the updated state_entry_time at the appropriate
|
||||
@ -172,10 +197,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
|
||||
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
|
||||
sizeof(state_entry_time));
|
||||
|
||||
if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
|
||||
&state_entry_time, offset,
|
||||
sizeof(state_entry_time)))
|
||||
return;
|
||||
if (__put_user(state_entry_time, user_times))
|
||||
goto out;
|
||||
smp_wmb();
|
||||
|
||||
/*
|
||||
@ -189,11 +212,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
|
||||
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
|
||||
sizeof(vx->current_runstate));
|
||||
|
||||
if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
|
||||
&vx->current_runstate,
|
||||
offsetof(struct vcpu_runstate_info, state),
|
||||
sizeof(vx->current_runstate)))
|
||||
return;
|
||||
if (__put_user(vx->current_runstate, user_state))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Write the actual runstate times immediately after the
|
||||
@ -208,24 +228,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
|
||||
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
|
||||
sizeof(vx->runstate_times));
|
||||
|
||||
if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
|
||||
&vx->runstate_times[0],
|
||||
offset + sizeof(u64),
|
||||
sizeof(vx->runstate_times)))
|
||||
return;
|
||||
|
||||
if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
|
||||
goto out;
|
||||
smp_wmb();
|
||||
|
||||
/*
|
||||
* Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
|
||||
* runstate_entry_time field.
|
||||
*/
|
||||
|
||||
state_entry_time &= ~XEN_RUNSTATE_UPDATE;
|
||||
if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
|
||||
&state_entry_time, offset,
|
||||
sizeof(state_entry_time)))
|
||||
return;
|
||||
__put_user(state_entry_time, user_times);
|
||||
smp_wmb();
|
||||
|
||||
out:
|
||||
mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
|
||||
|
||||
if (atomic)
|
||||
pagefault_enable();
|
||||
}
|
||||
|
||||
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
|
||||
@ -443,6 +462,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
|
||||
break;
|
||||
}
|
||||
|
||||
/* It must fit within a single page */
|
||||
if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
|
||||
r = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
|
||||
&vcpu->arch.xen.vcpu_info_cache,
|
||||
data->u.gpa,
|
||||
@ -460,6 +485,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
|
||||
break;
|
||||
}
|
||||
|
||||
/* It must fit within a single page */
|
||||
if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
|
||||
r = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
|
||||
&vcpu->arch.xen.vcpu_time_info_cache,
|
||||
data->u.gpa,
|
||||
@ -481,6 +512,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
|
||||
break;
|
||||
}
|
||||
|
||||
/* It must fit within a single page */
|
||||
if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
|
||||
r = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
|
||||
&vcpu->arch.xen.runstate_cache,
|
||||
data->u.gpa,
|
||||
|
@ -185,8 +185,7 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
|
||||
|
||||
if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
|
||||
xen_teardown_timer(cpu);
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool no_vector_callback __initdata;
|
||||
@ -248,6 +247,11 @@ static __init bool xen_x2apic_available(void)
|
||||
return x2apic_supported();
|
||||
}
|
||||
|
||||
static bool __init msi_ext_dest_id(void)
|
||||
{
|
||||
return cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_EXT_DEST_ID;
|
||||
}
|
||||
|
||||
static __init void xen_hvm_guest_late_init(void)
|
||||
{
|
||||
#ifdef CONFIG_XEN_PVH
|
||||
@ -310,6 +314,7 @@ struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
|
||||
.init.x2apic_available = xen_x2apic_available,
|
||||
.init.init_mem_mapping = xen_hvm_init_mem_mapping,
|
||||
.init.guest_late_init = xen_hvm_guest_late_init,
|
||||
.init.msi_ext_dest_id = msi_ext_dest_id,
|
||||
.runtime.pin_vcpu = xen_pin_vcpu,
|
||||
.ignore_nopv = true,
|
||||
};
|
||||
|
@ -57,6 +57,14 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
|
||||
screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
|
||||
screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
|
||||
|
||||
if (size >= offsetof(struct dom0_vga_console_info,
|
||||
u.vesa_lfb.ext_lfb_base)
|
||||
+ sizeof(info->u.vesa_lfb.ext_lfb_base)
|
||||
&& info->u.vesa_lfb.ext_lfb_base) {
|
||||
screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
|
||||
screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
|
||||
}
|
||||
|
||||
if (info->video_type == XEN_VGATYPE_EFI_LFB) {
|
||||
screen_info->orig_video_isVGA = VIDEO_TYPE_EFI;
|
||||
break;
|
||||
@ -66,14 +74,6 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
|
||||
u.vesa_lfb.mode_attrs)
|
||||
+ sizeof(info->u.vesa_lfb.mode_attrs))
|
||||
screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
|
||||
|
||||
if (size >= offsetof(struct dom0_vga_console_info,
|
||||
u.vesa_lfb.ext_lfb_base)
|
||||
+ sizeof(info->u.vesa_lfb.ext_lfb_base)
|
||||
&& info->u.vesa_lfb.ext_lfb_base) {
|
||||
screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
|
||||
screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -25,12 +25,9 @@ struct alg_type_list {
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
static atomic_long_t alg_memory_allocated;
|
||||
|
||||
static struct proto alg_proto = {
|
||||
.name = "ALG",
|
||||
.owner = THIS_MODULE,
|
||||
.memory_allocated = &alg_memory_allocated,
|
||||
.obj_size = sizeof(struct alg_sock),
|
||||
};
|
||||
|
||||
|
@ -44,6 +44,7 @@ static struct var_t vars[] = {
|
||||
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
|
||||
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
|
||||
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
|
||||
{ PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
|
||||
{ INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
|
||||
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
|
||||
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
|
||||
|
@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
|
||||
res[0].start = pmcg->page0_base_address;
|
||||
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
|
||||
res[0].flags = IORESOURCE_MEM;
|
||||
res[1].start = pmcg->page1_base_address;
|
||||
res[1].end = pmcg->page1_base_address + SZ_4K - 1;
|
||||
res[1].flags = IORESOURCE_MEM;
|
||||
/*
|
||||
* The initial version in DEN0049C lacked a way to describe register
|
||||
* page 1, which makes it broken for most PMCG implementations; in
|
||||
* that case, just let the driver fail gracefully if it expects to
|
||||
* find a second memory resource.
|
||||
*/
|
||||
if (node->revision > 0) {
|
||||
res[1].start = pmcg->page1_base_address;
|
||||
res[1].end = pmcg->page1_base_address + SZ_4K - 1;
|
||||
res[1].flags = IORESOURCE_MEM;
|
||||
}
|
||||
|
||||
if (pmcg->overflow_gsiv)
|
||||
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
|
||||
|
@ -2065,6 +2065,16 @@ bool acpi_ec_dispatch_gpe(void)
|
||||
if (acpi_any_gpe_status_set(first_ec->gpe))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* Cancel the SCI wakeup and process all pending events in case there
|
||||
* are any wakeup ones in there.
|
||||
*
|
||||
* Note that if any non-EC GPEs are active at this point, the SCI will
|
||||
* retrigger after the rearming in acpi_s2idle_wake(), so no events
|
||||
* should be missed by canceling the wakeup here.
|
||||
*/
|
||||
pm_system_cancel_wakeup();
|
||||
|
||||
/*
|
||||
* Dispatch the EC GPE in-band, but do not report wakeup in any case
|
||||
* to allow the caller to process events properly after that.
|
||||
|
@ -736,21 +736,15 @@ bool acpi_s2idle_wake(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Check non-EC GPE wakeups and dispatch the EC GPE. */
|
||||
/*
|
||||
* Check non-EC GPE wakeups and if there are none, cancel the
|
||||
* SCI-related wakeup and dispatch the EC GPE.
|
||||
*/
|
||||
if (acpi_ec_dispatch_gpe()) {
|
||||
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Cancel the SCI wakeup and process all pending events in case
|
||||
* there are any wakeup ones in there.
|
||||
*
|
||||
* Note that if any non-EC GPEs are active at this point, the
|
||||
* SCI will retrigger after the rearming below, so no events
|
||||
* should be missed by canceling the wakeup here.
|
||||
*/
|
||||
pm_system_cancel_wakeup();
|
||||
acpi_os_wait_events_complete();
|
||||
|
||||
/*
|
||||
@ -764,6 +758,7 @@ bool acpi_s2idle_wake(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
pm_wakeup_clear(acpi_sci_irq);
|
||||
rearm_wake_irq(acpi_sci_irq);
|
||||
}
|
||||
|
||||
|
@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev,
|
||||
mem_sleep_current = PM_SUSPEND_TO_IDLE;
|
||||
|
||||
/*
|
||||
* Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
|
||||
* use intel-hid or intel-vbtn but require the EC GPE to be enabled while
|
||||
* suspended for certain wakeup devices to work, so mark it as wakeup-capable.
|
||||
*
|
||||
* Only enable on !AMD as enabling this universally causes problems for a number
|
||||
* of AMD based systems.
|
||||
* Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
|
||||
* EC GPE to be enabled while suspended for certain wakeup devices to
|
||||
* work, so mark it as wakeup-capable.
|
||||
*/
|
||||
if (!acpi_s2idle_vendor_amd())
|
||||
acpi_ec_mark_gpe_for_wake();
|
||||
acpi_ec_mark_gpe_for_wake();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -4029,6 +4029,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
|
||||
/* devices that don't properly handle TRIM commands */
|
||||
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
|
||||
{ "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
|
||||
|
||||
/*
|
||||
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
|
||||
|
@ -322,7 +322,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
|
||||
static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "%d %d\n",
|
||||
return sysfs_emit(buf, "%u %u\n",
|
||||
intr_coalescing_count, intr_coalescing_ticks);
|
||||
}
|
||||
|
||||
@ -332,10 +332,8 @@ static ssize_t fsl_sata_intr_coalescing_store(struct device *dev,
|
||||
{
|
||||
unsigned int coalescing_count, coalescing_ticks;
|
||||
|
||||
if (sscanf(buf, "%d%d",
|
||||
&coalescing_count,
|
||||
&coalescing_ticks) != 2) {
|
||||
printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
|
||||
if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) {
|
||||
dev_err(dev, "fsl-sata: wrong parameter format.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -359,7 +357,7 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
|
||||
rx_watermark &= 0x1f;
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
||||
return sysfs_emit(buf, "%d\n", rx_watermark);
|
||||
return sysfs_emit(buf, "%u\n", rx_watermark);
|
||||
}
|
||||
|
||||
static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
|
||||
@ -373,8 +371,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
|
||||
void __iomem *csr_base = host_priv->csr_base;
|
||||
u32 temp;
|
||||
|
||||
if (sscanf(buf, "%d", &rx_watermark) != 1) {
|
||||
printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
|
||||
if (kstrtouint(buf, 10, &rx_watermark) < 0) {
|
||||
dev_err(dev, "fsl-sata: wrong parameter format.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -382,8 +380,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
|
||||
temp = ioread32(csr_base + TRANSCFG);
|
||||
temp &= 0xffffffe0;
|
||||
iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
|
||||
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
||||
return strlen(buf);
|
||||
}
|
||||
|
||||
|
@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state;
bool events_check_enabled __read_mostly;

/* First wakeup IRQ seen by the kernel in the last cycle. */
unsigned int pm_wakeup_irq __read_mostly;
static unsigned int wakeup_irq[2] __read_mostly;
static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);

/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void)
atomic_dec_if_positive(&pm_abort_suspend);
}

void pm_wakeup_clear(bool reset)
void pm_wakeup_clear(unsigned int irq_number)
{
pm_wakeup_irq = 0;
if (reset)
raw_spin_lock_irq(&wakeup_irq_lock);

if (irq_number && wakeup_irq[0] == irq_number)
wakeup_irq[0] = wakeup_irq[1];
else
wakeup_irq[0] = 0;

wakeup_irq[1] = 0;

raw_spin_unlock_irq(&wakeup_irq_lock);

if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}

void pm_system_irq_wakeup(unsigned int irq_number)
{
if (pm_wakeup_irq == 0) {
pm_wakeup_irq = irq_number;
unsigned long flags;

raw_spin_lock_irqsave(&wakeup_irq_lock, flags);

if (wakeup_irq[0] == 0)
wakeup_irq[0] = irq_number;
else if (wakeup_irq[1] == 0)
wakeup_irq[1] = irq_number;
else
irq_number = 0;

raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);

if (irq_number)
pm_system_wakeup();
}
}

unsigned int pm_wakeup_irq(void)
{
return wakeup_irq[0];
}

/**
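The rework above stops treating pm_wakeup_irq as a single variable: up to two wakeup IRQs are now recorded under wakeup_irq_lock, and clearing the first one promotes the second, so a wakeup source that fired while the first IRQ (typically the ACPI SCI) was being dispatched is not lost. A rough userspace sketch of that bookkeeping, assuming a pthread mutex in place of the raw spinlock and made-up helper names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irqs[2];

static void record_wakeup_irq(unsigned int irq)
{
	int reported = 1;

	pthread_mutex_lock(&lock);
	if (irqs[0] == 0)
		irqs[0] = irq;
	else if (irqs[1] == 0)
		irqs[1] = irq;
	else
		reported = 0;		/* both slots taken, drop it */
	pthread_mutex_unlock(&lock);

	if (reported)
		printf("system wakeup, IRQ %u recorded\n", irq);
}

static void clear_wakeup_irq(unsigned int irq)
{
	pthread_mutex_lock(&lock);
	if (irq && irqs[0] == irq)
		irqs[0] = irqs[1];	/* promote the second recorded IRQ */
	else
		irqs[0] = 0;
	irqs[1] = 0;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	record_wakeup_irq(9);	/* e.g. the ACPI SCI */
	record_wakeup_irq(42);	/* a second wakeup source fires too */
	clear_wakeup_irq(9);	/* SCI dispatched: IRQ 42 is still reported */
	printf("reported wakeup IRQ: %u\n", irqs[0]);
	return 0;
}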
@ -1082,7 +1082,7 @@ out_putf:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void __loop_clr_fd(struct loop_device *lo)
|
||||
static void __loop_clr_fd(struct loop_device *lo, bool release)
|
||||
{
|
||||
struct file *filp;
|
||||
gfp_t gfp = lo->old_gfp_mask;
|
||||
@ -1144,6 +1144,8 @@ static void __loop_clr_fd(struct loop_device *lo)
|
||||
/* let user-space know about this change */
|
||||
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
|
||||
mapping_set_gfp_mask(filp->f_mapping, gfp);
|
||||
/* This is safe: open() is still holding a reference. */
|
||||
module_put(THIS_MODULE);
|
||||
blk_mq_unfreeze_queue(lo->lo_queue);
|
||||
|
||||
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
|
||||
@ -1151,52 +1153,44 @@ static void __loop_clr_fd(struct loop_device *lo)
|
||||
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
|
||||
int err;
|
||||
|
||||
mutex_lock(&lo->lo_disk->open_mutex);
|
||||
/*
|
||||
* open_mutex has been held already in release path, so don't
|
||||
* acquire it if this function is called in such case.
|
||||
*
|
||||
* If the reread partition isn't from release path, lo_refcnt
|
||||
* must be at least one and it can only become zero when the
|
||||
* current holder is released.
|
||||
*/
|
||||
if (!release)
|
||||
mutex_lock(&lo->lo_disk->open_mutex);
|
||||
err = bdev_disk_changed(lo->lo_disk, false);
|
||||
mutex_unlock(&lo->lo_disk->open_mutex);
|
||||
if (!release)
|
||||
mutex_unlock(&lo->lo_disk->open_mutex);
|
||||
if (err)
|
||||
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
|
||||
__func__, lo->lo_number, err);
|
||||
/* Device is gone, no point in returning error */
|
||||
}
|
||||
|
||||
/*
|
||||
* lo->lo_state is set to Lo_unbound here after above partscan has
|
||||
* finished. There cannot be anybody else entering __loop_clr_fd() as
|
||||
* Lo_rundown state protects us from all the other places trying to
|
||||
* change the 'lo' device.
|
||||
*/
|
||||
lo->lo_flags = 0;
|
||||
if (!part_shift)
|
||||
lo->lo_disk->flags |= GENHD_FL_NO_PART;
|
||||
|
||||
fput(filp);
|
||||
}
|
||||
|
||||
static void loop_rundown_completed(struct loop_device *lo)
|
||||
{
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
lo->lo_state = Lo_unbound;
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
module_put(THIS_MODULE);
|
||||
}
|
||||
|
||||
static void loop_rundown_workfn(struct work_struct *work)
|
||||
{
|
||||
struct loop_device *lo = container_of(work, struct loop_device,
|
||||
rundown_work);
|
||||
struct block_device *bdev = lo->lo_device;
|
||||
struct gendisk *disk = lo->lo_disk;
|
||||
|
||||
__loop_clr_fd(lo);
|
||||
kobject_put(&bdev->bd_device.kobj);
|
||||
module_put(disk->fops->owner);
|
||||
loop_rundown_completed(lo);
|
||||
}
|
||||
|
||||
static void loop_schedule_rundown(struct loop_device *lo)
|
||||
{
|
||||
struct block_device *bdev = lo->lo_device;
|
||||
struct gendisk *disk = lo->lo_disk;
|
||||
|
||||
__module_get(disk->fops->owner);
|
||||
kobject_get(&bdev->bd_device.kobj);
|
||||
INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
|
||||
queue_work(system_long_wq, &lo->rundown_work);
|
||||
/*
|
||||
* Need not hold lo_mutex to fput backing file. Calling fput holding
|
||||
* lo_mutex triggers a circular lock dependency possibility warning as
|
||||
* fput can take open_mutex which is usually taken before lo_mutex.
|
||||
*/
|
||||
fput(filp);
|
||||
}
|
||||
|
||||
static int loop_clr_fd(struct loop_device *lo)
|
||||
@ -1228,8 +1222,7 @@ static int loop_clr_fd(struct loop_device *lo)
|
||||
lo->lo_state = Lo_rundown;
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
|
||||
__loop_clr_fd(lo);
|
||||
loop_rundown_completed(lo);
|
||||
__loop_clr_fd(lo, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1754,7 +1747,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
|
||||
* In autoclear mode, stop the loop thread
|
||||
* and remove configuration after last close.
|
||||
*/
|
||||
loop_schedule_rundown(lo);
|
||||
__loop_clr_fd(lo, true);
|
||||
return;
|
||||
} else if (lo->lo_state == Lo_bound) {
|
||||
/*
|
||||
|
@ -56,7 +56,6 @@ struct loop_device {
|
||||
struct gendisk *lo_disk;
|
||||
struct mutex lo_mutex;
|
||||
bool idr_visible;
|
||||
struct work_struct rundown_work;
|
||||
};
|
||||
|
||||
struct loop_cmd {
|
||||
|
@ -366,6 +366,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
|
||||
.config = &modem_foxconn_sdx55_config,
|
||||
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
|
||||
.dma_data_width = 32,
|
||||
.mru_default = 32768,
|
||||
.sideband_wake = false,
|
||||
};
|
||||
|
||||
@ -401,6 +402,7 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
|
||||
.config = &modem_mv31_config,
|
||||
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
|
||||
.dma_data_width = 32,
|
||||
.mru_default = 32768,
|
||||
};
|
||||
|
||||
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
|
||||
|
@ -241,7 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void)
|
||||
bool quirk_unreliable_oscillator = false;
|
||||
|
||||
/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
|
||||
if (of_machine_is_compatible("ti,omap3-beagle") ||
|
||||
if (of_machine_is_compatible("ti,omap3-beagle-ab4") ||
|
||||
of_machine_is_compatible("timll,omap3-devkit8000")) {
|
||||
quirk_unreliable_oscillator = true;
|
||||
counter_32k = -ENODEV;
|
||||
|
@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

return gpiod_get_value(fwd->descs[offset]);
return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
: gpiod_get_value(fwd->descs[offset]);
}

static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
for_each_set_bit(i, mask, fwd->chip.ngpio)
descs[j++] = fwd->descs[i];

error = gpiod_get_array_value(j, descs, NULL, values);
if (fwd->chip.can_sleep)
error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
else
error = gpiod_get_array_value(j, descs, NULL, values);
if (error)
return error;

@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

gpiod_set_value(fwd->descs[offset], value);
if (chip->can_sleep)
gpiod_set_value_cansleep(fwd->descs[offset], value);
else
gpiod_set_value(fwd->descs[offset], value);
}

static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
descs[j++] = fwd->descs[i];
}

gpiod_set_array_value(j, descs, NULL, values);
if (fwd->chip.can_sleep)
gpiod_set_array_value_cansleep(j, descs, NULL, values);
else
gpiod_set_array_value(j, descs, NULL, values);
}

static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
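The forwarder hunks above matter because the aggregator can sit on top of lines that themselves sleep (for example GPIOs behind an I2C expander), in which case only the *_cansleep accessors may be called. A minimal, purely illustrative sketch of the same dispatch decision, with hypothetical value_atomic()/value_cansleep() helpers standing in for the gpiod consumer API:

#include <stdbool.h>
#include <stdio.h>

struct line { int value; bool may_sleep; };

/* Hypothetical stand-ins for gpiod_get_value() / gpiod_get_value_cansleep(). */
static int value_atomic(const struct line *l)   { return l->value; }
static int value_cansleep(const struct line *l) { return l->value; /* may block */ }

/* Pick the sleeping accessor only when the wrapping chip is marked can_sleep. */
static int fwd_get(const struct line *l, bool chip_can_sleep)
{
	return chip_can_sleep ? value_cansleep(l) : value_atomic(l);
}

int main(void)
{
	struct line slow = { .value = 1, .may_sleep = true };

	printf("forwarded value: %d\n", fwd_get(&slow, slow.may_sleep));
	return 0;
}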
@ -223,7 +223,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
|
||||
NULL,
|
||||
chip->base + SIFIVE_GPIO_OUTPUT_EN,
|
||||
chip->base + SIFIVE_GPIO_INPUT_EN,
|
||||
0);
|
||||
BGPIOF_READ_OUTPUT_REG_SET);
|
||||
if (ret) {
|
||||
dev_err(dev, "unable to init generic GPIO\n");
|
||||
return ret;
|
||||
|
@ -570,6 +570,11 @@ static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item)
|
||||
return container_of(group, struct gpio_sim_bank, group);
|
||||
}
|
||||
|
||||
static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank)
|
||||
{
|
||||
return bank->label && *bank->label;
|
||||
}
|
||||
|
||||
static struct gpio_sim_device *
|
||||
gpio_sim_bank_get_device(struct gpio_sim_bank *bank)
|
||||
{
|
||||
@ -770,9 +775,15 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
|
||||
* point the device doesn't exist yet and so dev_name()
|
||||
* is not available.
|
||||
*/
|
||||
hog->chip_label = kasprintf(GFP_KERNEL,
|
||||
"gpio-sim.%u-%s", dev->id,
|
||||
fwnode_get_name(bank->swnode));
|
||||
if (gpio_sim_bank_has_label(bank))
|
||||
hog->chip_label = kstrdup(bank->label,
|
||||
GFP_KERNEL);
|
||||
else
|
||||
hog->chip_label = kasprintf(GFP_KERNEL,
|
||||
"gpio-sim.%u-%s",
|
||||
dev->id,
|
||||
fwnode_get_name(
|
||||
bank->swnode));
|
||||
if (!hog->chip_label) {
|
||||
gpio_sim_remove_hogs(dev);
|
||||
return -ENOMEM;
|
||||
@ -816,7 +827,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
|
||||
|
||||
properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines);
|
||||
|
||||
if (bank->label && (strlen(bank->label) > 0))
|
||||
if (gpio_sim_bank_has_label(bank))
|
||||
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
|
||||
bank->label);
|
||||
|
||||
|
@ -330,7 +330,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
|
||||
goto out_free_lh;
|
||||
}
|
||||
|
||||
ret = gpiod_request(desc, lh->label);
|
||||
ret = gpiod_request_user(desc, lh->label);
|
||||
if (ret)
|
||||
goto out_free_lh;
|
||||
lh->descs[i] = desc;
|
||||
@ -1378,7 +1378,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
|
||||
goto out_free_linereq;
|
||||
}
|
||||
|
||||
ret = gpiod_request(desc, lr->label);
|
||||
ret = gpiod_request_user(desc, lr->label);
|
||||
if (ret)
|
||||
goto out_free_linereq;
|
||||
|
||||
@ -1764,7 +1764,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
|
||||
}
|
||||
}
|
||||
|
||||
ret = gpiod_request(desc, le->label);
|
||||
ret = gpiod_request_user(desc, le->label);
|
||||
if (ret)
|
||||
goto out_free_le;
|
||||
le->desc = desc;
|
||||
|
@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class,
|
||||
* they may be undone on its behalf too.
|
||||
*/
|
||||
|
||||
status = gpiod_request(desc, "sysfs");
|
||||
if (status) {
|
||||
if (status == -EPROBE_DEFER)
|
||||
status = -ENODEV;
|
||||
status = gpiod_request_user(desc, "sysfs");
|
||||
if (status)
|
||||
goto done;
|
||||
}
|
||||
|
||||
status = gpiod_set_transitory(desc, false);
|
||||
if (!status) {
|
||||
|
@ -135,6 +135,18 @@ struct gpio_desc {
|
||||
|
||||
int gpiod_request(struct gpio_desc *desc, const char *label);
|
||||
void gpiod_free(struct gpio_desc *desc);
|
||||
|
||||
static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = gpiod_request(desc, label);
|
||||
if (ret == -EPROBE_DEFER)
|
||||
ret = -ENODEV;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
|
||||
unsigned long lflags, enum gpiod_flags dflags);
|
||||
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
|
||||
|
@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
|
||||
adev->gfx.config.max_sh_per_se *
|
||||
adev->gfx.config.max_shader_engines);
|
||||
|
||||
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
|
||||
switch (adev->ip_versions[GC_HWIP][0]) {
|
||||
case IP_VERSION(10, 3, 1):
|
||||
case IP_VERSION(10, 3, 3):
|
||||
/* Get SA disabled bitmap from eFuse setting */
|
||||
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
|
||||
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
|
||||
@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
|
||||
disabled_sa = tmp;
|
||||
|
||||
WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3653,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
|
||||
|
||||
/* Use GRPH_PFLIP interrupt */
|
||||
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
|
||||
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
|
||||
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
|
||||
i++) {
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
|
||||
if (r) {
|
||||
|
@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
|
||||
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
|
||||
|
||||
if (result == VBIOSSMC_Result_Failed) {
|
||||
ASSERT(0);
|
||||
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
|
||||
param == TABLE_WATERMARKS)
|
||||
DC_LOG_WARNING("Watermarks table not configured properly by SMU");
|
||||
else
|
||||
ASSERT(0);
|
||||
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1220,6 +1220,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
|
||||
|
||||
dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
|
||||
|
||||
dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
|
||||
|
||||
if (dc->res_pool->dmcu != NULL)
|
||||
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
|
||||
}
|
||||
|
@ -202,6 +202,7 @@ struct dc_caps {
|
||||
bool edp_dsc_support;
|
||||
bool vbios_lttpr_aware;
|
||||
bool vbios_lttpr_enable;
|
||||
uint32_t max_otg_num;
|
||||
};
|
||||
|
||||
struct dc_bug_wa {
|
||||
|
@ -1834,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
|
||||
break;
|
||||
}
|
||||
}
|
||||
// We are trying to enable eDP, don't power down VDD
|
||||
if (can_apply_edp_fast_boot)
|
||||
|
||||
/*
|
||||
* TO-DO: So far the code logic below only addresses single eDP case.
|
||||
* For dual eDP case, there are a few things that need to be
|
||||
* implemented first:
|
||||
*
|
||||
* 1. Change the fastboot logic above, so eDP link[0 or 1]'s
|
||||
* stream[0 or 1] will all be checked.
|
||||
*
|
||||
* 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
|
||||
* for each eDP.
|
||||
*
|
||||
* Once above 2 things are completed, we can then change the logic below
|
||||
* correspondingly, so dual eDP case will be fully covered.
|
||||
*/
|
||||
|
||||
// We are trying to enable eDP, don't power down VDD if eDP stream is existing
|
||||
if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
|
||||
keep_edp_vdd_on = true;
|
||||
DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
|
||||
} else {
|
||||
DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
|
||||
}
|
||||
}
|
||||
|
||||
// Check seamless boot support
|
||||
|
@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
|
||||
.timing_trace = false,
|
||||
.clock_trace = true,
|
||||
.disable_pplib_clock_request = true,
|
||||
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
|
||||
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
|
||||
.force_single_disp_pipe_split = false,
|
||||
.disable_dcc = DCC_ENABLE,
|
||||
.vsr_support = true,
|
||||
|
@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
|
||||
ret_val = wm_ns * refclk_mhz;
|
||||
ret_val /= 1000;
|
||||
|
||||
if (ret_val > clamp_value)
|
||||
if (ret_val > clamp_value) {
|
||||
/* clamping WMs is abnormal, unexpected and may lead to underflow*/
|
||||
ASSERT(0);
|
||||
ret_val = clamp_value;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
|
||||
hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
|
||||
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
|
||||
|
||||
@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
|
||||
hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
|
||||
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
|
||||
} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
|
||||
@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
|
||||
hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
|
||||
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
|
||||
|
||||
@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
|
||||
hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
|
||||
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
|
||||
} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
|
||||
@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
|
||||
hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
|
||||
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
|
||||
|
||||
@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
|
||||
hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
|
||||
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
|
||||
} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
|
||||
@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
|
||||
hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
|
||||
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
|
||||
|
||||
@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
|
||||
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
|
||||
hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
|
||||
prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0x3fff);
|
||||
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
|
||||
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
|
||||
} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
|
||||
@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
|
||||
@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->a.cstate_pstate.cstate_exit_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->a.cstate_pstate.cstate_exit_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
|
||||
@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
|
||||
@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->a.cstate_pstate.cstate_exit_z8_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->a.cstate_pstate.cstate_exit_z8_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
|
||||
@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
|
||||
@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->b.cstate_pstate.cstate_exit_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->b.cstate_pstate.cstate_exit_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
|
||||
@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
|
||||
@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->b.cstate_pstate.cstate_exit_z8_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->b.cstate_pstate.cstate_exit_z8_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
|
||||
@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
|
||||
@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->c.cstate_pstate.cstate_exit_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->c.cstate_pstate.cstate_exit_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
|
||||
@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
|
||||
@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
|
||||
watermarks->c.cstate_pstate.cstate_exit_z8_ns;
|
||||
prog_wm_value = convert_and_clamp(
|
||||
watermarks->c.cstate_pstate.cstate_exit_z8_ns,
|
||||
refclk_mhz, 0x1fffff);
|
||||
refclk_mhz, 0xffff);
|
||||
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
|
||||
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
|
||||
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
|
||||
@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
            watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
            watermarks->d.cstate_pstate.cstate_exit_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->d.cstate_pstate.cstate_exit_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
            watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
            watermarks->d.cstate_pstate.cstate_exit_z8_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->d.cstate_pstate.cstate_exit_z8_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
            watermarks->a.cstate_pstate.pstate_change_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->a.cstate_pstate.pstate_change_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
            watermarks->b.cstate_pstate.pstate_change_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->b.cstate_pstate.pstate_change_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
            watermarks->c.cstate_pstate.pstate_change_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->c.cstate_pstate.pstate_change_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
            watermarks->d.cstate_pstate.pstate_change_ns;
        prog_wm_value = convert_and_clamp(
                watermarks->d.cstate_pstate.pstate_change_ns,
                refclk_mhz, 0x1fffff);
                refclk_mhz, 0xffff);
        REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
        DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
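Each of the hunks above only swaps the clamp argument passed to convert_and_clamp(), which turns a watermark expressed in nanoseconds into reference-clock cycles and caps the result so it fits the register field being programmed. A minimal sketch of a helper with that shape, consistent with the call sites shown but not necessarily the driver's exact implementation:

    #include <stdint.h>

    /*
     * Sketch only: convert a watermark in nanoseconds to refclk cycles and
     * clamp it to the register field maximum. The in-tree helper may differ.
     */
    static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz,
                                      uint32_t clamp_value)
    {
        /* ns * MHz yields cycles * 1000, so divide back down */
        uint32_t cycles = wm_ns * refclk_mhz / 1000;

        /* cap to the largest value the register field can hold */
        return cycles > clamp_value ? clamp_value : cycles;
    }

With a 24 MHz reference clock, for example, a 5000 ns watermark programs 120 cycles; the clamp only matters for very large latencies, where 0xffff caps the field at 65535 cycles while 0x1fffff allows up to 2097151.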
@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
          attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
          attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
          attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
          attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
          attr == &sensor_dev_attr_power1_label.dev_attr.attr))
          attr == &sensor_dev_attr_power2_label.dev_attr.attr))
        return 0;

    return effective_mode;
@ -7,6 +7,7 @@
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/math64.h>
@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
/*
 * ui2bc - UI time periods to byte clock cycles
 */
static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
static u32 ui2bc(unsigned int ui)
{
    u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

    return DIV64_U64_ROUND_UP(ui * dsi->lanes,
                              dsi->mode.clock * 1000 * bpp);
    return DIV_ROUND_UP(ui, BITS_PER_BYTE);
}

/*
@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi)
    }

    /* values in byte clock cycles */
    cycles = ui2bc(dsi, cfg->clk_pre);
    cycles = ui2bc(cfg->clk_pre);
    DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
    nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
    cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
    DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
    cycles += ui2bc(dsi, cfg->clk_pre);
    cycles += ui2bc(cfg->clk_pre);
    DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
    nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
    cycles = ps2bc(dsi, cfg->hs_exit);
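The ui2bc() rework above stops deriving the byte-clock count from the pixel clock, lane count and bpp, and instead rounds the UI (unit interval) count up to byte clocks directly: the HS byte clock runs at one eighth of the bit rate, so one byte clock period spans BITS_PER_BYTE (8) UI. A small standalone sketch of the new conversion; the function name mirrors the hunk, and the macros are redefined here only so the example compiles outside the kernel:

    #include <stdio.h>

    #define BITS_PER_BYTE 8
    /* Rounds up, like the kernel's DIV_ROUND_UP() */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* New-style conversion: UI time periods to byte clock cycles */
    static unsigned int ui2bc(unsigned int ui)
    {
        return DIV_ROUND_UP(ui, BITS_PER_BYTE);
    }

    int main(void)
    {
        /* e.g. a clk_pre of 10 UI programs 2 byte clock cycles */
        printf("%u\n", ui2bc(10));
        return 0;
    }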
@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state);
 *
 * The notifier is called with no locks held. The new hw_state and sw_state
 * can be retrieved using the drm_privacy_screen_get_state() function.
 * A pointer to the drm_privacy_screen's struct is passed as the void *data
 * A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
 * argument of the notifier_block's notifier_call.
 *
 * The notifier will NOT be called when changes are made through
@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        vlv_wm_sanitize(dev_priv);
    } else if (DISPLAY_VER(dev_priv) >= 9) {
        skl_wm_get_hw_state(dev_priv);
        skl_wm_sanitize(dev_priv);
    } else if (HAS_PCH_SPLIT(dev_priv)) {
        ilk_wm_get_hw_state(dev_priv);
    }
@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector,
         struct drm_display_mode *fixed_mode)
{
    struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    struct intel_encoder *encoder = connector->encoder;
    struct drm_display_mode *downclock_mode = NULL;

    INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector,
        return NULL;
    }

    if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
        encoder->port != PORT_A) {
        drm_dbg_kms(&dev_priv->drm,
                    "DRRS only supported on eDP port A\n");
        return NULL;
    }

    if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
        drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
        return NULL;
@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,

        if (!IS_ERR(fence))
            goto out;
    } else if (move_deps) {
        int err = i915_deps_sync(move_deps, ctx);
    } else {
        int err = PTR_ERR(fence);

        if (err)
            return ERR_PTR(err);
        if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
            return fence;

        if (move_deps) {
            err = i915_deps_sync(move_deps, ctx);
            if (err)
                return ERR_PTR(err);
        }
    }

    /* Error intercept failed or no accelerated migration to start with */
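In the __i915_ttm_move() hunk above, the old code only synchronized the move dependencies when move_deps was set; the new code first extracts the error from the fence pointer, hands back only interruptible or retryable errors (-EINTR, -ERESTARTSYS, -EAGAIN) to the caller, and otherwise falls through to a synchronous dependency wait. A hedged, self-contained sketch of that intercept-or-fall-back pattern; the names here are illustrative, not the i915 API:

    #include <errno.h>

    /*
     * Illustration only: mirrors the control flow of the hunk above.
     * sync_wait stands in for a synchronous dependency wait such as
     * i915_deps_sync().
     */
    static int migrate_or_fall_back(int async_err, int (*sync_wait)(void))
    {
        if (!async_err)
            return 0;               /* async migration was set up fine */

        /* only "restart/retry" errors are handed back to the caller */
        if (async_err == -EINTR || async_err == -ERESTARTSYS ||
            async_err == -EAGAIN)
            return async_err;

        /* error intercept failed: wait for dependencies synchronously */
        return sync_wait();
    }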
@ -6,6 +6,7 @@
#ifndef __I915_MM_H__
#define __I915_MM_H__

#include <linux/bug.h>
#include <linux/types.h>

struct vm_area_struct;
Some files were not shown because too many files have changed in this diff.