commit 5c39f26e67

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

    Trivial conflict in CAN, keep the net-next + the byteswap wrapper.

    Conflicts:
        drivers/net/can/usb/gs_usb.c

    Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/.mailmap b/.mailmap
@@ -290,6 +290,7 @@ Santosh Shilimkar <ssantosh@kernel.org>
 Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
+Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
 Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
 Sebastian Reichel <sre@kernel.org> <sre@debian.org>
@@ -109,30 +109,6 @@ Description:
 		When counting down the counter start from preset value
 		and fire event when reach 0.
 
-What:		/sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
-KernelVersion:	4.12
-Contact:	benjamin.gaignard@st.com
-Description:
-		Reading returns the list possible quadrature modes.
-
-What:		/sys/bus/iio/devices/iio:deviceX/in_count0_quadrature_mode
-KernelVersion:	4.12
-Contact:	benjamin.gaignard@st.com
-Description:
-		Configure the device counter quadrature modes:
-
-		channel_A:
-			Encoder A input servers as the count input and B as
-			the UP/DOWN direction control input.
-
-		channel_B:
-			Encoder B input serves as the count input and A as
-			the UP/DOWN direction control input.
-
-		quadrature:
-			Encoder A and B inputs are mixed to get direction
-			and count with a scale of 0.25.
-
 What:		/sys/bus/iio/devices/iio:deviceX/in_count_enable_mode_available
 KernelVersion:	4.12
 Contact:	benjamin.gaignard@st.com
@@ -76,6 +76,12 @@ properties:
   resets:
     maxItems: 1
 
+  wifi-2.4ghz-coexistence:
+    type: boolean
+    description: >
+      Should the pixel frequencies in the WiFi frequencies range be
+      avoided?
+
 required:
   - compatible
   - reg
@@ -8,10 +8,16 @@ Required properties:
 
 - reg : The I2C address of the device.
 
+Optional properties:
+
+- realtek,power-up-delay-ms
+  Set a delay time for flush work to be completed,
+  this value is adjustable depending on platform.
+
 Example:
 
 rt1015: codec@28 {
 	compatible = "realtek,rt1015";
 	reg = <0x28>;
+	realtek,power-up-delay-ms = <50>;
 };
@@ -149,11 +149,11 @@ vidtv_psi.[ch]
 Because the generator is implemented in a separate file, it can be
 reused elsewhere in the media subsystem.
 
-Currently vidtv supports working with 3 PSI tables: PAT, PMT and
-SDT.
+Currently vidtv supports working with 5 PSI tables: PAT, PMT,
+SDT, NIT and EIT.
 
 The specification for PAT and PMT can be found in *ISO 13818-1:
-Systems*, while the specification for the SDT can be found in *ETSI
+Systems*, while the specification for the SDT, NIT, EIT can be found in *ETSI
 EN 300 468: Specification for Service Information (SI) in DVB
 systems*.
 
@@ -197,6 +197,8 @@ vidtv_channel.[ch]
 
 #. Their programs will be concatenated to populate the PAT
 
+#. Their events will be concatenated to populate the EIT
+
 #. For each program in the PAT, a PMT section will be created
 
 #. The PMT section for a channel will be assigned its streams.
@@ -256,6 +258,42 @@ Using dvb-fe-tool
 The first step to check whether the demod loaded successfully is to run::
 
 	$ dvb-fe-tool
+	Device Dummy demod for DVB-T/T2/C/S/S2 (/dev/dvb/adapter0/frontend0) capabilities:
+	     CAN_FEC_1_2
+	     CAN_FEC_2_3
+	     CAN_FEC_3_4
+	     CAN_FEC_4_5
+	     CAN_FEC_5_6
+	     CAN_FEC_6_7
+	     CAN_FEC_7_8
+	     CAN_FEC_8_9
+	     CAN_FEC_AUTO
+	     CAN_GUARD_INTERVAL_AUTO
+	     CAN_HIERARCHY_AUTO
+	     CAN_INVERSION_AUTO
+	     CAN_QAM_16
+	     CAN_QAM_32
+	     CAN_QAM_64
+	     CAN_QAM_128
+	     CAN_QAM_256
+	     CAN_QAM_AUTO
+	     CAN_QPSK
+	     CAN_TRANSMISSION_MODE_AUTO
+	DVB API Version 5.11, Current v5 delivery system: DVBC/ANNEX_A
+	Supported delivery systems:
+	    DVBT
+	    DVBT2
+	    [DVBC/ANNEX_A]
+	    DVBS
+	    DVBS2
+	Frequency range for the current standard:
+	From:            51.0 MHz
+	To:              2.15 GHz
+	Step:            62.5 kHz
+	Tolerance:       29.5 MHz
+	Symbol rate ranges for the current standard:
+	From:            1.00 MBauds
+	To:              45.0 MBauds
 
 This should return what is currently set up at the demod struct, i.e.::
 
@@ -314,7 +352,7 @@ For this, one should provide a configuration file known as a 'scan file',
 here's an example::
 
 	[Channel]
-	FREQUENCY = 330000000
+	FREQUENCY = 474000000
 	MODULATION = QAM/AUTO
 	SYMBOL_RATE = 6940000
 	INNER_FEC = AUTO
@@ -335,6 +373,14 @@ You can browse scan tables online here: `dvb-scan-tables
 Assuming this channel is named 'channel.conf', you can then run::
 
 	$ dvbv5-scan channel.conf
+	dvbv5-scan ~/vidtv.conf
+	ERROR    command BANDWIDTH_HZ (5) not found during retrieve
+	Cannot calc frequency shift. Either bandwidth/symbol-rate is unavailable (yet).
+	Scanning frequency #1 330000000
+	    (0x00) Signal= -68.00dBm
+	Scanning frequency #2 474000000
+	       Lock   (0x1f) Signal= -34.45dBm C/N= 33.74dB UCB= 0
+	Service Beethoven, provider LinuxTV.org: digital television
 
 For more information on dvb-scan, check its documentation online here:
 `dvb-scan Documentation <https://www.linuxtv.org/wiki/index.php/Dvbscan>`_.
@@ -344,23 +390,38 @@ Using dvb-zap
 
 dvbv5-zap is a command line tool that can be used to record MPEG-TS to disk. The
 typical use is to tune into a channel and put it into record mode. The example
-below - which is taken from the documentation - illustrates that::
+below - which is taken from the documentation - illustrates that\ [1]_::
 
-	$ dvbv5-zap -c dvb_channel.conf "trilhas sonoras" -r
-	using demux '/dev/dvb/adapter0/demux0'
+	$ dvbv5-zap -c dvb_channel.conf "beethoven" -o music.ts -P -t 10
+	using demux 'dvb0.demux0'
 	reading channels from file 'dvb_channel.conf'
-	service has pid type 05: 204
-	tuning to 573000000 Hz
-	audio pid 104
-	dvb_set_pesfilter 104
-	Lock   (0x1f) Quality= Good Signal= 100.00% C/N= -13.80dB UCB= 70 postBER= 3.14x10^-3 PER= 0
-	DVR interface '/dev/dvb/adapter0/dvr0' can now be opened
+	tuning to 474000000 Hz
+	pass all PID's to TS
+	dvb_set_pesfilter 8192
+	dvb_dev_set_bufsize: buffer set to 6160384
+	Lock   (0x1f) Quality= Good Signal= -34.66dBm C/N= 33.41dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+	Lock   (0x1f) Quality= Good Signal= -34.57dBm C/N= 33.46dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+	Record to file 'music.ts' started
+	received 24587768 bytes (2401 Kbytes/sec)
+	Lock   (0x1f) Quality= Good Signal= -34.42dBm C/N= 33.89dB UCB= 0 postBER= 0 preBER= 2.44x10^-3 PER= 0
 
-The channel can be watched by playing the contents of the DVR interface, with
-some player that recognizes the MPEG-TS format, such as *mplayer* or *vlc*.
+.. [1] In this example, it records 10 seconds with all program ID's stored
+       at the music.ts file.
+
 
+The channel can be watched by playing the contents of the stream with some
+player that recognizes the MPEG-TS format, such as ``mplayer`` or ``vlc``.
+
 By playing the contents of the stream one can visually inspect the workings of
-vidtv, e.g.::
+vidtv, e.g., to play a recorded TS file with::
+
+	$ mplayer music.ts
+
+or, alternatively, running this command on one terminal::
+
+	$ dvbv5-zap -c dvb_channel.conf "beethoven" -P -r &
+
+And, on a second terminal, playing the contents from DVR interface with::
 
 	$ mplayer /dev/dvb/adapter0/dvr0
 
@@ -423,3 +484,30 @@ A nice addition is to simulate some noise when the signal quality is bad by:
 - Updating the error statistics accordingly (e.g. BER, etc).
 
 - Simulating some noise in the encoded data.
+
+Functions and structs used within vidtv
+---------------------------------------
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_bridge.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_channel.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_demod.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_encoder.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_mux.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_pes.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_psi.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_s302m.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_ts.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_common.c
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.c
@ -254,6 +254,32 @@ you will have done run-time testing specific to your change, but at a
|
|||||||
minimum, your changes should survive an ``allyesconfig`` and an
|
minimum, your changes should survive an ``allyesconfig`` and an
|
||||||
``allmodconfig`` build without new warnings or failures.
|
``allmodconfig`` build without new warnings or failures.
|
||||||
|
|
||||||
|
Q: How do I post corresponding changes to user space components?
|
||||||
|
----------------------------------------------------------------
|
||||||
|
A: User space code exercising kernel features should be posted
|
||||||
|
alongside kernel patches. This gives reviewers a chance to see
|
||||||
|
how any new interface is used and how well it works.
|
||||||
|
|
||||||
|
When user space tools reside in the kernel repo itself all changes
|
||||||
|
should generally come as one series. If series becomes too large
|
||||||
|
or the user space project is not reviewed on netdev include a link
|
||||||
|
to a public repo where user space patches can be seen.
|
||||||
|
|
||||||
|
In case user space tooling lives in a separate repository but is
|
||||||
|
reviewed on netdev (e.g. patches to `iproute2` tools) kernel and
|
||||||
|
user space patches should form separate series (threads) when posted
|
||||||
|
to the mailing list, e.g.::
|
||||||
|
|
||||||
|
[PATCH net-next 0/3] net: some feature cover letter
|
||||||
|
└─ [PATCH net-next 1/3] net: some feature prep
|
||||||
|
└─ [PATCH net-next 2/3] net: some feature do it
|
||||||
|
└─ [PATCH net-next 3/3] selftest: net: some feature
|
||||||
|
|
||||||
|
[PATCH iproute2-next] ip: add support for some feature
|
||||||
|
|
||||||
|
Posting as one thread is discouraged because it confuses patchwork
|
||||||
|
(as of patchwork 2.2.2).
|
||||||
|
|
||||||
Q: Any other tips to help ensure my net/net-next patch gets OK'd?
|
Q: Any other tips to help ensure my net/net-next patch gets OK'd?
|
||||||
-----------------------------------------------------------------
|
-----------------------------------------------------------------
|
||||||
A: Attention to detail. Re-read your own work as if you were the
|
A: Attention to detail. Re-read your own work as if you were the
|
||||||
|
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -1995,7 +1995,6 @@ N: lpc18xx
 
 ARM/LPC32XX SOC SUPPORT
 M: Vladimir Zapolskiy <vz@mleia.com>
-M: Sylvain Lemieux <slemieux.tyco@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 T: git git://github.com/vzapolskiy/linux-lpc32xx.git
@@ -3528,11 +3527,12 @@ BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
 M: Arend van Spriel <arend.vanspriel@broadcom.com>
 M: Franky Lin <franky.lin@broadcom.com>
 M: Hante Meuleman <hante.meuleman@broadcom.com>
-M: Chi-Hsien Lin <chi-hsien.lin@cypress.com>
-M: Wright Feng <wright.feng@cypress.com>
+M: Chi-hsien Lin <chi-hsien.lin@infineon.com>
+M: Wright Feng <wright.feng@infineon.com>
+M: Chung-hsien Hsu <chung-hsien.hsu@infineon.com>
 L: linux-wireless@vger.kernel.org
 L: brcm80211-dev-list.pdl@broadcom.com
-L: brcm80211-dev-list@cypress.com
+L: SHA-cyfmac-dev-list@infineon.com
 S: Supported
 F: drivers/net/wireless/broadcom/brcm80211/
 
@@ -9155,6 +9155,7 @@ F: include/linux/iomap.h
 
 IOMMU DRIVERS
 M: Joerg Roedel <joro@8bytes.org>
+M: Will Deacon <will@kernel.org>
 L: iommu@lists.linux-foundation.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
@@ -9638,6 +9639,7 @@ F: Documentation/virt/kvm/s390*
 F: arch/s390/include/asm/gmap.h
 F: arch/s390/include/asm/kvm*
 F: arch/s390/include/uapi/asm/kvm*
+F: arch/s390/kernel/uv.c
 F: arch/s390/kvm/
 F: arch/s390/mm/gmap.c
 F: tools/testing/selftests/kvm/*/s390x/
@@ -13156,7 +13158,9 @@ M: Jesper Dangaard Brouer <hawk@kernel.org>
 M: Ilias Apalodimas <ilias.apalodimas@linaro.org>
 L: netdev@vger.kernel.org
 S: Supported
+F: Documentation/networking/page_pool.rst
 F: include/net/page_pool.h
+F: include/trace/events/page_pool.h
 F: net/core/page_pool.c
 
 PANASONIC LAPTOP ACPI EXTRAS DRIVER
@@ -14798,7 +14802,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
 F: drivers/net/wireless/realtek/rtlwifi/
 
 REALTEK WIRELESS DRIVER (rtw88)
-M: Yan-Hsuan Chuang <yhchuang@realtek.com>
+M: Yan-Hsuan Chuang <tony0620emma@gmail.com>
 L: linux-wireless@vger.kernel.org
 S: Maintained
 F: drivers/net/wireless/realtek/rtw88/
@@ -15771,9 +15775,8 @@ F: drivers/slimbus/
 F: include/linux/slimbus.h
 
 SFC NETWORK DRIVER
-M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M: Edward Cree <ecree@solarflare.com>
-M: Martin Habets <mhabets@solarflare.com>
+M: Edward Cree <ecree.xilinx@gmail.com>
+M: Martin Habets <habetsm.xilinx@gmail.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/sfc/
diff --git a/Makefile b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -243,10 +243,8 @@ static inline int constant_fls(unsigned int x)
 		x <<= 2;
 		r -= 2;
 	}
-	if (!(x & 0x80000000u)) {
-		x <<= 1;
+	if (!(x & 0x80000000u))
 		r -= 1;
-	}
 	return r;
 }
 
@@ -134,8 +134,10 @@
 
 #ifdef CONFIG_ARC_HAS_PAE40
 #define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS	40
 #else
 #define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS	32
 #endif
 
 /**************************************************************************
@@ -38,15 +38,27 @@
 
 #ifdef CONFIG_ARC_DW2_UNWIND
 
-static void seed_unwind_frame_info(struct task_struct *tsk,
-				   struct pt_regs *regs,
+static int
+seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
 		       struct unwind_frame_info *frame_info)
 {
-	/*
-	 * synchronous unwinding (e.g. dump_stack)
-	 *  - uses current values of SP and friends
-	 */
-	if (tsk == NULL && regs == NULL) {
+	if (regs) {
+		/*
+		 * Asynchronous unwinding of intr/exception
+		 *  - Just uses the pt_regs passed
+		 */
+		frame_info->task = tsk;
+
+		frame_info->regs.r27 = regs->fp;
+		frame_info->regs.r28 = regs->sp;
+		frame_info->regs.r31 = regs->blink;
+		frame_info->regs.r63 = regs->ret;
+		frame_info->call_frame = 0;
+	} else if (tsk == NULL || tsk == current) {
+		/*
+		 * synchronous unwinding (e.g. dump_stack)
+		 *  - uses current values of SP and friends
+		 */
 		unsigned long fp, sp, blink, ret;
 		frame_info->task = current;
 
|
|||||||
frame_info->regs.r31 = blink;
|
frame_info->regs.r31 = blink;
|
||||||
frame_info->regs.r63 = ret;
|
frame_info->regs.r63 = ret;
|
||||||
frame_info->call_frame = 0;
|
frame_info->call_frame = 0;
|
||||||
} else if (regs == NULL) {
|
} else {
|
||||||
/*
|
/*
|
||||||
* Asynchronous unwinding of sleeping task
|
* Asynchronous unwinding of a likely sleeping task
|
||||||
* - Gets SP etc from task's pt_regs (saved bottom of kernel
|
* - first ensure it is actually sleeping
|
||||||
* mode stack of task)
|
* - if so, it will be in __switch_to, kernel mode SP of task
|
||||||
|
* is safe-kept and BLINK at a well known location in there
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
if (tsk->state == TASK_RUNNING)
|
||||||
|
return -1;
|
||||||
|
|
||||||
frame_info->task = tsk;
|
frame_info->task = tsk;
|
||||||
|
|
||||||
frame_info->regs.r27 = TSK_K_FP(tsk);
|
frame_info->regs.r27 = TSK_K_FP(tsk);
|
||||||
@ -90,19 +106,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
|
|||||||
frame_info->regs.r28 += 60;
|
frame_info->regs.r28 += 60;
|
||||||
frame_info->call_frame = 0;
|
frame_info->call_frame = 0;
|
||||||
|
|
||||||
} else {
|
|
||||||
/*
|
|
||||||
* Asynchronous unwinding of intr/exception
|
|
||||||
* - Just uses the pt_regs passed
|
|
||||||
*/
|
|
||||||
frame_info->task = tsk;
|
|
||||||
|
|
||||||
frame_info->regs.r27 = regs->fp;
|
|
||||||
frame_info->regs.r28 = regs->sp;
|
|
||||||
frame_info->regs.r31 = regs->blink;
|
|
||||||
frame_info->regs.r63 = regs->ret;
|
|
||||||
frame_info->call_frame = 0;
|
|
||||||
}
|
}
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
@ -116,7 +121,8 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
|
|||||||
unsigned int address;
|
unsigned int address;
|
||||||
struct unwind_frame_info frame_info;
|
struct unwind_frame_info frame_info;
|
||||||
|
|
||||||
seed_unwind_frame_info(tsk, regs, &frame_info);
|
if (seed_unwind_frame_info(tsk, regs, &frame_info))
|
||||||
|
return 0;
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
address = UNW_PC(&frame_info);
|
address = UNW_PC(&frame_info);
|
||||||
|
@@ -30,14 +30,14 @@
  *  -Changes related to MMU v2 (Rel 4.8)
  *
  * Vineetg: Aug 29th 2008
- *  -In TLB Flush operations (Metal Fix MMU) there is a explict command to
+ *  -In TLB Flush operations (Metal Fix MMU) there is a explicit command to
  *   flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
  *   it fails. Thus need to load it with ANY valid value before invoking
  *   TLBIVUTLB cmd
  *
  * Vineetg: Aug 21th 2008:
  *  -Reduced the duration of IRQ lockouts in TLB Flush routines
- *  -Multiple copies of TLB erase code seperated into a "single" function
+ *  -Multiple copies of TLB erase code separated into a "single" function
  *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
  *   in interrupt-safe region.
  *
|
|||||||
*
|
*
|
||||||
* Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
|
* Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
|
||||||
* much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
|
* much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
|
||||||
* Given this, the thrasing problem should never happen because once the 3
|
* Given this, the thrashing problem should never happen because once the 3
|
||||||
* J-TLB entries are created (even though 3rd will knock out one of the prev
|
* J-TLB entries are created (even though 3rd will knock out one of the prev
|
||||||
* two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
|
* two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
|
||||||
*
|
*
|
||||||
@ -127,7 +127,7 @@ static void utlb_invalidate(void)
|
|||||||
* There was however an obscure hardware bug, where uTLB flush would
|
* There was however an obscure hardware bug, where uTLB flush would
|
||||||
* fail when a prior probe for J-TLB (both totally unrelated) would
|
* fail when a prior probe for J-TLB (both totally unrelated) would
|
||||||
* return lkup err - because the entry didn't exist in MMU.
|
* return lkup err - because the entry didn't exist in MMU.
|
||||||
* The Workround was to set Index reg with some valid value, prior to
|
* The Workaround was to set Index reg with some valid value, prior to
|
||||||
* flush. This was fixed in MMU v3
|
* flush. This was fixed in MMU v3
|
||||||
*/
|
*/
|
||||||
unsigned int idx;
|
unsigned int idx;
|
||||||
@ -272,7 +272,7 @@ noinline void local_flush_tlb_all(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Flush the entrie MM for userland. The fastest way is to move to Next ASID
|
* Flush the entire MM for userland. The fastest way is to move to Next ASID
|
||||||
*/
|
*/
|
||||||
noinline void local_flush_tlb_mm(struct mm_struct *mm)
|
noinline void local_flush_tlb_mm(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
@ -303,7 +303,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
|
|||||||
* Difference between this and Kernel Range Flush is
|
* Difference between this and Kernel Range Flush is
|
||||||
* -Here the fastest way (if range is too large) is to move to next ASID
|
* -Here the fastest way (if range is too large) is to move to next ASID
|
||||||
* without doing any explicit Shootdown
|
* without doing any explicit Shootdown
|
||||||
* -In case of kernel Flush, entry has to be shot down explictly
|
* -In case of kernel Flush, entry has to be shot down explicitly
|
||||||
*/
|
*/
|
||||||
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
||||||
unsigned long end)
|
unsigned long end)
|
||||||
@ -620,7 +620,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
|
|||||||
* Super Page size is configurable in hardware (4K to 16M), but fixed once
|
* Super Page size is configurable in hardware (4K to 16M), but fixed once
|
||||||
* RTL builds.
|
* RTL builds.
|
||||||
*
|
*
|
||||||
* The exact THP size a Linx configuration will support is a function of:
|
* The exact THP size a Linux configuration will support is a function of:
|
||||||
* - MMU page size (typical 8K, RTL fixed)
|
* - MMU page size (typical 8K, RTL fixed)
|
||||||
* - software page walker address split between PGD:PTE:PFN (typical
|
* - software page walker address split between PGD:PTE:PFN (typical
|
||||||
* 11:8:13, but can be changed with 1 line)
|
* 11:8:13, but can be changed with 1 line)
|
||||||
@ -698,7 +698,7 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Read the Cache Build Confuration Registers, Decode them and save into
|
/* Read the Cache Build Configuration Registers, Decode them and save into
|
||||||
* the cpuinfo structure for later use.
|
* the cpuinfo structure for later use.
|
||||||
* No Validation is done here, simply read/convert the BCRs
|
* No Validation is done here, simply read/convert the BCRs
|
||||||
*/
|
*/
|
||||||
@ -803,13 +803,13 @@ void arc_mmu_init(void)
|
|||||||
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
|
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Can't be done in processor.h due to header include depenedencies
|
* Can't be done in processor.h due to header include dependencies
|
||||||
*/
|
*/
|
||||||
BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
|
BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* stack top size sanity check,
|
* stack top size sanity check,
|
||||||
* Can't be done in processor.h due to header include depenedencies
|
* Can't be done in processor.h due to header include dependencies
|
||||||
*/
|
*/
|
||||||
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
|
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
|
||||||
|
|
||||||
@ -881,7 +881,7 @@ void arc_mmu_init(void)
|
|||||||
* the duplicate one.
|
* the duplicate one.
|
||||||
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
|
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
|
||||||
*/
|
*/
|
||||||
volatile int dup_pd_silent; /* Be slient abt it or complain (default) */
|
volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
|
||||||
|
|
||||||
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
|
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
|
||||||
struct pt_regs *regs)
|
struct pt_regs *regs)
|
||||||
@ -948,7 +948,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
|
|||||||
|
|
||||||
/***********************************************************************
|
/***********************************************************************
|
||||||
* Diagnostic Routines
|
* Diagnostic Routines
|
||||||
* -Called from Low Level TLB Hanlders if things don;t look good
|
* -Called from Low Level TLB Handlers if things don;t look good
|
||||||
**********************************************************************/
|
**********************************************************************/
|
||||||
|
|
||||||
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
|
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
|
||||||
|
@@ -1472,6 +1472,9 @@ ENTRY(efi_enter_kernel)
 		@ issued from HYP mode take us to the correct handler code. We
 		@ will disable the MMU before jumping to the kernel proper.
 		@
+ ARM(		bic	r1, r1, #(1 << 30)	)	@ clear HSCTLR.TE
+ THUMB(		orr	r1, r1, #(1 << 30)	)	@ set HSCTLR.TE
+		mcr	p15, 4, r1, c1, c0, 0
 		adr	r0, __hyp_reentry_vectors
 		mcr	p15, 4, r0, c12, c0, 0	@ set HYP vector base (HVBAR)
 		isb
@@ -521,7 +521,7 @@
 			ranges = <0x0 0x100000 0x8000>;
 
 			mac_sw: switch@0 {
-				compatible = "ti,am4372-cpsw","ti,cpsw-switch";
+				compatible = "ti,am4372-cpsw-switch", "ti,cpsw-switch";
 				reg = <0x0 0x4000>;
 				ranges = <0 0 0x4000>;
 				clocks = <&cpsw_125mhz_gclk>, <&dpll_clksel_mac_clk>;
@@ -32,8 +32,8 @@
 			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "int0", "int1";
-			clocks = <&mcan_clk>, <&l3_iclk_div>;
-			clock-names = "cclk", "hclk";
+			clocks = <&l3_iclk_div>, <&mcan_clk>;
+			clock-names = "hclk", "cclk";
 			bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
 		};
 	};
@@ -75,6 +75,8 @@
 #define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
 #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
 
+#define MAX_POSSIBLE_PHYSMEM_BITS	32
+
 /*
  * PMD_SHIFT determines the size of the area a second-level page table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
@@ -25,6 +25,8 @@
 #define PTE_HWTABLE_OFF		(0)
 #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
 
+#define MAX_POSSIBLE_PHYSMEM_BITS	40
+
 /*
  * PGDIR_SHIFT determines the size a top-level page table entry can map.
  */
@@ -7,7 +7,6 @@ config ARCH_OMAP2
 	depends on ARCH_MULTI_V6
 	select ARCH_OMAP2PLUS
 	select CPU_V6
-	select PM_GENERIC_DOMAINS if PM
 	select SOC_HAS_OMAP2_SDRC
 
 config ARCH_OMAP3
|
|||||||
select OMAP_DM_TIMER
|
select OMAP_DM_TIMER
|
||||||
select OMAP_GPMC
|
select OMAP_GPMC
|
||||||
select PINCTRL
|
select PINCTRL
|
||||||
|
select PM_GENERIC_DOMAINS if PM
|
||||||
|
select PM_GENERIC_DOMAINS_OF if PM
|
||||||
select RESET_CONTROLLER
|
select RESET_CONTROLLER
|
||||||
select SOC_BUS
|
select SOC_BUS
|
||||||
select TI_SYSC
|
select TI_SYSC
|
||||||
|
@@ -175,8 +175,11 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 		if (mpuss_can_lose_context) {
 			error = cpu_cluster_pm_enter();
 			if (error) {
-				omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
-				goto cpu_cluster_pm_out;
+				index = 0;
+				cx = state_ptr + index;
+				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+				omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+				mpuss_can_lose_context = 0;
 			}
 		}
 	}
|
|||||||
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
|
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
|
||||||
cpu_done[dev->cpu] = true;
|
cpu_done[dev->cpu] = true;
|
||||||
|
|
||||||
cpu_cluster_pm_out:
|
|
||||||
/* Wakeup CPU1 only if it is not offlined */
|
/* Wakeup CPU1 only if it is not offlined */
|
||||||
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
|
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
|
||||||
|
|
||||||
|
@@ -5,20 +5,20 @@
 	usb {
 		compatible = "simple-bus";
 		dma-ranges;
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0x0 0x0 0x68500000 0x00400000>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
 
 		usbphy0: usb-phy@0 {
 			compatible = "brcm,sr-usb-combo-phy";
-			reg = <0x00000000 0x100>;
+			reg = <0x0 0x00000000 0x0 0x100>;
 			#phy-cells = <1>;
 			status = "disabled";
 		};
 
 		xhci0: usb@1000 {
 			compatible = "generic-xhci";
-			reg = <0x00001000 0x1000>;
+			reg = <0x0 0x00001000 0x0 0x1000>;
 			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
 			phys = <&usbphy0 1>, <&usbphy0 0>;
 			phy-names = "phy0", "phy1";
|
|||||||
|
|
||||||
bdc0: usb@2000 {
|
bdc0: usb@2000 {
|
||||||
compatible = "brcm,bdc-v0.16";
|
compatible = "brcm,bdc-v0.16";
|
||||||
reg = <0x00002000 0x1000>;
|
reg = <0x0 0x00002000 0x0 0x1000>;
|
||||||
interrupts = <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
phys = <&usbphy0 0>, <&usbphy0 1>;
|
phys = <&usbphy0 0>, <&usbphy0 1>;
|
||||||
phy-names = "phy0", "phy1";
|
phy-names = "phy0", "phy1";
|
||||||
@ -38,21 +38,21 @@
|
|||||||
|
|
||||||
usbphy1: usb-phy@10000 {
|
usbphy1: usb-phy@10000 {
|
||||||
compatible = "brcm,sr-usb-combo-phy";
|
compatible = "brcm,sr-usb-combo-phy";
|
||||||
reg = <0x00010000 0x100>;
|
reg = <0x0 0x00010000 0x0 0x100>;
|
||||||
#phy-cells = <1>;
|
#phy-cells = <1>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
|
|
||||||
usbphy2: usb-phy@20000 {
|
usbphy2: usb-phy@20000 {
|
||||||
compatible = "brcm,sr-usb-hs-phy";
|
compatible = "brcm,sr-usb-hs-phy";
|
||||||
reg = <0x00020000 0x100>;
|
reg = <0x0 0x00020000 0x0 0x100>;
|
||||||
#phy-cells = <0>;
|
#phy-cells = <0>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
|
|
||||||
xhci1: usb@11000 {
|
xhci1: usb@11000 {
|
||||||
compatible = "generic-xhci";
|
compatible = "generic-xhci";
|
||||||
reg = <0x00011000 0x1000>;
|
reg = <0x0 0x00011000 0x0 0x1000>;
|
||||||
interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>;
|
phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>;
|
||||||
phy-names = "phy0", "phy1", "phy2";
|
phy-names = "phy0", "phy1", "phy2";
|
||||||
@ -62,7 +62,7 @@
|
|||||||
|
|
||||||
bdc1: usb@21000 {
|
bdc1: usb@21000 {
|
||||||
compatible = "brcm,bdc-v0.16";
|
compatible = "brcm,bdc-v0.16";
|
||||||
reg = <0x00021000 0x1000>;
|
reg = <0x0 0x00021000 0x0 0x1000>;
|
||||||
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
phys = <&usbphy2>;
|
phys = <&usbphy2>;
|
||||||
phy-names = "phy0";
|
phy-names = "phy0";
|
||||||
|
@@ -10,18 +10,6 @@
 	model = "NVIDIA Jetson TX2 Developer Kit";
 	compatible = "nvidia,p2771-0000", "nvidia,tegra186";
 
-	aconnect {
-		status = "okay";
-
-		dma-controller@2930000 {
-			status = "okay";
-		};
-
-		interrupt-controller@2a40000 {
-			status = "okay";
-		};
-	};
-
 	i2c@3160000 {
 		power-monitor@42 {
 			compatible = "ti,ina3221";
@@ -54,7 +54,7 @@
 		status = "okay";
 	};
 
-	serial@c280000 {
+	serial@3100000 {
 		status = "okay";
 	};
 
@@ -1161,7 +1161,7 @@
 
 		hsp_aon: hsp@c150000 {
 			compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
-			reg = <0x0c150000 0xa0000>;
+			reg = <0x0c150000 0x90000>;
 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
@@ -1663,16 +1663,6 @@
 		vin-supply = <&vdd_5v0_sys>;
 	};
 
-	vdd_usb_vbus_otg: regulator@11 {
-		compatible = "regulator-fixed";
-		regulator-name = "USB_VBUS_EN0";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-		vin-supply = <&vdd_5v0_sys>;
-	};
-
 	vdd_hdmi: regulator@10 {
 		compatible = "regulator-fixed";
 		regulator-name = "VDD_HDMI_5V0";
|
|||||||
enable-active-high;
|
enable-active-high;
|
||||||
vin-supply = <&vdd_3v3_sys>;
|
vin-supply = <&vdd_3v3_sys>;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
vdd_usb_vbus_otg: regulator@14 {
|
||||||
|
compatible = "regulator-fixed";
|
||||||
|
regulator-name = "USB_VBUS_EN0";
|
||||||
|
regulator-min-microvolt = <5000000>;
|
||||||
|
regulator-max-microvolt = <5000000>;
|
||||||
|
gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
|
||||||
|
enable-active-high;
|
||||||
|
vin-supply = <&vdd_5v0_sys>;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
@@ -8,7 +8,7 @@
 	compatible = "nvidia,tegra234-vdk", "nvidia,tegra234";
 
 	aliases {
-		sdhci3 = "/cbb@0/sdhci@3460000";
+		mmc3 = "/bus@0/mmc@3460000";
 		serial0 = &uarta;
 	};
 
|
|||||||
stdout-path = "serial0:115200n8";
|
stdout-path = "serial0:115200n8";
|
||||||
};
|
};
|
||||||
|
|
||||||
cbb@0 {
|
bus@0 {
|
||||||
serial@3100000 {
|
serial@3100000 {
|
||||||
status = "okay";
|
status = "okay";
|
||||||
};
|
};
|
||||||
|
|
||||||
sdhci@3460000 {
|
mmc@3460000 {
|
||||||
status = "okay";
|
status = "okay";
|
||||||
bus-width = <8>;
|
bus-width = <8>;
|
||||||
non-removable;
|
non-removable;
|
||||||
|
@@ -179,22 +179,22 @@
 	};
 
 	soc: soc {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0 0 0 0xffffffff>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges = <0 0 0 0 0x0 0xffffffff>;
 		dma-ranges;
 		compatible = "simple-bus";
 
 		prng: qrng@e1000 {
 			compatible = "qcom,prng-ee";
-			reg = <0xe3000 0x1000>;
+			reg = <0x0 0xe3000 0x0 0x1000>;
 			clocks = <&gcc GCC_PRNG_AHB_CLK>;
 			clock-names = "core";
 		};
 
 		cryptobam: dma@704000 {
 			compatible = "qcom,bam-v1.7.0";
-			reg = <0x00704000 0x20000>;
+			reg = <0x0 0x00704000 0x0 0x20000>;
 			interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&gcc GCC_CRYPTO_AHB_CLK>;
 			clock-names = "bam_clk";
|
|||||||
|
|
||||||
crypto: crypto@73a000 {
|
crypto: crypto@73a000 {
|
||||||
compatible = "qcom,crypto-v5.1";
|
compatible = "qcom,crypto-v5.1";
|
||||||
reg = <0x0073a000 0x6000>;
|
reg = <0x0 0x0073a000 0x0 0x6000>;
|
||||||
clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
|
clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
|
||||||
<&gcc GCC_CRYPTO_AXI_CLK>,
|
<&gcc GCC_CRYPTO_AXI_CLK>,
|
||||||
<&gcc GCC_CRYPTO_CLK>;
|
<&gcc GCC_CRYPTO_CLK>;
|
||||||
@ -217,7 +217,7 @@
|
|||||||
|
|
||||||
tlmm: pinctrl@1000000 {
|
tlmm: pinctrl@1000000 {
|
||||||
compatible = "qcom,ipq6018-pinctrl";
|
compatible = "qcom,ipq6018-pinctrl";
|
||||||
reg = <0x01000000 0x300000>;
|
reg = <0x0 0x01000000 0x0 0x300000>;
|
||||||
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
gpio-controller;
|
gpio-controller;
|
||||||
#gpio-cells = <2>;
|
#gpio-cells = <2>;
|
||||||
@ -235,7 +235,7 @@
|
|||||||
|
|
||||||
gcc: gcc@1800000 {
|
gcc: gcc@1800000 {
|
||||||
compatible = "qcom,gcc-ipq6018";
|
compatible = "qcom,gcc-ipq6018";
|
||||||
reg = <0x01800000 0x80000>;
|
reg = <0x0 0x01800000 0x0 0x80000>;
|
||||||
clocks = <&xo>, <&sleep_clk>;
|
clocks = <&xo>, <&sleep_clk>;
|
||||||
clock-names = "xo", "sleep_clk";
|
clock-names = "xo", "sleep_clk";
|
||||||
#clock-cells = <1>;
|
#clock-cells = <1>;
|
||||||
@ -244,17 +244,17 @@
|
|||||||
|
|
||||||
tcsr_mutex_regs: syscon@1905000 {
|
tcsr_mutex_regs: syscon@1905000 {
|
||||||
compatible = "syscon";
|
compatible = "syscon";
|
||||||
reg = <0x01905000 0x8000>;
|
reg = <0x0 0x01905000 0x0 0x8000>;
|
||||||
};
|
};
|
||||||
|
|
||||||
tcsr_q6: syscon@1945000 {
|
tcsr_q6: syscon@1945000 {
|
||||||
compatible = "syscon";
|
compatible = "syscon";
|
||||||
reg = <0x01945000 0xe000>;
|
reg = <0x0 0x01945000 0x0 0xe000>;
|
||||||
};
|
};
|
||||||
|
|
||||||
blsp_dma: dma@7884000 {
|
blsp_dma: dma@7884000 {
|
||||||
compatible = "qcom,bam-v1.7.0";
|
compatible = "qcom,bam-v1.7.0";
|
||||||
reg = <0x07884000 0x2b000>;
|
reg = <0x0 0x07884000 0x0 0x2b000>;
|
||||||
interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
clocks = <&gcc GCC_BLSP1_AHB_CLK>;
|
clocks = <&gcc GCC_BLSP1_AHB_CLK>;
|
||||||
clock-names = "bam_clk";
|
clock-names = "bam_clk";
|
||||||
@ -264,7 +264,7 @@
|
|||||||
|
|
||||||
blsp1_uart3: serial@78b1000 {
|
blsp1_uart3: serial@78b1000 {
|
||||||
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
|
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
|
||||||
reg = <0x078b1000 0x200>;
|
reg = <0x0 0x078b1000 0x0 0x200>;
|
||||||
interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
|
clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
|
||||||
<&gcc GCC_BLSP1_AHB_CLK>;
|
<&gcc GCC_BLSP1_AHB_CLK>;
|
||||||
@ -276,7 +276,7 @@
|
|||||||
compatible = "qcom,spi-qup-v2.2.1";
|
compatible = "qcom,spi-qup-v2.2.1";
|
||||||
#address-cells = <1>;
|
#address-cells = <1>;
|
||||||
#size-cells = <0>;
|
#size-cells = <0>;
|
||||||
reg = <0x078b5000 0x600>;
|
reg = <0x0 0x078b5000 0x0 0x600>;
|
||||||
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
spi-max-frequency = <50000000>;
|
spi-max-frequency = <50000000>;
|
||||||
clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
|
clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
|
||||||
@ -291,7 +291,7 @@
|
|||||||
compatible = "qcom,spi-qup-v2.2.1";
|
compatible = "qcom,spi-qup-v2.2.1";
|
||||||
#address-cells = <1>;
|
#address-cells = <1>;
|
||||||
#size-cells = <0>;
|
#size-cells = <0>;
|
||||||
reg = <0x078b6000 0x600>;
|
reg = <0x0 0x078b6000 0x0 0x600>;
|
||||||
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
spi-max-frequency = <50000000>;
|
spi-max-frequency = <50000000>;
|
||||||
clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
|
clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
|
||||||
@ -306,7 +306,7 @@
|
|||||||
compatible = "qcom,i2c-qup-v2.2.1";
|
compatible = "qcom,i2c-qup-v2.2.1";
|
||||||
#address-cells = <1>;
|
#address-cells = <1>;
|
||||||
#size-cells = <0>;
|
#size-cells = <0>;
|
||||||
reg = <0x078b6000 0x600>;
|
reg = <0x0 0x078b6000 0x0 0x600>;
|
||||||
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
|
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
|
||||||
<&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
|
<&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
|
||||||
@ -321,7 +321,7 @@
|
|||||||
compatible = "qcom,i2c-qup-v2.2.1";
|
compatible = "qcom,i2c-qup-v2.2.1";
|
||||||
#address-cells = <1>;
|
#address-cells = <1>;
|
||||||
#size-cells = <0>;
|
#size-cells = <0>;
|
||||||
reg = <0x078b7000 0x600>;
|
reg = <0x0 0x078b7000 0x0 0x600>;
|
||||||
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
|
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
|
||||||
<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
|
<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
|
||||||
@ -336,24 +336,24 @@
|
|||||||
compatible = "qcom,msm-qgic2";
|
compatible = "qcom,msm-qgic2";
|
||||||
interrupt-controller;
|
interrupt-controller;
|
||||||
#interrupt-cells = <0x3>;
|
#interrupt-cells = <0x3>;
|
||||||
reg = <0x0b000000 0x1000>, /*GICD*/
|
reg = <0x0 0x0b000000 0x0 0x1000>, /*GICD*/
|
||||||
<0x0b002000 0x1000>, /*GICC*/
|
<0x0 0x0b002000 0x0 0x1000>, /*GICC*/
|
||||||
<0x0b001000 0x1000>, /*GICH*/
|
<0x0 0x0b001000 0x0 0x1000>, /*GICH*/
|
||||||
<0x0b004000 0x1000>; /*GICV*/
|
<0x0 0x0b004000 0x0 0x1000>; /*GICV*/
|
||||||
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
};
|
};
|
||||||
|
|
||||||
watchdog@b017000 {
|
watchdog@b017000 {
|
||||||
compatible = "qcom,kpss-wdt";
|
compatible = "qcom,kpss-wdt";
|
||||||
interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
|
interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
|
||||||
reg = <0x0b017000 0x40>;
|
reg = <0x0 0x0b017000 0x0 0x40>;
|
||||||
clocks = <&sleep_clk>;
|
clocks = <&sleep_clk>;
|
||||||
timeout-sec = <10>;
|
timeout-sec = <10>;
|
||||||
};
|
};
|
||||||
|
|
||||||
apcs_glb: mailbox@b111000 {
|
apcs_glb: mailbox@b111000 {
|
||||||
compatible = "qcom,ipq6018-apcs-apps-global";
|
compatible = "qcom,ipq6018-apcs-apps-global";
|
||||||
reg = <0x0b111000 0x1000>;
|
reg = <0x0 0x0b111000 0x0 0x1000>;
|
||||||
#clock-cells = <1>;
|
#clock-cells = <1>;
|
||||||
clocks = <&a53pll>, <&xo>;
|
clocks = <&a53pll>, <&xo>;
|
||||||
clock-names = "pll", "xo";
|
clock-names = "pll", "xo";
|
||||||
@ -362,7 +362,7 @@
|
|||||||
|
|
||||||
a53pll: clock@b116000 {
|
a53pll: clock@b116000 {
|
||||||
compatible = "qcom,ipq6018-a53pll";
|
compatible = "qcom,ipq6018-a53pll";
|
||||||
reg = <0x0b116000 0x40>;
|
reg = <0x0 0x0b116000 0x0 0x40>;
|
||||||
#clock-cells = <0>;
|
#clock-cells = <0>;
|
||||||
clocks = <&xo>;
|
clocks = <&xo>;
|
||||||
clock-names = "xo";
|
clock-names = "xo";
|
||||||
@ -377,68 +377,68 @@
|
|||||||
};
|
};
|
||||||
|
|
||||||
timer@b120000 {
|
timer@b120000 {
|
||||||
#address-cells = <1>;
|
#address-cells = <2>;
|
||||||
#size-cells = <1>;
|
#size-cells = <2>;
|
||||||
ranges;
|
ranges;
|
||||||
compatible = "arm,armv7-timer-mem";
|
compatible = "arm,armv7-timer-mem";
|
||||||
reg = <0x0b120000 0x1000>;
|
reg = <0x0 0x0b120000 0x0 0x1000>;
|
||||||
clock-frequency = <19200000>;
|
clock-frequency = <19200000>;
|
||||||
|
|
||||||
frame@b120000 {
|
frame@b120000 {
|
||||||
frame-number = <0>;
|
frame-number = <0>;
|
||||||
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
|
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
|
||||||
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
|
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
reg = <0x0b121000 0x1000>,
|
reg = <0x0 0x0b121000 0x0 0x1000>,
|
||||||
<0x0b122000 0x1000>;
|
<0x0 0x0b122000 0x0 0x1000>;
|
||||||
};
|
};
|
||||||
|
|
||||||
frame@b123000 {
|
frame@b123000 {
|
||||||
frame-number = <1>;
|
frame-number = <1>;
|
||||||
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
reg = <0xb123000 0x1000>;
|
reg = <0x0 0xb123000 0x0 0x1000>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
|
|
||||||
frame@b124000 {
|
frame@b124000 {
|
||||||
frame-number = <2>;
|
frame-number = <2>;
|
||||||
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
reg = <0x0b124000 0x1000>;
|
reg = <0x0 0x0b124000 0x0 0x1000>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
|
|
||||||
frame@b125000 {
|
frame@b125000 {
|
||||||
frame-number = <3>;
|
frame-number = <3>;
|
||||||
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
reg = <0x0b125000 0x1000>;
|
reg = <0x0 0x0b125000 0x0 0x1000>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
|
|
||||||
frame@b126000 {
|
frame@b126000 {
|
||||||
frame-number = <4>;
|
frame-number = <4>;
|
||||||
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
reg = <0x0b126000 0x1000>;
|
reg = <0x0 0x0b126000 0x0 0x1000>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
|
|
||||||
frame@b127000 {
|
frame@b127000 {
|
||||||
frame-number = <5>;
|
frame-number = <5>;
|
||||||
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
reg = <0x0b127000 0x1000>;
|
reg = <0x0 0x0b127000 0x0 0x1000>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
|
|
||||||
frame@b128000 {
|
frame@b128000 {
|
||||||
frame-number = <6>;
|
frame-number = <6>;
|
||||||
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
|
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
|
||||||
reg = <0x0b128000 0x1000>;
|
reg = <0x0 0x0b128000 0x0 0x1000>;
|
||||||
status = "disabled";
|
status = "disabled";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
q6v5_wcss: remoteproc@cd00000 {
|
q6v5_wcss: remoteproc@cd00000 {
|
||||||
compatible = "qcom,ipq8074-wcss-pil";
|
compatible = "qcom,ipq8074-wcss-pil";
|
||||||
reg = <0x0cd00000 0x4040>,
|
reg = <0x0 0x0cd00000 0x0 0x4040>,
|
||||||
<0x004ab000 0x20>;
|
<0x0 0x004ab000 0x0 0x20>;
|
||||||
reg-names = "qdsp6",
|
reg-names = "qdsp6",
|
||||||
"rmb";
|
"rmb";
|
||||||
interrupts-extended = <&intc GIC_SPI 325 IRQ_TYPE_EDGE_RISING>,
|
interrupts-extended = <&intc GIC_SPI 325 IRQ_TYPE_EDGE_RISING>,
|
||||||
|
@@ -243,7 +243,6 @@
 		interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pmic_int>;
-		rockchip,system-power-controller;
 		wakeup-source;
 		#clock-cells = <1>;
 		clock-output-names = "rk808-clkout1", "xin32k";
@@ -20,7 +20,7 @@
 	gmac_clk: gmac-clock {
 		compatible = "fixed-clock";
 		clock-frequency = <125000000>;
-		clock-output-names = "gmac_clk";
+		clock-output-names = "gmac_clkin";
 		#clock-cells = <0>;
 	};
 
@@ -74,14 +74,14 @@
 			label = "red:diy";
 			gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
-			linux,default-trigger = "mmc1";
+			linux,default-trigger = "mmc2";
 		};
 
 		yellow_led: led-2 {
 			label = "yellow:yellow-led";
 			gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
-			linux,default-trigger = "mmc0";
+			linux,default-trigger = "mmc1";
 		};
 	};
 
@ -29,6 +29,9 @@
|
|||||||
i2c6 = &i2c6;
|
i2c6 = &i2c6;
|
||||||
i2c7 = &i2c7;
|
i2c7 = &i2c7;
|
||||||
i2c8 = &i2c8;
|
i2c8 = &i2c8;
|
||||||
|
mmc0 = &sdio0;
|
||||||
|
mmc1 = &sdmmc;
|
||||||
|
mmc2 = &sdhci;
|
||||||
serial0 = &uart0;
|
serial0 = &uart0;
|
||||||
serial1 = &uart1;
|
serial1 = &uart1;
|
||||||
serial2 = &uart2;
|
serial2 = &uart2;
|
||||||
|
@ -115,8 +115,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|||||||
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
|
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
|
||||||
#define pte_valid_not_user(pte) \
|
#define pte_valid_not_user(pte) \
|
||||||
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
|
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
|
||||||
#define pte_valid_young(pte) \
|
|
||||||
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
|
|
||||||
#define pte_valid_user(pte) \
|
#define pte_valid_user(pte) \
|
||||||
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
|
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
|
||||||
|
|
||||||
@ -124,9 +122,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|||||||
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
|
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
|
||||||
* so that we don't erroneously return false for pages that have been
|
* so that we don't erroneously return false for pages that have been
|
||||||
* remapped as PROT_NONE but are yet to be flushed from the TLB.
|
* remapped as PROT_NONE but are yet to be flushed from the TLB.
|
||||||
|
* Note that we can't make any assumptions based on the state of the access
|
||||||
|
* flag, since ptep_clear_flush_young() elides a DSB when invalidating the
|
||||||
|
* TLB.
|
||||||
*/
|
*/
|
||||||
#define pte_accessible(mm, pte) \
|
#define pte_accessible(mm, pte) \
|
||||||
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
|
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* p??_access_permitted() is true for valid user mappings (subject to the
|
* p??_access_permitted() is true for valid user mappings (subject to the
|
||||||
@ -164,13 +165,6 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
|
|||||||
return pmd;
|
return pmd;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline pte_t pte_wrprotect(pte_t pte)
|
|
||||||
{
|
|
||||||
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
|
|
||||||
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
|
|
||||||
return pte;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline pte_t pte_mkwrite(pte_t pte)
|
static inline pte_t pte_mkwrite(pte_t pte)
|
||||||
{
|
{
|
||||||
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
|
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
|
||||||
@ -196,6 +190,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
|
|||||||
return pte;
|
return pte;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline pte_t pte_wrprotect(pte_t pte)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
|
||||||
|
* clear), set the PTE_DIRTY bit.
|
||||||
|
*/
|
||||||
|
if (pte_hw_dirty(pte))
|
||||||
|
pte = pte_mkdirty(pte);
|
||||||
|
|
||||||
|
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
|
||||||
|
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
|
||||||
|
return pte;
|
||||||
|
}
|
||||||
|
|
||||||
static inline pte_t pte_mkold(pte_t pte)
|
static inline pte_t pte_mkold(pte_t pte)
|
||||||
{
|
{
|
||||||
return clear_pte_bit(pte, __pgprot(PTE_AF));
|
return clear_pte_bit(pte, __pgprot(PTE_AF));
|
||||||
@ -845,12 +853,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
|
|||||||
pte = READ_ONCE(*ptep);
|
pte = READ_ONCE(*ptep);
|
||||||
do {
|
do {
|
||||||
old_pte = pte;
|
old_pte = pte;
|
||||||
/*
|
|
||||||
* If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
|
|
||||||
* clear), set the PTE_DIRTY bit.
|
|
||||||
*/
|
|
||||||
if (pte_hw_dirty(pte))
|
|
||||||
pte = pte_mkdirty(pte);
|
|
||||||
pte = pte_wrprotect(pte);
|
pte = pte_wrprotect(pte);
|
||||||
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
|
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
|
||||||
pte_val(old_pte), pte_val(pte));
|
pte_val(old_pte), pte_val(pte));
|
||||||
|
@ -7,6 +7,8 @@
|
|||||||
#ifndef _ARM_PROBES_H
|
#ifndef _ARM_PROBES_H
|
||||||
#define _ARM_PROBES_H
|
#define _ARM_PROBES_H
|
||||||
|
|
||||||
|
#include <asm/insn.h>
|
||||||
|
|
||||||
typedef u32 probe_opcode_t;
|
typedef u32 probe_opcode_t;
|
||||||
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
|
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
|
||||||
|
|
||||||
|
@ -13,6 +13,11 @@
|
|||||||
|
|
||||||
SECTIONS {
|
SECTIONS {
|
||||||
HYP_SECTION(.text)
|
HYP_SECTION(.text)
|
||||||
|
/*
|
||||||
|
* .hyp..data..percpu needs to be page aligned to maintain the same
|
||||||
|
* alignment for when linking into vmlinux.
|
||||||
|
*/
|
||||||
|
. = ALIGN(PAGE_SIZE);
|
||||||
HYP_SECTION_NAME(.data..percpu) : {
|
HYP_SECTION_NAME(.data..percpu) : {
|
||||||
PERCPU_INPUT(L1_CACHE_BYTES)
|
PERCPU_INPUT(L1_CACHE_BYTES)
|
||||||
}
|
}
|
||||||
|
@ -273,6 +273,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
|
|||||||
return extract_bytes(value, addr & 7, len);
|
return extract_bytes(value, addr & 7, len);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
|
||||||
|
gpa_t addr, unsigned int len)
|
||||||
|
{
|
||||||
|
unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
|
||||||
|
int target_vcpu_id = vcpu->vcpu_id;
|
||||||
|
u64 value;
|
||||||
|
|
||||||
|
value = (u64)(mpidr & GENMASK(23, 0)) << 32;
|
||||||
|
value |= ((target_vcpu_id & 0xffff) << 8);
|
||||||
|
|
||||||
|
if (vgic_has_its(vcpu->kvm))
|
||||||
|
value |= GICR_TYPER_PLPIS;
|
||||||
|
|
||||||
|
/* reporting of the Last bit is not supported for userspace */
|
||||||
|
return extract_bytes(value, addr & 7, len);
|
||||||
|
}
|
||||||
|
|
||||||
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
|
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
|
||||||
gpa_t addr, unsigned int len)
|
gpa_t addr, unsigned int len)
|
||||||
{
|
{
|
||||||
@ -593,8 +610,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
|
|||||||
REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
|
REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
|
||||||
vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
|
vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
|
||||||
VGIC_ACCESS_32bit),
|
VGIC_ACCESS_32bit),
|
||||||
REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
|
REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
|
||||||
vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
|
vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
|
||||||
|
vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
|
||||||
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
|
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
|
||||||
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
|
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
|
||||||
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
|
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
|
||||||
|
@ -18,4 +18,10 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif /* CONFIG_SPARSEMEM */
|
#endif /* CONFIG_SPARSEMEM */
|
||||||
|
|
||||||
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||||
|
int memory_add_physaddr_to_nid(u64 addr);
|
||||||
|
#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif /* _ASM_IA64_SPARSEMEM_H */
|
#endif /* _ASM_IA64_SPARSEMEM_H */
|
||||||
|
@ -154,6 +154,7 @@ static inline void pmd_clear(pmd_t *pmdp)
|
|||||||
|
|
||||||
#if defined(CONFIG_XPA)
|
#if defined(CONFIG_XPA)
|
||||||
|
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 40
|
||||||
#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
|
#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
|
||||||
static inline pte_t
|
static inline pte_t
|
||||||
pfn_pte(unsigned long pfn, pgprot_t prot)
|
pfn_pte(unsigned long pfn, pgprot_t prot)
|
||||||
@ -169,6 +170,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
|
|||||||
|
|
||||||
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
|
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
|
||||||
|
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 36
|
||||||
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
|
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
|
||||||
|
|
||||||
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
|
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
|
||||||
@ -183,6 +185,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
|
|||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 32
|
||||||
#ifdef CONFIG_CPU_VR41XX
|
#ifdef CONFIG_CPU_VR41XX
|
||||||
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
|
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
|
||||||
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
|
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
|
||||||
|
@ -248,7 +248,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-string)
|
|||||||
cpu-as-$(CONFIG_40x) += -Wa,-m405
|
cpu-as-$(CONFIG_40x) += -Wa,-m405
|
||||||
cpu-as-$(CONFIG_44x) += -Wa,-m440
|
cpu-as-$(CONFIG_44x) += -Wa,-m440
|
||||||
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
|
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
|
||||||
cpu-as-$(CONFIG_E200) += -Wa,-me200
|
|
||||||
cpu-as-$(CONFIG_E500) += -Wa,-me500
|
cpu-as-$(CONFIG_E500) += -Wa,-me500
|
||||||
|
|
||||||
# When using '-many -mpower4' gas will first try and find a matching power4
|
# When using '-many -mpower4' gas will first try and find a matching power4
|
||||||
|
@ -36,8 +36,10 @@ static inline bool pte_user(pte_t pte)
|
|||||||
*/
|
*/
|
||||||
#ifdef CONFIG_PTE_64BIT
|
#ifdef CONFIG_PTE_64BIT
|
||||||
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
|
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 36
|
||||||
#else
|
#else
|
||||||
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
|
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 32
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -63,6 +63,8 @@
|
|||||||
|
|
||||||
#else /* !__ASSEMBLY__ */
|
#else /* !__ASSEMBLY__ */
|
||||||
|
|
||||||
|
#include <linux/jump_label.h>
|
||||||
|
|
||||||
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
|
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_KUAP
|
#ifdef CONFIG_PPC_KUAP
|
||||||
|
@ -46,5 +46,10 @@ u64 memory_hotplug_max(void);
|
|||||||
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
|
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||||
|
extern int create_section_mapping(unsigned long start, unsigned long end,
|
||||||
|
int nid, pgprot_t prot);
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif /* __KERNEL__ */
|
#endif /* __KERNEL__ */
|
||||||
#endif /* _ASM_MMZONE_H_ */
|
#endif /* _ASM_MMZONE_H_ */
|
||||||
|
@ -153,8 +153,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
|
|||||||
*/
|
*/
|
||||||
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
|
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
|
||||||
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
|
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 36
|
||||||
#else
|
#else
|
||||||
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
|
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 32
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -13,9 +13,9 @@
|
|||||||
#endif /* CONFIG_SPARSEMEM */
|
#endif /* CONFIG_SPARSEMEM */
|
||||||
|
|
||||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||||
extern int create_section_mapping(unsigned long start, unsigned long end,
|
|
||||||
int nid, pgprot_t prot);
|
|
||||||
extern int remove_section_mapping(unsigned long start, unsigned long end);
|
extern int remove_section_mapping(unsigned long start, unsigned long end);
|
||||||
|
extern int memory_add_physaddr_to_nid(u64 start);
|
||||||
|
#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
|
||||||
|
|
||||||
#ifdef CONFIG_NUMA
|
#ifdef CONFIG_NUMA
|
||||||
extern int hot_add_scn_to_nid(unsigned long scn_addr);
|
extern int hot_add_scn_to_nid(unsigned long scn_addr);
|
||||||
@ -26,6 +26,5 @@ static inline int hot_add_scn_to_nid(unsigned long scn_addr)
|
|||||||
}
|
}
|
||||||
#endif /* CONFIG_NUMA */
|
#endif /* CONFIG_NUMA */
|
||||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
#endif /* CONFIG_MEMORY_HOTPLUG */
|
||||||
|
|
||||||
#endif /* __KERNEL__ */
|
#endif /* __KERNEL__ */
|
||||||
#endif /* _ASM_POWERPC_SPARSEMEM_H */
|
#endif /* _ASM_POWERPC_SPARSEMEM_H */
|
||||||
|
@ -1000,8 +1000,6 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
|
|||||||
* Vectors for the FWNMI option. Share common code.
|
* Vectors for the FWNMI option. Share common code.
|
||||||
*/
|
*/
|
||||||
TRAMP_REAL_BEGIN(system_reset_fwnmi)
|
TRAMP_REAL_BEGIN(system_reset_fwnmi)
|
||||||
/* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
|
|
||||||
__IKVM_REAL(system_reset)=0
|
|
||||||
GEN_INT_ENTRY system_reset, virt=0
|
GEN_INT_ENTRY system_reset, virt=0
|
||||||
|
|
||||||
#endif /* CONFIG_PPC_PSERIES */
|
#endif /* CONFIG_PPC_PSERIES */
|
||||||
@ -1412,6 +1410,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
|
|||||||
* If none is found, do a Linux page fault. Linux page faults can happen in
|
* If none is found, do a Linux page fault. Linux page faults can happen in
|
||||||
* kernel mode due to user copy operations of course.
|
* kernel mode due to user copy operations of course.
|
||||||
*
|
*
|
||||||
|
* KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
|
||||||
|
* MMU context, which may cause a DSI in the host, which must go to the
|
||||||
|
* KVM handler. MSR[IR] is not enabled, so the real-mode handler will
|
||||||
|
* always be used regardless of AIL setting.
|
||||||
|
*
|
||||||
* - Radix MMU
|
* - Radix MMU
|
||||||
* The hardware loads from the Linux page table directly, so a fault goes
|
* The hardware loads from the Linux page table directly, so a fault goes
|
||||||
* immediately to Linux page fault.
|
* immediately to Linux page fault.
|
||||||
@ -1422,10 +1425,8 @@ INT_DEFINE_BEGIN(data_access)
|
|||||||
IVEC=0x300
|
IVEC=0x300
|
||||||
IDAR=1
|
IDAR=1
|
||||||
IDSISR=1
|
IDSISR=1
|
||||||
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
|
||||||
IKVM_SKIP=1
|
IKVM_SKIP=1
|
||||||
IKVM_REAL=1
|
IKVM_REAL=1
|
||||||
#endif
|
|
||||||
INT_DEFINE_END(data_access)
|
INT_DEFINE_END(data_access)
|
||||||
|
|
||||||
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
|
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
|
||||||
@ -1464,6 +1465,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
|
|||||||
* ppc64_bolted_size (first segment). The kernel handler must avoid stomping
|
* ppc64_bolted_size (first segment). The kernel handler must avoid stomping
|
||||||
* on user-handler data structures.
|
* on user-handler data structures.
|
||||||
*
|
*
|
||||||
|
* KVM: Same as 0x300, DSLB must test for KVM guest.
|
||||||
|
*
|
||||||
* A dedicated save area EXSLB is used (XXX: but it actually need not be
|
* A dedicated save area EXSLB is used (XXX: but it actually need not be
|
||||||
* these days, we could use EXGEN).
|
* these days, we could use EXGEN).
|
||||||
*/
|
*/
|
||||||
@ -1472,10 +1475,8 @@ INT_DEFINE_BEGIN(data_access_slb)
|
|||||||
IAREA=PACA_EXSLB
|
IAREA=PACA_EXSLB
|
||||||
IRECONCILE=0
|
IRECONCILE=0
|
||||||
IDAR=1
|
IDAR=1
|
||||||
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
|
||||||
IKVM_SKIP=1
|
IKVM_SKIP=1
|
||||||
IKVM_REAL=1
|
IKVM_REAL=1
|
||||||
#endif
|
|
||||||
INT_DEFINE_END(data_access_slb)
|
INT_DEFINE_END(data_access_slb)
|
||||||
|
|
||||||
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
|
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
|
||||||
|
@ -156,6 +156,7 @@ __after_mmu_off:
|
|||||||
bl initial_bats
|
bl initial_bats
|
||||||
bl load_segment_registers
|
bl load_segment_registers
|
||||||
BEGIN_MMU_FTR_SECTION
|
BEGIN_MMU_FTR_SECTION
|
||||||
|
bl reloc_offset
|
||||||
bl early_hash_table
|
bl early_hash_table
|
||||||
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
#if defined(CONFIG_BOOTX_TEXT)
|
#if defined(CONFIG_BOOTX_TEXT)
|
||||||
@ -920,7 +921,7 @@ early_hash_table:
|
|||||||
ori r6, r6, 3 /* 256kB table */
|
ori r6, r6, 3 /* 256kB table */
|
||||||
mtspr SPRN_SDR1, r6
|
mtspr SPRN_SDR1, r6
|
||||||
lis r6, early_hash@h
|
lis r6, early_hash@h
|
||||||
lis r3, Hash@ha
|
addis r3, r3, Hash@ha
|
||||||
stw r6, Hash@l(r3)
|
stw r6, Hash@l(r3)
|
||||||
blr
|
blr
|
||||||
|
|
||||||
|
@ -251,6 +251,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
|
|||||||
}
|
}
|
||||||
|
|
||||||
state = &sb->irq_state[src];
|
state = &sb->irq_state[src];
|
||||||
|
|
||||||
|
/* Some sanity checking */
|
||||||
|
if (!state->valid) {
|
||||||
|
pr_devel("%s: source %lx invalid !\n", __func__, irq);
|
||||||
|
return VM_FAULT_SIGBUS;
|
||||||
|
}
|
||||||
|
|
||||||
kvmppc_xive_select_irq(state, &hw_num, &xd);
|
kvmppc_xive_select_irq(state, &hw_num, &xd);
|
||||||
|
|
||||||
arch_spin_lock(&sb->lock);
|
arch_spin_lock(&sb->lock);
|
||||||
|
@ -50,6 +50,7 @@
|
|||||||
#include <asm/rtas.h>
|
#include <asm/rtas.h>
|
||||||
#include <asm/kasan.h>
|
#include <asm/kasan.h>
|
||||||
#include <asm/svm.h>
|
#include <asm/svm.h>
|
||||||
|
#include <asm/mmzone.h>
|
||||||
|
|
||||||
#include <mm/mmu_decl.h>
|
#include <mm/mmu_decl.h>
|
||||||
|
|
||||||
|
@ -14,4 +14,6 @@
|
|||||||
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
|
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
|
||||||
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
|
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
|
||||||
|
|
||||||
|
#define MAX_POSSIBLE_PHYSMEM_BITS 34
|
||||||
|
|
||||||
#endif /* _ASM_RISCV_PGTABLE_32_H */
|
#endif /* _ASM_RISCV_PGTABLE_32_H */
|
||||||
|
@ -53,11 +53,11 @@ int main(void)
|
|||||||
/* stack_frame offsets */
|
/* stack_frame offsets */
|
||||||
OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
|
OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
|
||||||
OFFSET(__SF_GPRS, stack_frame, gprs);
|
OFFSET(__SF_GPRS, stack_frame, gprs);
|
||||||
OFFSET(__SF_EMPTY, stack_frame, empty1);
|
OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
|
||||||
OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
|
OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
|
||||||
OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
|
OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
|
||||||
OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
|
OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
|
||||||
OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
|
OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
|
||||||
BLANK();
|
BLANK();
|
||||||
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
|
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
|
||||||
BLANK();
|
BLANK();
|
||||||
|
@ -1068,6 +1068,7 @@ EXPORT_SYMBOL(save_fpu_regs)
|
|||||||
* %r4
|
* %r4
|
||||||
*/
|
*/
|
||||||
load_fpu_regs:
|
load_fpu_regs:
|
||||||
|
stnsm __SF_EMPTY(%r15),0xfc
|
||||||
lg %r4,__LC_CURRENT
|
lg %r4,__LC_CURRENT
|
||||||
aghi %r4,__TASK_thread
|
aghi %r4,__TASK_thread
|
||||||
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
|
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
|
||||||
@ -1099,6 +1100,7 @@ load_fpu_regs:
|
|||||||
.Lload_fpu_regs_done:
|
.Lload_fpu_regs_done:
|
||||||
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
|
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
|
||||||
.Lload_fpu_regs_exit:
|
.Lload_fpu_regs_exit:
|
||||||
|
ssm __SF_EMPTY(%r15)
|
||||||
BR_EX %r14
|
BR_EX %r14
|
||||||
.Lload_fpu_regs_end:
|
.Lload_fpu_regs_end:
|
||||||
ENDPROC(load_fpu_regs)
|
ENDPROC(load_fpu_regs)
|
||||||
|
@ -129,8 +129,15 @@ int uv_destroy_page(unsigned long paddr)
|
|||||||
.paddr = paddr
|
.paddr = paddr
|
||||||
};
|
};
|
||||||
|
|
||||||
if (uv_call(0, (u64)&uvcb))
|
if (uv_call(0, (u64)&uvcb)) {
|
||||||
|
/*
|
||||||
|
* Older firmware uses 107/d as an indication of a non secure
|
||||||
|
* page. Let us emulate the newer variant (no-op).
|
||||||
|
*/
|
||||||
|
if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
|
||||||
|
return 0;
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2312,7 +2312,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
|
|||||||
struct kvm_s390_pv_unp unp = {};
|
struct kvm_s390_pv_unp unp = {};
|
||||||
|
|
||||||
r = -EINVAL;
|
r = -EINVAL;
|
||||||
if (!kvm_s390_pv_is_protected(kvm))
|
if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
r = -EFAULT;
|
r = -EFAULT;
|
||||||
@ -3564,7 +3564,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
|
|||||||
vcpu->arch.sie_block->pp = 0;
|
vcpu->arch.sie_block->pp = 0;
|
||||||
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
|
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
|
||||||
vcpu->arch.sie_block->todpr = 0;
|
vcpu->arch.sie_block->todpr = 0;
|
||||||
vcpu->arch.sie_block->cpnc = 0;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3582,7 +3581,6 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
|
|||||||
|
|
||||||
regs->etoken = 0;
|
regs->etoken = 0;
|
||||||
regs->etoken_extension = 0;
|
regs->etoken_extension = 0;
|
||||||
regs->diag318 = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||||
|
@ -208,7 +208,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
|||||||
return -EIO;
|
return -EIO;
|
||||||
}
|
}
|
||||||
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
|
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
|
||||||
atomic_set(&kvm->mm->context.is_protected, 1);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -228,6 +227,8 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
|
|||||||
*rrc = uvcb.header.rrc;
|
*rrc = uvcb.header.rrc;
|
||||||
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
|
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
|
||||||
*rc, *rrc);
|
*rc, *rrc);
|
||||||
|
if (!cc)
|
||||||
|
atomic_set(&kvm->mm->context.is_protected, 1);
|
||||||
return cc ? -EINVAL : 0;
|
return cc ? -EINVAL : 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2690,6 +2690,8 @@ static const struct mm_walk_ops reset_acc_walk_ops = {
|
|||||||
#include <linux/sched/mm.h>
|
#include <linux/sched/mm.h>
|
||||||
void s390_reset_acc(struct mm_struct *mm)
|
void s390_reset_acc(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
|
if (!mm_is_protected(mm))
|
||||||
|
return;
|
||||||
/*
|
/*
|
||||||
* we might be called during
|
* we might be called during
|
||||||
* reset: we walk the pages and clear
|
* reset: we walk the pages and clear
|
||||||
|
@ -107,14 +107,14 @@
|
|||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
|
|
||||||
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
|
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
|
||||||
static ssize_t __cstate_##_var##_show(struct kobject *kobj, \
|
static ssize_t __cstate_##_var##_show(struct device *dev, \
|
||||||
struct kobj_attribute *attr, \
|
struct device_attribute *attr, \
|
||||||
char *page) \
|
char *page) \
|
||||||
{ \
|
{ \
|
||||||
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
|
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
|
||||||
return sprintf(page, _format "\n"); \
|
return sprintf(page, _format "\n"); \
|
||||||
} \
|
} \
|
||||||
static struct kobj_attribute format_attr_##_var = \
|
static struct device_attribute format_attr_##_var = \
|
||||||
__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
|
__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
|
||||||
|
|
||||||
static ssize_t cstate_get_attr_cpumask(struct device *dev,
|
static ssize_t cstate_get_attr_cpumask(struct device *dev,
|
||||||
|
@ -94,8 +94,8 @@ end:
|
|||||||
return map;
|
return map;
|
||||||
}
|
}
|
||||||
|
|
||||||
ssize_t uncore_event_show(struct kobject *kobj,
|
ssize_t uncore_event_show(struct device *dev,
|
||||||
struct kobj_attribute *attr, char *buf)
|
struct device_attribute *attr, char *buf)
|
||||||
{
|
{
|
||||||
struct uncore_event_desc *event =
|
struct uncore_event_desc *event =
|
||||||
container_of(attr, struct uncore_event_desc, attr);
|
container_of(attr, struct uncore_event_desc, attr);
|
||||||
|
@ -157,7 +157,7 @@ struct intel_uncore_box {
|
|||||||
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2
|
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2
|
||||||
|
|
||||||
struct uncore_event_desc {
|
struct uncore_event_desc {
|
||||||
struct kobj_attribute attr;
|
struct device_attribute attr;
|
||||||
const char *config;
|
const char *config;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -179,8 +179,8 @@ struct pci2phy_map {
|
|||||||
struct pci2phy_map *__find_pci2phy_map(int segment);
|
struct pci2phy_map *__find_pci2phy_map(int segment);
|
||||||
int uncore_pcibus_to_physid(struct pci_bus *bus);
|
int uncore_pcibus_to_physid(struct pci_bus *bus);
|
||||||
|
|
||||||
ssize_t uncore_event_show(struct kobject *kobj,
|
ssize_t uncore_event_show(struct device *dev,
|
||||||
struct kobj_attribute *attr, char *buf);
|
struct device_attribute *attr, char *buf);
|
||||||
|
|
||||||
static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
|
static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
|
||||||
{
|
{
|
||||||
@ -201,14 +201,14 @@ extern int __uncore_max_dies;
|
|||||||
}
|
}
|
||||||
|
|
||||||
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
|
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
|
||||||
static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
|
static ssize_t __uncore_##_var##_show(struct device *dev, \
|
||||||
struct kobj_attribute *attr, \
|
struct device_attribute *attr, \
|
||||||
char *page) \
|
char *page) \
|
||||||
{ \
|
{ \
|
||||||
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
|
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
|
||||||
return sprintf(page, _format "\n"); \
|
return sprintf(page, _format "\n"); \
|
||||||
} \
|
} \
|
||||||
static struct kobj_attribute format_attr_##_var = \
|
static struct device_attribute format_attr_##_var = \
|
||||||
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
|
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
|
||||||
|
|
||||||
static inline bool uncore_pmc_fixed(int idx)
|
static inline bool uncore_pmc_fixed(int idx)
|
||||||
|
@ -93,18 +93,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
|
|||||||
* any other bit is reserved
|
* any other bit is reserved
|
||||||
*/
|
*/
|
||||||
#define RAPL_EVENT_MASK 0xFFULL
|
#define RAPL_EVENT_MASK 0xFFULL
|
||||||
|
|
||||||
#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
|
|
||||||
static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
|
|
||||||
struct kobj_attribute *attr, \
|
|
||||||
char *page) \
|
|
||||||
{ \
|
|
||||||
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
|
|
||||||
return sprintf(page, _format "\n"); \
|
|
||||||
} \
|
|
||||||
static struct kobj_attribute format_attr_##_var = \
|
|
||||||
__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
|
|
||||||
|
|
||||||
#define RAPL_CNTR_WIDTH 32
|
#define RAPL_CNTR_WIDTH 32
|
||||||
|
|
||||||
#define RAPL_EVENT_ATTR_STR(_name, v, str) \
|
#define RAPL_EVENT_ATTR_STR(_name, v, str) \
|
||||||
@ -441,7 +429,7 @@ static struct attribute_group rapl_pmu_events_group = {
|
|||||||
.attrs = attrs_empty,
|
.attrs = attrs_empty,
|
||||||
};
|
};
|
||||||
|
|
||||||
DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
|
PMU_FORMAT_ATTR(event, "config:0-7");
|
||||||
static struct attribute *rapl_formats_attr[] = {
|
static struct attribute *rapl_formats_attr[] = {
|
||||||
&format_attr_event.attr,
|
&format_attr_event.attr,
|
||||||
NULL,
|
NULL,
|
||||||
|
@ -1656,6 +1656,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
|
|||||||
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
||||||
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
|
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
|
||||||
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
|
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
|
||||||
|
int kvm_cpu_has_extint(struct kvm_vcpu *v);
|
||||||
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
|
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
|
||||||
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
|
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
|
||||||
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
|
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
|
||||||
|
@ -28,4 +28,14 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif /* CONFIG_SPARSEMEM */
|
#endif /* CONFIG_SPARSEMEM */
|
||||||
|
|
||||||
|
#ifndef __ASSEMBLY__
|
||||||
|
#ifdef CONFIG_NUMA_KEEP_MEMINFO
|
||||||
|
extern int phys_to_target_node(phys_addr_t start);
|
||||||
|
#define phys_to_target_node phys_to_target_node
|
||||||
|
extern int memory_add_physaddr_to_nid(u64 start);
|
||||||
|
#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
|
||||||
|
#endif
|
||||||
|
#endif /* __ASSEMBLY__ */
|
||||||
|
|
||||||
#endif /* _ASM_X86_SPARSEMEM_H */
|
#endif /* _ASM_X86_SPARSEMEM_H */
|
||||||
|
@ -100,53 +100,6 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev
|
|||||||
return find_matching_signature(mc, csig, cpf);
|
return find_matching_signature(mc, csig, cpf);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Given CPU signature and a microcode patch, this function finds if the
|
|
||||||
* microcode patch has matching family and model with the CPU.
|
|
||||||
*
|
|
||||||
* %true - if there's a match
|
|
||||||
* %false - otherwise
|
|
||||||
*/
|
|
||||||
static bool microcode_matches(struct microcode_header_intel *mc_header,
|
|
||||||
unsigned long sig)
|
|
||||||
{
|
|
||||||
unsigned long total_size = get_totalsize(mc_header);
|
|
||||||
unsigned long data_size = get_datasize(mc_header);
|
|
||||||
struct extended_sigtable *ext_header;
|
|
||||||
unsigned int fam_ucode, model_ucode;
|
|
||||||
struct extended_signature *ext_sig;
|
|
||||||
unsigned int fam, model;
|
|
||||||
int ext_sigcount, i;
|
|
||||||
|
|
||||||
fam = x86_family(sig);
|
|
||||||
model = x86_model(sig);
|
|
||||||
|
|
||||||
fam_ucode = x86_family(mc_header->sig);
|
|
||||||
model_ucode = x86_model(mc_header->sig);
|
|
||||||
|
|
||||||
if (fam == fam_ucode && model == model_ucode)
|
|
||||||
return true;
|
|
||||||
|
|
||||||
/* Look for ext. headers: */
|
|
||||||
if (total_size <= data_size + MC_HEADER_SIZE)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
|
|
||||||
ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
|
|
||||||
ext_sigcount = ext_header->count;
|
|
||||||
|
|
||||||
for (i = 0; i < ext_sigcount; i++) {
|
|
||||||
fam_ucode = x86_family(ext_sig->sig);
|
|
||||||
model_ucode = x86_model(ext_sig->sig);
|
|
||||||
|
|
||||||
if (fam == fam_ucode && model == model_ucode)
|
|
||||||
return true;
|
|
||||||
|
|
||||||
ext_sig++;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
|
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
|
||||||
{
|
{
|
||||||
struct ucode_patch *p;
|
struct ucode_patch *p;
|
||||||
@ -164,7 +117,7 @@ static struct ucode_patch *memdup_patch(void *data, unsigned int size)
|
|||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void save_microcode_patch(void *data, unsigned int size)
|
static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
|
||||||
{
|
{
|
||||||
struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
|
struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
|
||||||
struct ucode_patch *iter, *tmp, *p = NULL;
|
struct ucode_patch *iter, *tmp, *p = NULL;
|
||||||
@ -210,6 +163,9 @@ static void save_microcode_patch(void *data, unsigned int size)
|
|||||||
if (!p)
|
if (!p)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
|
||||||
|
return;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Save for early loading. On 32-bit, that needs to be a physical
|
* Save for early loading. On 32-bit, that needs to be a physical
|
||||||
* address as the APs are running from physical addresses, before
|
* address as the APs are running from physical addresses, before
|
||||||
@ -344,13 +300,14 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
|
|||||||
|
|
||||||
size -= mc_size;
|
size -= mc_size;
|
||||||
|
|
||||||
if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
|
if (!find_matching_signature(data, uci->cpu_sig.sig,
|
||||||
|
uci->cpu_sig.pf)) {
|
||||||
data += mc_size;
|
data += mc_size;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (save) {
|
if (save) {
|
||||||
save_microcode_patch(data, mc_size);
|
save_microcode_patch(uci, data, mc_size);
|
||||||
goto next;
|
goto next;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -483,14 +440,14 @@ static void show_saved_mc(void)
|
|||||||
* Save this microcode patch. It will be loaded early when a CPU is
|
* Save this microcode patch. It will be loaded early when a CPU is
|
||||||
* hot-added or resumes.
|
* hot-added or resumes.
|
||||||
*/
|
*/
|
||||||
static void save_mc_for_early(u8 *mc, unsigned int size)
|
static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
|
||||||
{
|
{
|
||||||
/* Synchronization during CPU hotplug. */
|
/* Synchronization during CPU hotplug. */
|
||||||
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
|
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
|
||||||
|
|
||||||
mutex_lock(&x86_cpu_microcode_mutex);
|
mutex_lock(&x86_cpu_microcode_mutex);
|
||||||
|
|
||||||
save_microcode_patch(mc, size);
|
save_microcode_patch(uci, mc, size);
|
||||||
show_saved_mc();
|
show_saved_mc();
|
||||||
|
|
||||||
mutex_unlock(&x86_cpu_microcode_mutex);
|
mutex_unlock(&x86_cpu_microcode_mutex);
|
||||||
@ -935,7 +892,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
|
|||||||
* permanent memory. So it will be loaded early when a CPU is hot added
|
* permanent memory. So it will be loaded early when a CPU is hot added
|
||||||
* or resumes.
|
* or resumes.
|
||||||
*/
|
*/
|
||||||
save_mc_for_early(new_mc, new_mc_size);
|
save_mc_for_early(uci, new_mc, new_mc_size);
|
||||||
|
|
||||||
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
|
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
|
||||||
cpu, new_rev, uci->cpu_sig.rev);
|
cpu, new_rev, uci->cpu_sig.rev);
|
||||||
|
@ -78,6 +78,9 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
|
|||||||
if (!user_mode(regs))
|
if (!user_mode(regs))
|
||||||
return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
|
return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
|
||||||
|
|
||||||
|
/* The user space code from other tasks cannot be accessed. */
|
||||||
|
if (regs != task_pt_regs(current))
|
||||||
|
return -EPERM;
|
||||||
/*
|
/*
|
||||||
* Make sure userspace isn't trying to trick us into dumping kernel
|
* Make sure userspace isn't trying to trick us into dumping kernel
|
||||||
* memory by pointing the userspace instruction pointer at it.
|
* memory by pointing the userspace instruction pointer at it.
|
||||||
@ -85,6 +88,12 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
|
|||||||
if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
|
if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Even if named copy_from_user_nmi() this can be invoked from
|
||||||
|
* other contexts and will not try to resolve a pagefault, which is
|
||||||
|
* the correct thing to do here as this code can be called from any
|
||||||
|
* context.
|
||||||
|
*/
|
||||||
return copy_from_user_nmi(buf, (void __user *)src, nbytes);
|
return copy_from_user_nmi(buf, (void __user *)src, nbytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,13 +124,19 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
|
|||||||
u8 opcodes[OPCODE_BUFSIZE];
|
u8 opcodes[OPCODE_BUFSIZE];
|
||||||
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
|
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
|
||||||
|
|
||||||
if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
|
switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
|
||||||
printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
|
case 0:
|
||||||
loglvl, prologue);
|
|
||||||
} else {
|
|
||||||
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
|
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
|
||||||
__stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
|
__stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
|
||||||
opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
|
opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
|
||||||
|
break;
|
||||||
|
case -EPERM:
|
||||||
|
/* No access to the user space stack of other tasks. Ignore. */
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
|
||||||
|
loglvl, prologue);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -514,16 +514,10 @@ int tboot_force_iommu(void)
|
|||||||
if (!tboot_enabled())
|
if (!tboot_enabled())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (intel_iommu_tboot_noforce)
|
if (no_iommu || dmar_disabled)
|
||||||
return 1;
|
|
||||||
|
|
||||||
if (no_iommu || swiotlb || dmar_disabled)
|
|
||||||
pr_warn("Forcing Intel-IOMMU to enabled\n");
|
pr_warn("Forcing Intel-IOMMU to enabled\n");
|
||||||
|
|
||||||
dmar_disabled = 0;
|
dmar_disabled = 0;
|
||||||
#ifdef CONFIG_SWIOTLB
|
|
||||||
swiotlb = 0;
|
|
||||||
#endif
|
|
||||||
no_iommu = 0;
|
no_iommu = 0;
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
|
|||||||
* check if there is pending interrupt from
|
* check if there is pending interrupt from
|
||||||
* non-APIC source without intack.
|
* non-APIC source without intack.
|
||||||
*/
|
*/
|
||||||
static int kvm_cpu_has_extint(struct kvm_vcpu *v)
|
int kvm_cpu_has_extint(struct kvm_vcpu *v)
|
||||||
{
|
|
||||||
u8 accept = kvm_apic_accept_pic_intr(v);
|
|
||||||
|
|
||||||
if (accept) {
|
|
||||||
if (irqchip_split(v->kvm))
|
|
||||||
return pending_userspace_extint(v);
|
|
||||||
else
|
|
||||||
return v->kvm->arch.vpic->output;
|
|
||||||
} else
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* check if there is injectable interrupt:
|
|
||||||
* when virtual interrupt delivery enabled,
|
|
||||||
* interrupt from apic will handled by hardware,
|
|
||||||
* we don't need to check it here.
|
|
||||||
*/
|
|
||||||
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
|
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* FIXME: interrupt.injected represents an interrupt that it's
|
* FIXME: interrupt.injected represents an interrupt whose
|
||||||
* side-effects have already been applied (e.g. bit from IRR
|
* side-effects have already been applied (e.g. bit from IRR
|
||||||
* already moved to ISR). Therefore, it is incorrect to rely
|
* already moved to ISR). Therefore, it is incorrect to rely
|
||||||
* on interrupt.injected to know if there is a pending
|
* on interrupt.injected to know if there is a pending
|
||||||
@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
|
|||||||
if (!lapic_in_kernel(v))
|
if (!lapic_in_kernel(v))
|
||||||
return v->arch.interrupt.injected;
|
return v->arch.interrupt.injected;
|
||||||
|
|
||||||
|
if (!kvm_apic_accept_pic_intr(v))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (irqchip_split(v->kvm))
|
||||||
|
return pending_userspace_extint(v);
|
||||||
|
else
|
||||||
|
return v->kvm->arch.vpic->output;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* check if there is injectable interrupt:
|
||||||
|
* when virtual interrupt delivery enabled,
|
||||||
|
* interrupt from apic will handled by hardware,
|
||||||
|
* we don't need to check it here.
|
||||||
|
*/
|
||||||
|
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
|
||||||
|
{
|
||||||
if (kvm_cpu_has_extint(v))
|
if (kvm_cpu_has_extint(v))
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
@ -91,20 +89,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
|
|||||||
*/
|
*/
|
||||||
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
|
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
|
||||||
{
|
{
|
||||||
/*
|
|
||||||
* FIXME: interrupt.injected represents an interrupt that it's
|
|
||||||
* side-effects have already been applied (e.g. bit from IRR
|
|
||||||
* already moved to ISR). Therefore, it is incorrect to rely
|
|
||||||
* on interrupt.injected to know if there is a pending
|
|
||||||
* interrupt in the user-mode LAPIC.
|
|
||||||
* This leads to nVMX/nSVM not be able to distinguish
|
|
||||||
* if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
|
|
||||||
* pending interrupt or should re-inject an injected
|
|
||||||
* interrupt.
|
|
||||||
*/
|
|
||||||
if (!lapic_in_kernel(v))
|
|
||||||
return v->arch.interrupt.injected;
|
|
||||||
|
|
||||||
if (kvm_cpu_has_extint(v))
|
if (kvm_cpu_has_extint(v))
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
@ -118,16 +102,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
|
|||||||
*/
|
*/
|
||||||
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
|
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
|
||||||
{
|
{
|
||||||
if (kvm_cpu_has_extint(v)) {
|
if (!kvm_cpu_has_extint(v)) {
|
||||||
if (irqchip_split(v->kvm)) {
|
WARN_ON(!lapic_in_kernel(v));
|
||||||
int vector = v->arch.pending_external_vector;
|
|
||||||
|
|
||||||
v->arch.pending_external_vector = -1;
|
|
||||||
return vector;
|
|
||||||
} else
|
|
||||||
return kvm_pic_read_irq(v->kvm); /* PIC */
|
|
||||||
} else
|
|
||||||
return -1;
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!lapic_in_kernel(v))
|
||||||
|
return v->arch.interrupt.nr;
|
||||||
|
|
||||||
|
if (irqchip_split(v->kvm)) {
|
||||||
|
int vector = v->arch.pending_external_vector;
|
||||||
|
|
||||||
|
v->arch.pending_external_vector = -1;
|
||||||
|
return vector;
|
||||||
|
} else
|
||||||
|
return kvm_pic_read_irq(v->kvm); /* PIC */
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -135,13 +124,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
|
|||||||
*/
|
*/
|
||||||
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
|
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
|
||||||
{
|
{
|
||||||
int vector;
|
int vector = kvm_cpu_get_extint(v);
|
||||||
|
|
||||||
if (!lapic_in_kernel(v))
|
|
||||||
return v->arch.interrupt.nr;
|
|
||||||
|
|
||||||
vector = kvm_cpu_get_extint(v);
|
|
||||||
|
|
||||||
if (vector != -1)
|
if (vector != -1)
|
||||||
return vector; /* PIC */
|
return vector; /* PIC */
|
||||||
|
|
||||||
|
@ -2465,7 +2465,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
|
|||||||
struct kvm_lapic *apic = vcpu->arch.apic;
|
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||||
u32 ppr;
|
u32 ppr;
|
||||||
|
|
||||||
if (!kvm_apic_hw_enabled(apic))
|
if (!kvm_apic_present(vcpu))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
__apic_update_ppr(apic, &ppr);
|
__apic_update_ppr(apic, &ppr);
|
||||||
|
@ -3517,7 +3517,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
|
|||||||
{
|
{
|
||||||
u64 sptes[PT64_ROOT_MAX_LEVEL];
|
u64 sptes[PT64_ROOT_MAX_LEVEL];
|
||||||
struct rsvd_bits_validate *rsvd_check;
|
struct rsvd_bits_validate *rsvd_check;
|
||||||
int root = vcpu->arch.mmu->root_level;
|
int root = vcpu->arch.mmu->shadow_root_level;
|
||||||
int leaf;
|
int leaf;
|
||||||
int level;
|
int level;
|
||||||
bool reserved = false;
|
bool reserved = false;
|
||||||
|
@ -642,8 +642,8 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
|
|||||||
* Its safe to read more than we are asked, caller should ensure that
|
* Its safe to read more than we are asked, caller should ensure that
|
||||||
* destination has enough space.
|
* destination has enough space.
|
||||||
*/
|
*/
|
||||||
src_paddr = round_down(src_paddr, 16);
|
|
||||||
offset = src_paddr & 15;
|
offset = src_paddr & 15;
|
||||||
|
src_paddr = round_down(src_paddr, 16);
|
||||||
sz = round_up(sz + offset, 16);
|
sz = round_up(sz + offset, 16);
|
||||||
|
|
||||||
return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
|
return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
|
||||||
|
@ -1309,8 +1309,10 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
|
|||||||
svm->avic_is_running = true;
|
svm->avic_is_running = true;
|
||||||
|
|
||||||
svm->msrpm = svm_vcpu_alloc_msrpm();
|
svm->msrpm = svm_vcpu_alloc_msrpm();
|
||||||
if (!svm->msrpm)
|
if (!svm->msrpm) {
|
||||||
|
err = -ENOMEM;
|
||||||
goto error_free_vmcb_page;
|
goto error_free_vmcb_page;
|
||||||
|
}
|
||||||
|
|
||||||
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
|
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
|
||||||
|
|
||||||
|
@ -4051,21 +4051,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
|
|||||||
|
|
||||||
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
|
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
|
/*
|
||||||
|
* We can accept userspace's request for interrupt injection
|
||||||
|
* as long as we have a place to store the interrupt number.
|
||||||
|
* The actual injection will happen when the CPU is able to
|
||||||
|
* deliver the interrupt.
|
||||||
|
*/
|
||||||
|
if (kvm_cpu_has_extint(vcpu))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
/* Acknowledging ExtINT does not happen if LINT0 is masked. */
|
||||||
return (!lapic_in_kernel(vcpu) ||
|
return (!lapic_in_kernel(vcpu) ||
|
||||||
kvm_apic_accept_pic_intr(vcpu));
|
kvm_apic_accept_pic_intr(vcpu));
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* if userspace requested an interrupt window, check that the
|
|
||||||
* interrupt window is open.
|
|
||||||
*
|
|
||||||
* No need to exit to userspace if we already have an interrupt queued.
|
|
||||||
*/
|
|
||||||
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
|
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
return kvm_arch_interrupt_allowed(vcpu) &&
|
return kvm_arch_interrupt_allowed(vcpu) &&
|
||||||
!kvm_cpu_has_interrupt(vcpu) &&
|
|
||||||
!kvm_event_needs_reinjection(vcpu) &&
|
|
||||||
kvm_cpu_accept_dm_intr(vcpu);
|
kvm_cpu_accept_dm_intr(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -938,6 +938,7 @@ int phys_to_target_node(phys_addr_t start)
|
|||||||
|
|
||||||
return meminfo_to_nid(&numa_reserved_meminfo, start);
|
return meminfo_to_nid(&numa_reserved_meminfo, start);
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(phys_to_target_node);
|
||||||
|
|
||||||
int memory_add_physaddr_to_nid(u64 start)
|
int memory_add_physaddr_to_nid(u64 start)
|
||||||
{
|
{
|
||||||
@ -947,4 +948,5 @@ int memory_add_physaddr_to_nid(u64 start)
|
|||||||
nid = numa_meminfo.blk[0].nid;
|
nid = numa_meminfo.blk[0].nid;
|
||||||
return nid;
|
return nid;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
|
||||||
#endif
|
#endif
|
||||||
|
@ -78,28 +78,30 @@ int __init efi_alloc_page_tables(void)
|
|||||||
gfp_mask = GFP_KERNEL | __GFP_ZERO;
|
gfp_mask = GFP_KERNEL | __GFP_ZERO;
|
||||||
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
|
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
|
||||||
if (!efi_pgd)
|
if (!efi_pgd)
|
||||||
return -ENOMEM;
|
goto fail;
|
||||||
|
|
||||||
pgd = efi_pgd + pgd_index(EFI_VA_END);
|
pgd = efi_pgd + pgd_index(EFI_VA_END);
|
||||||
p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
|
p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
|
||||||
if (!p4d) {
|
if (!p4d)
|
||||||
free_page((unsigned long)efi_pgd);
|
goto free_pgd;
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
|
pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
|
||||||
if (!pud) {
|
if (!pud)
|
||||||
if (pgtable_l5_enabled())
|
goto free_p4d;
|
||||||
free_page((unsigned long) pgd_page_vaddr(*pgd));
|
|
||||||
free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
efi_mm.pgd = efi_pgd;
|
efi_mm.pgd = efi_pgd;
|
||||||
mm_init_cpumask(&efi_mm);
|
mm_init_cpumask(&efi_mm);
|
||||||
init_new_context(NULL, &efi_mm);
|
init_new_context(NULL, &efi_mm);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
free_p4d:
|
||||||
|
if (pgtable_l5_enabled())
|
||||||
|
free_page((unsigned long)pgd_page_vaddr(*pgd));
|
||||||
|
free_pgd:
|
||||||
|
free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
|
||||||
|
fail:
|
||||||
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -93,10 +93,20 @@ void xen_init_lock_cpu(int cpu)
|
|||||||
|
|
||||||
void xen_uninit_lock_cpu(int cpu)
|
void xen_uninit_lock_cpu(int cpu)
|
||||||
{
|
{
|
||||||
|
int irq;
|
||||||
|
|
||||||
if (!xen_pvspin)
|
if (!xen_pvspin)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
|
/*
|
||||||
|
* When booting the kernel with 'mitigations=auto,nosmt', the secondary
|
||||||
|
* CPUs are not activated, and lock_kicker_irq is not initialized.
|
||||||
|
*/
|
||||||
|
irq = per_cpu(lock_kicker_irq, cpu);
|
||||||
|
if (irq == -1)
|
||||||
|
return;
|
||||||
|
|
||||||
|
unbind_from_irqhandler(irq, NULL);
|
||||||
per_cpu(lock_kicker_irq, cpu) = -1;
|
per_cpu(lock_kicker_irq, cpu) = -1;
|
||||||
kfree(per_cpu(irq_name, cpu));
|
kfree(per_cpu(irq_name, cpu));
|
||||||
per_cpu(irq_name, cpu) = NULL;
|
per_cpu(irq_name, cpu) = NULL;
|
||||||
|
@ -849,6 +849,7 @@ static void blkcg_fill_root_iostats(void)
|
|||||||
blkg_iostat_set(&blkg->iostat.cur, &tmp);
|
blkg_iostat_set(&blkg->iostat.cur, &tmp);
|
||||||
u64_stats_update_end(&blkg->iostat.sync);
|
u64_stats_update_end(&blkg->iostat.sync);
|
||||||
}
|
}
|
||||||
|
disk_put_part(part);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -225,13 +225,18 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
|
|||||||
/* release the tag's ownership to the req cloned from */
|
/* release the tag's ownership to the req cloned from */
|
||||||
spin_lock_irqsave(&fq->mq_flush_lock, flags);
|
spin_lock_irqsave(&fq->mq_flush_lock, flags);
|
||||||
|
|
||||||
WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
|
|
||||||
if (!refcount_dec_and_test(&flush_rq->ref)) {
|
if (!refcount_dec_and_test(&flush_rq->ref)) {
|
||||||
fq->rq_status = error;
|
fq->rq_status = error;
|
||||||
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
|
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Flush request has to be marked as IDLE when it is really ended
|
||||||
|
* because its .end_io() is called from timeout code path too for
|
||||||
|
* avoiding use-after-free.
|
||||||
|
*/
|
||||||
|
WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
|
||||||
if (fq->rq_status != BLK_STS_OK)
|
if (fq->rq_status != BLK_STS_OK)
|
||||||
error = fq->rq_status;
|
error = fq->rq_status;
|
||||||
|
|
||||||
|
@ -103,6 +103,13 @@ int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots)
|
|||||||
spin_lock_init(&ksm->idle_slots_lock);
|
spin_lock_init(&ksm->idle_slots_lock);
|
||||||
|
|
||||||
slot_hashtable_size = roundup_pow_of_two(num_slots);
|
slot_hashtable_size = roundup_pow_of_two(num_slots);
|
||||||
|
/*
|
||||||
|
* hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
|
||||||
|
* buckets. This only makes a difference when there is only 1 keyslot.
|
||||||
|
*/
|
||||||
|
if (slot_hashtable_size < 2)
|
||||||
|
slot_hashtable_size = 2;
|
||||||
|
|
||||||
ksm->log_slot_ht_size = ilog2(slot_hashtable_size);
|
ksm->log_slot_ht_size = ilog2(slot_hashtable_size);
|
||||||
ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size,
|
ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size,
|
||||||
sizeof(ksm->slot_hashtable[0]),
|
sizeof(ksm->slot_hashtable[0]),
|
||||||
|
@ -49,15 +49,25 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
|
|||||||
|
|
||||||
if (!tty->ops->write)
|
if (!tty->ops->write)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
mutex_lock(&speakup_tty_mutex);
|
||||||
|
if (speakup_tty) {
|
||||||
|
mutex_unlock(&speakup_tty_mutex);
|
||||||
|
return -EBUSY;
|
||||||
|
}
|
||||||
speakup_tty = tty;
|
speakup_tty = tty;
|
||||||
|
|
||||||
ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
|
ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
|
||||||
if (!ldisc_data)
|
if (!ldisc_data) {
|
||||||
|
speakup_tty = NULL;
|
||||||
|
mutex_unlock(&speakup_tty_mutex);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
init_completion(&ldisc_data->completion);
|
init_completion(&ldisc_data->completion);
|
||||||
ldisc_data->buf_free = true;
|
ldisc_data->buf_free = true;
|
||||||
speakup_tty->disc_data = ldisc_data;
|
speakup_tty->disc_data = ldisc_data;
|
||||||
|
mutex_unlock(&speakup_tty_mutex);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(iort_fwnode_lock);
|
|||||||
* iort_set_fwnode() - Create iort_fwnode and use it to register
|
* iort_set_fwnode() - Create iort_fwnode and use it to register
|
||||||
* iommu data in the iort_fwnode_list
|
* iommu data in the iort_fwnode_list
|
||||||
*
|
*
|
||||||
* @node: IORT table node associated with the IOMMU
|
* @iort_node: IORT table node associated with the IOMMU
|
||||||
* @fwnode: fwnode associated with the IORT node
|
* @fwnode: fwnode associated with the IORT node
|
||||||
*
|
*
|
||||||
* Returns: 0 on success
|
* Returns: 0 on success
|
||||||
@ -673,7 +673,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 id,
|
|||||||
/**
|
/**
|
||||||
* iort_get_device_domain() - Find MSI domain related to a device
|
* iort_get_device_domain() - Find MSI domain related to a device
|
||||||
* @dev: The device.
|
* @dev: The device.
|
||||||
* @req_id: Requester ID for the device.
|
* @id: Requester ID for the device.
|
||||||
|
* @bus_token: irq domain bus token.
|
||||||
*
|
*
|
||||||
* Returns: the MSI domain for this device, NULL otherwise
|
* Returns: the MSI domain for this device, NULL otherwise
|
||||||
*/
|
*/
|
||||||
@ -1136,7 +1137,7 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
|
|||||||
*
|
*
|
||||||
* @dev: device to configure
|
* @dev: device to configure
|
||||||
* @dma_addr: device DMA address result pointer
|
* @dma_addr: device DMA address result pointer
|
||||||
* @size: DMA range size result pointer
|
* @dma_size: DMA range size result pointer
|
||||||
*/
|
*/
|
||||||
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
|
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
|
||||||
{
|
{
|
||||||
@ -1526,6 +1527,7 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
|
|||||||
/**
|
/**
|
||||||
* iort_add_platform_device() - Allocate a platform device for IORT node
|
* iort_add_platform_device() - Allocate a platform device for IORT node
|
||||||
* @node: Pointer to device ACPI IORT node
|
* @node: Pointer to device ACPI IORT node
|
||||||
|
* @ops: Pointer to IORT device config struct
|
||||||
*
|
*
|
||||||
* Returns: 0 on success, <0 failure
|
* Returns: 0 on success, <0 failure
|
||||||
*/
|
*/
|
||||||
|
@@ -227,6 +227,9 @@ static int sysc_wait_softreset(struct sysc *ddata)
 	u32 sysc_mask, syss_done, rstval;
 	int syss_offset, error = 0;
 
+	if (ddata->cap->regbits->srst_shift < 0)
+		return 0;
+
 	syss_offset = ddata->offsets[SYSC_SYSSTATUS];
 	sysc_mask = BIT(ddata->cap->regbits->srst_shift);
 
@@ -970,9 +973,15 @@ static int sysc_enable_module(struct device *dev)
 			return error;
 		}
 	}
-	error = sysc_wait_softreset(ddata);
-	if (error)
-		dev_warn(ddata->dev, "OCP softreset timed out\n");
+	/*
+	 * Some modules like i2c and hdq1w have unusable reset status unless
+	 * the module reset quirk is enabled. Skip status check on enable.
+	 */
+	if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
+		error = sysc_wait_softreset(ddata);
+		if (error)
+			dev_warn(ddata->dev, "OCP softreset timed out\n");
+	}
 
 	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
 		sysc_disable_opt_clocks(ddata);
 
@@ -1373,17 +1382,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
 		   SYSC_QUIRK_OPT_CLKS_NEEDED),
 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
-		   SYSC_MODULE_QUIRK_HDQ1W),
+		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
-		   SYSC_MODULE_QUIRK_HDQ1W),
+		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
-		   SYSC_MODULE_QUIRK_I2C),
+		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
-		   SYSC_MODULE_QUIRK_I2C),
+		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
-		   SYSC_MODULE_QUIRK_I2C),
+		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 	SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
-		   SYSC_MODULE_QUIRK_I2C),
+		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 	SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
 	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
 		   SYSC_MODULE_QUIRK_SGX),
@@ -2880,7 +2889,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
 
 	if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
 	    (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
-		return -EBUSY;
+		return -ENXIO;
 
 	return 0;
 }
@@ -368,7 +368,7 @@ static const struct regmap_config ti_eqep_regmap32_config = {
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
-	.max_register = 0x24,
+	.max_register = QUPRD,
 };
 
 static const struct regmap_config ti_eqep_regmap16_config = {
@@ -376,7 +376,7 @@ static const struct regmap_config ti_eqep_regmap16_config = {
 	.reg_bits = 16,
 	.val_bits = 16,
 	.reg_stride = 2,
-	.max_register = 0x1e,
+	.max_register = QCPRDLAT,
 };
 
 static int ti_eqep_probe(struct platform_device *pdev)
@@ -236,13 +236,15 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
 	if (!handle || !handle->perf_ops)
 		return -ENODEV;
 
+#ifdef CONFIG_COMMON_CLK
 	/* dummy clock provider as needed by OPP if clocks property is used */
 	if (of_find_property(dev->of_node, "#clock-cells", NULL))
 		devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+#endif
 
 	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
 	if (ret) {
-		dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
+		dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
 			__func__, ret);
 	}
 
@@ -50,7 +50,6 @@ config DEV_DAX_HMEM
 	  Say M if unsure.
 
 config DEV_DAX_HMEM_DEVICES
-	depends on NUMA_KEEP_MEMINFO # for phys_to_target_node()
 	depends on DEV_DAX_HMEM && DAX=y
 	def_bool y
 
@@ -1039,16 +1039,15 @@ static int get_dma_id(struct dma_device *device)
 static int __dma_async_device_channel_register(struct dma_device *device,
 					       struct dma_chan *chan)
 {
-	int rc = 0;
+	int rc;
 
 	chan->local = alloc_percpu(typeof(*chan->local));
 	if (!chan->local)
-		goto err_out;
+		return -ENOMEM;
 	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 	if (!chan->dev) {
-		free_percpu(chan->local);
-		chan->local = NULL;
-		goto err_out;
+		rc = -ENOMEM;
+		goto err_free_local;
 	}
 
 	/*
@@ -1061,7 +1060,8 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	if (chan->chan_id < 0) {
 		pr_err("%s: unable to alloc ida for chan: %d\n",
 		       __func__, chan->chan_id);
-		goto err_out;
+		rc = chan->chan_id;
+		goto err_free_dev;
 	}
 
 	chan->dev->device.class = &dma_devclass;
@@ -1082,9 +1082,10 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	mutex_lock(&device->chan_mutex);
 	ida_free(&device->chan_ida, chan->chan_id);
 	mutex_unlock(&device->chan_mutex);
-err_out:
-	free_percpu(chan->local);
+err_free_dev:
 	kfree(chan->dev);
+err_free_local:
+	free_percpu(chan->local);
 	return rc;
 }
 
@@ -271,7 +271,7 @@ int idxd_wq_map_portal(struct idxd_wq *wq)
 	resource_size_t start;
 
 	start = pci_resource_start(pdev, IDXD_WQ_BAR);
-	start = start + wq->id * IDXD_PORTAL_SIZE;
+	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
 
 	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
 	if (!wq->dportal)
@@ -295,7 +295,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	int i, wq_offset;
 
 	lockdep_assert_held(&idxd->dev_lock);
-	memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
+	memset(wq->wqcfg, 0, idxd->wqcfg_size);
 	wq->type = IDXD_WQT_NONE;
 	wq->size = 0;
 	wq->group = NULL;
@@ -304,8 +304,8 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
 	memset(wq->name, 0, WQ_NAME_SIZE);
 
-	for (i = 0; i < 8; i++) {
-		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
 		iowrite32(0, idxd->reg_base + wq_offset);
 		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
 			wq->id, i, wq_offset,
@@ -539,10 +539,10 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 	if (!wq->group)
 		return 0;
 
-	memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+	memset(wq->wqcfg, 0, idxd->wqcfg_size);
 
 	/* byte 0-3 */
-	wq->wqcfg.wq_size = wq->size;
+	wq->wqcfg->wq_size = wq->size;
 
 	if (wq->size == 0) {
 		dev_warn(dev, "Incorrect work queue size: 0\n");
@@ -550,22 +550,21 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 	}
 
 	/* bytes 4-7 */
-	wq->wqcfg.wq_thresh = wq->threshold;
+	wq->wqcfg->wq_thresh = wq->threshold;
 
 	/* byte 8-11 */
-	wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
-	wq->wqcfg.mode = 1;
-
-	wq->wqcfg.priority = wq->priority;
+	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
+	wq->wqcfg->mode = 1;
+	wq->wqcfg->priority = wq->priority;
 
 	/* bytes 12-15 */
-	wq->wqcfg.max_xfer_shift = ilog2(wq->max_xfer_bytes);
-	wq->wqcfg.max_batch_shift = ilog2(wq->max_batch_size);
+	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
+	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
 
 	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
-	for (i = 0; i < 8; i++) {
-		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
-		iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
 		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
 			wq->id, i, wq_offset,
 			ioread32(idxd->reg_base + wq_offset));
@@ -103,7 +103,7 @@ struct idxd_wq {
 	u32 priority;
 	enum idxd_wq_state state;
 	unsigned long flags;
-	union wqcfg wqcfg;
+	union wqcfg *wqcfg;
 	u32 vec_ptr;		/* interrupt steering */
 	struct dsa_hw_desc **hw_descs;
 	int num_descs;
@@ -183,6 +183,7 @@ struct idxd_device {
 	int max_wq_size;
 	int token_limit;
 	int nr_tokens;		/* non-reserved tokens */
+	unsigned int wqcfg_size;
 
 	union sw_err_reg sw_err;
 	wait_queue_head_t cmd_waitq;
@@ -178,6 +178,9 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 		wq->idxd_cdev.minor = -1;
 		wq->max_xfer_bytes = idxd->max_xfer_bytes;
 		wq->max_batch_size = idxd->max_batch_size;
+		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
+		if (!wq->wqcfg)
+			return -ENOMEM;
 	}
 
 	for (i = 0; i < idxd->max_engines; i++) {
@@ -251,6 +254,8 @@ static void idxd_read_caps(struct idxd_device *idxd)
 	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
 	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
 	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
+	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
 
 	/* reading operation capabilities */
 	for (i = 0; i < 4; i++) {
@@ -8,7 +8,7 @@
 
 #define IDXD_MMIO_BAR		0
 #define IDXD_WQ_BAR		2
-#define IDXD_PORTAL_SIZE	0x4000
+#define IDXD_PORTAL_SIZE	PAGE_SIZE
 
 /* MMIO Device BAR0 Registers */
 #define IDXD_VER_OFFSET		0x00
@@ -43,7 +43,8 @@ union wq_cap_reg {
 	struct {
 		u64 total_wq_size:16;
 		u64 num_wqs:8;
-		u64 rsvd:24;
+		u64 wqcfg_size:4;
+		u64 rsvd:20;
 		u64 shared_mode:1;
 		u64 dedicated_mode:1;
 		u64 rsvd2:1;
@@ -55,6 +56,7 @@ union wq_cap_reg {
 	u64 bits;
 } __packed;
 #define IDXD_WQCAP_OFFSET		0x20
+#define IDXD_WQCFG_MIN			5
 
 union group_cap_reg {
 	struct {
@@ -333,4 +335,23 @@ union wqcfg {
 	};
 	u32 bits[8];
 } __packed;
+
+/*
+ * This macro calculates the offset into the WQCFG register
+ * idxd - struct idxd *
+ * n - wq id
+ * ofs - the index of the 32b dword for the config register
+ *
+ * The WQCFG register block is divided into groups per each wq. The n index
+ * allows us to move to the register group that's for that particular wq.
+ * Each register is 32bits. The ofs gives us the number of register to access.
+ */
+#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
+({\
+	typeof(_idxd_dev) __idxd_dev = (_idxd_dev); \
+	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs); \
+})
+
+#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
+
 #endif
@@ -74,7 +74,7 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 	if (idxd->state != IDXD_DEV_ENABLED)
 		return -EIO;
 
-	portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+	portal = wq->dportal;
 	/*
 	 * The wmb() flushes writes to coherent DMA data before possibly
 	 * triggering a DMA read. The wmb() is necessary even on UP because
@@ -40,16 +40,6 @@
 #define DCA2_TAG_MAP_BYTE3 0x82
 #define DCA2_TAG_MAP_BYTE4 0x82
 
-/* verify if tag map matches expected values */
-static inline int dca2_tag_map_valid(u8 *tag_map)
-{
-	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
-		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
-		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
-		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
-		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
-}
-
 /*
  * "Legacy" DCA systems do not implement the DCA register set in the
  * I/OAT device. Software needs direct support for their tag mappings.
@@ -2799,7 +2799,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	 * If burst size is smaller than bus width then make sure we only
 	 * transfer one at a time to avoid a burst stradling an MFIFO entry.
 	 */
-	if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
+	if (burst * 8 < pl330->pcfg.data_bus_width)
 		desc->rqcfg.brst_len = 1;
 
 	desc->bytes_requested = len;
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(xudma_rflow_is_gp);
 #define XUDMA_GET_PUT_RESOURCE(res)					\
 struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id)	\
 {									\
-	return __udma_reserve_##res(ud, false, id);			\
+	return __udma_reserve_##res(ud, UDMA_TP_NORMAL, id);		\
 }									\
 EXPORT_SYMBOL(xudma_##res##_get);					\
 									\
@@ -1522,29 +1522,38 @@ static void omap_dma_free(struct omap_dmadev *od)
 	}
 }
 
+/* Currently used by omap2 & 3 to block deeper SoC idle states */
+static bool omap_dma_busy(struct omap_dmadev *od)
+{
+	struct omap_chan *c;
+	int lch = -1;
+
+	while (1) {
+		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
+		if (lch >= od->lch_count)
+			break;
+		c = od->lch_map[lch];
+		if (!c)
+			continue;
+		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
+			return true;
+	}
+
+	return false;
+}
+
 /* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
 static int omap_dma_busy_notifier(struct notifier_block *nb,
 				  unsigned long cmd, void *v)
 {
 	struct omap_dmadev *od;
-	struct omap_chan *c;
-	int lch = -1;
 
 	od = container_of(nb, struct omap_dmadev, nb);
 
 	switch (cmd) {
 	case CPU_CLUSTER_PM_ENTER:
-		while (1) {
-			lch = find_next_bit(od->lch_bitmap, od->lch_count,
-					    lch + 1);
-			if (lch >= od->lch_count)
-				break;
-			c = od->lch_map[lch];
-			if (!c)
-				continue;
-			if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
-				return NOTIFY_BAD;
-		}
+		if (omap_dma_busy(od))
+			return NOTIFY_BAD;
 		break;
 	case CPU_CLUSTER_PM_ENTER_FAILED:
 	case CPU_CLUSTER_PM_EXIT:
@@ -1595,6 +1604,8 @@ static int omap_dma_context_notifier(struct notifier_block *nb,
 
 	switch (cmd) {
 	case CPU_CLUSTER_PM_ENTER:
+		if (omap_dma_busy(od))
+			return NOTIFY_BAD;
 		omap_dma_context_save(od);
 		break;
 	case CPU_CLUSTER_PM_ENTER_FAILED:
@@ -517,8 +517,8 @@ struct xilinx_dma_device {
 #define to_dma_tx_descriptor(tx) \
 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
-	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
-			   cond, delay_us, timeout_us)
+	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
+				  val, cond, delay_us, timeout_us)
 
 /* IO accessors */
 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
@@ -948,8 +948,10 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
 {
 	struct xilinx_cdma_tx_segment *cdma_seg;
 	struct xilinx_axidma_tx_segment *axidma_seg;
+	struct xilinx_aximcdma_tx_segment *aximcdma_seg;
 	struct xilinx_cdma_desc_hw *cdma_hw;
 	struct xilinx_axidma_desc_hw *axidma_hw;
+	struct xilinx_aximcdma_desc_hw *aximcdma_hw;
 	struct list_head *entry;
 	u32 residue = 0;
 
@@ -961,13 +963,23 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
 			cdma_hw = &cdma_seg->hw;
 			residue += (cdma_hw->control - cdma_hw->status) &
 				   chan->xdev->max_buffer_len;
-		} else {
+		} else if (chan->xdev->dma_config->dmatype ==
+			   XDMA_TYPE_AXIDMA) {
 			axidma_seg = list_entry(entry,
 						struct xilinx_axidma_tx_segment,
 						node);
 			axidma_hw = &axidma_seg->hw;
 			residue += (axidma_hw->control - axidma_hw->status) &
 				   chan->xdev->max_buffer_len;
+		} else {
+			aximcdma_seg =
+				list_entry(entry,
+					   struct xilinx_aximcdma_tx_segment,
+					   node);
+			aximcdma_hw = &aximcdma_seg->hw;
+			residue +=
+				(aximcdma_hw->control - aximcdma_hw->status) &
+				chan->xdev->max_buffer_len;
 		}
 	}
 
@@ -1135,7 +1147,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 				upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
 					((i + 1) % XILINX_DMA_NUM_DESCS));
 			chan->seg_mv[i].phys = chan->seg_p +
-				sizeof(*chan->seg_v) * i;
+				sizeof(*chan->seg_mv) * i;
 			list_add_tail(&chan->seg_mv[i].node,
 				      &chan->free_seg_list);
 		}
@@ -1560,7 +1572,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
-	struct xilinx_axidma_tx_segment *tail_segment;
+	struct xilinx_aximcdma_tx_segment *tail_segment;
 	u32 reg;
 
 	/*
@@ -1582,7 +1594,7 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_desc = list_last_entry(&chan->pending_list,
 				    struct xilinx_dma_tx_descriptor, node);
 	tail_segment = list_last_entry(&tail_desc->segments,
-				       struct xilinx_axidma_tx_segment, node);
+				       struct xilinx_aximcdma_tx_segment, node);
 
 	reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
 
@@ -1864,6 +1876,7 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
 	struct xilinx_vdma_tx_segment *tail_segment;
 	struct xilinx_dma_tx_descriptor *tail_desc;
 	struct xilinx_axidma_tx_segment *axidma_tail_segment;
+	struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
 	struct xilinx_cdma_tx_segment *cdma_tail_segment;
 
 	if (list_empty(&chan->pending_list))
@@ -1885,11 +1898,17 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
 						struct xilinx_cdma_tx_segment,
 						node);
 		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
-	} else {
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		axidma_tail_segment = list_last_entry(&tail_desc->segments,
 					       struct xilinx_axidma_tx_segment,
 					       node);
 		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+	} else {
+		aximcdma_tail_segment =
+			list_last_entry(&tail_desc->segments,
+					struct xilinx_aximcdma_tx_segment,
+					node);
+		aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
 	}
 
 	/*
@@ -2836,10 +2855,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->stop_transfer = xilinx_dma_stop_transfer;
 	}
 
-	/* check if SG is enabled (only for AXIDMA and CDMA) */
+	/* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
 	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
-		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
-		    XILINX_DMA_DMASR_SG_MASK)
+		if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
+		    dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		    XILINX_DMA_DMASR_SG_MASK)
 			chan->has_sg = true;
 		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
 			chan->has_sg ? "enabled" : "disabled");
@@ -20,12 +20,28 @@
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/hashtable.h>
 
 #include <linux/firmware/xlnx-zynqmp.h>
 #include "zynqmp-debug.h"
 
+/* Max HashMap Order for PM API feature check (1<<7 = 128) */
+#define PM_API_FEATURE_CHECK_MAX_ORDER  7
+
 static bool feature_check_enabled;
-static u32 zynqmp_pm_features[PM_API_MAX];
+DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+
+/**
+ * struct pm_api_feature_data - PM API Feature data
+ * @pm_api_id:		PM API Id, used as key to index into hashmap
+ * @feature_status:	status of PM API feature: valid, invalid
+ * @hentry:		hlist_node that hooks this entry into hashtable
+ */
+struct pm_api_feature_data {
+	u32 pm_api_id;
+	int feature_status;
+	struct hlist_node hentry;
+};
 
 static const struct mfd_cell firmware_devs[] = {
 	{
@@ -142,29 +158,37 @@ static int zynqmp_pm_feature(u32 api_id)
 	int ret;
 	u32 ret_payload[PAYLOAD_ARG_CNT];
 	u64 smc_arg[2];
+	struct pm_api_feature_data *feature_data;
 
 	if (!feature_check_enabled)
 		return 0;
 
-	/* Return value if feature is already checked */
-	if (api_id > ARRAY_SIZE(zynqmp_pm_features))
-		return PM_FEATURE_INVALID;
+	/* Check for existing entry in hash table for given api */
+	hash_for_each_possible(pm_api_features_map, feature_data, hentry,
+			       api_id) {
+		if (feature_data->pm_api_id == api_id)
+			return feature_data->feature_status;
+	}
 
-	if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
-		return zynqmp_pm_features[api_id];
+	/* Add new entry if not present */
+	feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
+	if (!feature_data)
+		return -ENOMEM;
 
+	feature_data->pm_api_id = api_id;
 	smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
 	smc_arg[1] = api_id;
 
 	ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
-	if (ret) {
-		zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
-		return PM_FEATURE_INVALID;
-	}
+	if (ret)
+		ret = -EOPNOTSUPP;
+	else
+		ret = ret_payload[1];
 
-	zynqmp_pm_features[api_id] = ret_payload[1];
+	feature_data->feature_status = ret;
+	hash_add(pm_api_features_map, &feature_data->hentry, api_id);
 
-	return zynqmp_pm_features[api_id];
+	return ret;
 }
 
 /**
@@ -200,9 +224,12 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
 	 * Make sure to stay in x0 register
 	 */
 	u64 smc_arg[4];
+	int ret;
 
-	if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
-		return -ENOTSUPP;
+	/* Check if feature is supported or not */
+	ret = zynqmp_pm_feature(pm_api_id);
+	if (ret < 0)
+		return ret;
 
 	smc_arg[0] = PM_SIP_SVC | pm_api_id;
 	smc_arg[1] = ((u64)arg1 << 32) | arg0;
@@ -615,7 +642,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
  */
 int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
 {
-	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
+	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
 				   type, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
@@ -1252,9 +1279,17 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 
 static int zynqmp_firmware_remove(struct platform_device *pdev)
 {
+	struct pm_api_feature_data *feature_data;
+	int i;
+
 	mfd_remove_devices(&pdev->dev);
 	zynqmp_pm_api_debugfs_exit();
 
+	hash_for_each(pm_api_features_map, i, feature_data, hentry) {
+		hash_del(&feature_data->hentry);
+		kfree(feature_data);
+	}
+
 	return 0;
 }
 
@@ -4852,7 +4852,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
 		return -ENOTSUPP;
 
-	if (ras && ras->supported)
+	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
 	return amdgpu_dpm_baco_enter(adev);
@@ -4871,7 +4871,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	if (ras && ras->supported)
+	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
 	return 0;
@@ -1055,10 +1055,10 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 	{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 	/* Arcturus */
-	{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+	{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+	{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+	{0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
 	/* Navi10 */
 	{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
@@ -69,10 +69,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
 
 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
 				   unsigned int type,
-				   uint64_t size)
+				   uint64_t size_in_page)
 {
 	return ttm_range_man_init(&adev->mman.bdev, type,
-				  false, size >> PAGE_SHIFT);
+				  false, size_in_page);
 }
 
 /**
@@ -67,6 +67,7 @@ struct amdgpu_uvd {
 	unsigned harvest_config;
 	/* store image width to adjust nb memory state */
 	unsigned decode_image_width;
+	uint32_t keyselect;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
@@ -3105,6 +3105,8 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
@@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
  */
 static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
 {
-	void *ptr;
-	uint32_t ucode_len, i;
-	uint32_t keysel;
-
-	ptr = adev->uvd.inst[0].cpu_addr;
-	ptr += 192 + 16;
-	memcpy(&ucode_len, ptr, 4);
-	ptr += ucode_len;
-	memcpy(&keysel, ptr, 4);
+	int i;
+	uint32_t keysel = adev->uvd.keyselect;
 
 	WREG32(mmUVD_FW_START, keysel);
 
@@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
 	struct amdgpu_ring *ring;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
+	void *ptr;
+	uint32_t ucode_len;
 
 	/* UVD TRAP */
 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
@@ -571,6 +566,13 @@ static int uvd_v3_1_sw_init(void *handle)
 	if (r)
 		return r;
 
+	/* Retrieval firmware validate key */
+	ptr = adev->uvd.inst[0].cpu_addr;
+	ptr += 192 + 16;
+	memcpy(&ucode_len, ptr, 4);
+	ptr += ucode_len;
+	memcpy(&adev->uvd.keyselect, ptr, 4);
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
@@ -1041,7 +1041,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	amdgpu_dm_init_color_mod();
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-	if (adev->asic_type >= CHIP_RAVEN) {
+	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 
 		if (!adev->dm.hdcp_workqueue)
@@ -7506,7 +7506,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	bool mode_set_reset_required = false;
 
 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
-	drm_atomic_helper_calc_timestamping_constants(state);
 
 	dm_state = dm_atomic_get_new_state(state);
 	if (dm_state && dm_state->context) {
@@ -7533,6 +7532,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	}
 
+	drm_atomic_helper_calc_timestamping_constants(state);
+
 	/* update changed items */
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -299,8 +299,8 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
 	pflip_int_entry(1),
 	pflip_int_entry(2),
 	pflip_int_entry(3),
-	[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
-	[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+	pflip_int_entry(4),
+	pflip_int_entry(5),
 	[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
 	gpio_pad_int_entry(0),
 	gpio_pad_int_entry(1),
Some files were not shown because too many files have changed in this diff.