Mirror of https://github.com/torvalds/linux.git, synced 2024-11-21 19:41:42 +00:00

Merge branch 'topic/control-lookup-rwlock' into for-next

Pull control lookup optimization changes.

Signed-off-by: Takashi Iwai <tiwai@suse.de>

Commit 4004f3029e
@@ -4798,11 +4798,9 @@
 	profile=	[KNL] Enable kernel profiling via /proc/profile
 			Format: [<profiletype>,]<number>
-			Param: <profiletype>: "schedule", "sleep", or "kvm"
+			Param: <profiletype>: "schedule" or "kvm"
 				[defaults to kernel profiling]
 			Param: "schedule" - profile schedule points.
-			Param: "sleep" - profile D-state sleeping (millisecs).
-				Requires CONFIG_SCHEDSTATS
 			Param: "kvm" - profile VM exits.
 			Param: <number> - step/bucket size as a power of 2 for
 				statistical time based profiling.
@@ -122,10 +122,18 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1490853        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A76      | #3324349        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1491015        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A77      | #3324348        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A78      | #3324344        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A78C     | #3324346,3324347| ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |

@@ -138,8 +146,14 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A720     | #3456091        | ARM64_ERRATUM_3194386       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A725     | #3456106        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X1       | #1502854        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X1       | #3324344        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X1C      | #3324346        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |

@@ -160,6 +174,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N1     | #3324349        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2139208        | ARM64_ERRATUM_2139208       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2067961        | ARM64_ERRATUM_2067961       |

@@ -170,6 +186,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-V1     | #1619801        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-V1     | #3324341        | ARM64_ERRATUM_3194386       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-V2     | #3324336        | ARM64_ERRATUM_3194386       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3194386       |
@@ -35,6 +35,9 @@ properties:
   ports-implemented:
     const: 1

+  power-domains:
+    maxItems: 1
+
   sata-port@0:
     $ref: /schemas/ata/snps,dwc-ahci-common.yaml#/$defs/dwc-ahci-port

@@ -199,10 +199,11 @@ additionalProperties: false

 examples:
   - |
+    #include <dt-bindings/gpio/gpio.h>
     codec@1,0{
         compatible = "slim217,250";
         reg = <1 0>;
-        reset-gpios = <&tlmm 64 0>;
+        reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>;
         slim-ifc-dev = <&wcd9340_ifd>;
         #sound-dai-cells = <1>;
         interrupt-parent = <&tlmm>;

@@ -42,7 +42,7 @@ examples:
         pinctrl-names = "default", "sleep";
         pinctrl-0 = <&wcd_reset_n>;
         pinctrl-1 = <&wcd_reset_n_sleep>;
-        reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
+        reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
         vdd-buck-supply = <&vreg_l17b_1p8>;
         vdd-rxtx-supply = <&vreg_l18b_1p8>;
         vdd-px-supply = <&vreg_l18b_1p8>;
@@ -34,9 +34,10 @@ unevaluatedProperties: false

 examples:
   - |
+    #include <dt-bindings/gpio/gpio.h>
     codec {
         compatible = "qcom,wcd9380-codec";
-        reset-gpios = <&tlmm 32 0>;
+        reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
         #sound-dai-cells = <1>;
         qcom,tx-device = <&wcd938x_tx>;
         qcom,rx-device = <&wcd938x_rx>;

@@ -52,10 +52,10 @@ unevaluatedProperties: false

 examples:
   - |
-    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/gpio/gpio.h>
     codec {
         compatible = "qcom,wcd9390-codec";
-        reset-gpios = <&tlmm 32 IRQ_TYPE_NONE>;
+        reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
         #sound-dai-cells = <1>;
         qcom,tx-device = <&wcd939x_tx>;
         qcom,rx-device = <&wcd939x_rx>;
@@ -4,8 +4,6 @@ Generic Thermal Sysfs driver How To

 Written by Sujith Thomas <sujith.thomas@intel.com>, Zhang Rui <rui.zhang@intel.com>

-Updated: 2 January 2008
-
 Copyright (c)  2008 Intel Corporation



@@ -38,23 +36,23 @@ temperature) and throttle appropriate devices.

 ::

-    struct thermal_zone_device
-    *thermal_zone_device_register(char *type,
-                                  int trips, int mask, void *devdata,
-                                  struct thermal_zone_device_ops *ops,
-                                  const struct thermal_zone_params *tzp,
-                                  int passive_delay, int polling_delay))
+    struct thermal_zone_device *
+    thermal_zone_device_register_with_trips(const char *type,
+                                            const struct thermal_trip *trips,
+                                            int num_trips, void *devdata,
+                                            const struct thermal_zone_device_ops *ops,
+                                            const struct thermal_zone_params *tzp,
+                                            unsigned int passive_delay,
+                                            unsigned int polling_delay)

-    This interface function adds a new thermal zone device (sensor) to
+    This interface function adds a new thermal zone device (sensor) to the
     /sys/class/thermal folder as `thermal_zone[0-*]`. It tries to bind all the
-    thermal cooling devices registered at the same time.
+    thermal cooling devices registered to it at the same time.

     type:
 	the thermal zone type.
     trips:
-	the total number of trip points this thermal zone supports.
-    mask:
-	Bit string: If 'n'th bit is set, then trip point 'n' is writable.
+	the table of trip points for this thermal zone.
     devdata:
 	device private data
     ops:

@@ -67,32 +65,29 @@ temperature) and throttle appropriate devices.
     .get_temp:
 	get the current temperature of the thermal zone.
     .set_trips:
 	set the trip points window. Whenever the current temperature
 	is updated, the trip points immediately below and above the
 	current temperature are found.
-    .get_mode:
-	get the current mode (enabled/disabled) of the thermal zone.
-
-	- "enabled" means the kernel thermal management is
-	  enabled.
-	- "disabled" will prevent kernel thermal driver action
-	  upon trip points so that user applications can take
-	  charge of thermal management.
-    .set_mode:
-	set the mode (enabled/disabled) of the thermal zone.
-    .get_trip_type:
-	get the type of certain trip point.
-    .get_trip_temp:
-	get the temperature above which the certain trip point
-	will be fired.
+    .change_mode:
+	change the mode (enabled/disabled) of the thermal zone.
+    .set_trip_temp:
+	set the temperature of a given trip point.
+    .get_crit_temp:
+	get the critical temperature for this thermal zone.
     .set_emul_temp:
 	set the emulation temperature which helps in debugging
 	different threshold temperature points.
+    .get_trend:
+	get the trend of most recent zone temperature changes.
+    .hot:
+	hot trip point crossing handler.
+    .critical:
+	critical trip point crossing handler.
     tzp:
 	thermal zone platform parameters.
     passive_delay:
-	number of milliseconds to wait between polls when
-	performing passive cooling.
+	number of milliseconds to wait between polls when performing passive
+	cooling.
     polling_delay:
 	number of milliseconds to wait between polls when checking
 	whether trip points have been crossed (0 for interrupt driven systems).
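The hunks above update the documentation for the trip-table based registration interface. For orientation only, here is a minimal, hypothetical sketch of how a driver might call thermal_zone_device_register_with_trips(); it is not part of this commit, and the sensor name, trip values, and example_get_temp() callback are invented for illustration.

/*
 * Illustrative sketch only -- not part of this diff. The sensor name,
 * trip values, and example_get_temp() are made-up examples.
 */
#include <linux/thermal.h>

static int example_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 45000;	/* millidegrees Celsius from a hypothetical sensor */
	return 0;
}

static const struct thermal_zone_device_ops example_ops = {
	.get_temp = example_get_temp,
};

static const struct thermal_trip example_trips[] = {
	{ .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
	{ .temperature = 95000, .hysteresis = 0,    .type = THERMAL_TRIP_CRITICAL },
};

static int example_register(void *devdata)
{
	struct thermal_zone_device *tzd;

	tzd = thermal_zone_device_register_with_trips("example_sensor",
						      example_trips,
						      ARRAY_SIZE(example_trips),
						      devdata, &example_ops,
						      NULL,	/* tzp */
						      0,	/* passive_delay, ms */
						      1000);	/* polling_delay, ms */
	return PTR_ERR_OR_ZERO(tzd);
}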
@@ -1753,6 +1753,7 @@ operations:
       request:
         attributes:
           - header
+          - context
       reply:
         attributes:
           - header

@@ -1761,7 +1762,6 @@ operations:
           - indir
           - hkey
           - input_xfrm
-      dump: *rss-get-op
   -
     name: plca-get-cfg
     doc: Get PLCA params.
@@ -1875,6 +1875,7 @@ Kernel response contents:

 ===================================== ====== ==========================
 ``ETHTOOL_A_RSS_HEADER``              nested reply header
+``ETHTOOL_A_RSS_CONTEXT``             u32    context number
 ``ETHTOOL_A_RSS_HFUNC``               u32    RSS hash func
 ``ETHTOOL_A_RSS_INDIR``               binary Indir table bytes
 ``ETHTOOL_A_RSS_HKEY``                binary Hash key bytes
@@ -21,9 +21,9 @@ are often referred to as greyscale formats.

 .. raw:: latex

-    \scriptsize
+    \tiny

-.. tabularcolumns:: |p{3.6cm}|p{3.0cm}|p{1.3cm}|p{2.6cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
+.. tabularcolumns:: |p{3.6cm}|p{2.4cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|

 .. flat-table:: Luma-Only Image Formats
     :header-rows: 1
@@ -6368,7 +6368,7 @@ a single guest_memfd file, but the bound ranges must not overlap).
 See KVM_SET_USER_MEMORY_REGION2 for additional details.

 4.143 KVM_PRE_FAULT_MEMORY
-------------------------
+---------------------------

 :Capability: KVM_CAP_PRE_FAULT_MEMORY
 :Architectures: none

@@ -6405,6 +6405,12 @@ for the current vCPU state. KVM maps memory as if the vCPU generated a
 stage-2 read page fault, e.g. faults in memory as needed, but doesn't break
 CoW. However, KVM does not mark any newly created stage-2 PTE as Accessed.

+In the case of confidential VM types where there is an initial set up of
+private guest memory before the guest is 'finalized'/measured, this ioctl
+should only be issued after completing all the necessary setup to put the
+guest into a 'finalized' state so that the above semantics can be reliably
+ensured.
+
 In some cases, multiple vCPUs might share the page tables. In this
 case, the ioctl can be called in parallel.

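For orientation only, a minimal userspace sketch of issuing the ioctl documented above after guest memory has been fully set up; it is not part of this diff, and the guest-physical range is a made-up example.

/*
 * Illustrative sketch only -- not part of this diff. The guest-physical
 * range below is a made-up example; vcpu_fd is an already-created vCPU fd.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int prefault_guest_range(int vcpu_fd)
{
	struct kvm_pre_fault_memory range = {
		.gpa   = 0x40000000,	/* hypothetical guest-physical start */
		.size  = 0x200000,	/* 2 MiB */
		.flags = 0,
	};

	if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0) {
		perror("KVM_PRE_FAULT_MEMORY");
		return -1;
	}
	return 0;
}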
@@ -5306,7 +5306,7 @@ F: drivers/media/cec/i2c/ch7322.c
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:	David Rhodes <david.rhodes@cirrus.com>
 M:	Richard Fitzgerald <rf@opensource.cirrus.com>
-L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+L:	linux-sound@vger.kernel.org
 L:	patches@opensource.cirrus.com
 S:	Maintained
 F:	Documentation/devicetree/bindings/sound/cirrus,cs*

@@ -5375,7 +5375,7 @@ F: sound/soc/codecs/lochnagar-sc.c
 CIRRUS LOGIC MADERA CODEC DRIVERS
 M:	Charles Keepax <ckeepax@opensource.cirrus.com>
 M:	Richard Fitzgerald <rf@opensource.cirrus.com>
-L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+L:	linux-sound@vger.kernel.org
 L:	patches@opensource.cirrus.com
 S:	Supported
 W:	https://github.com/CirrusLogic/linux-drivers/wiki

@@ -15936,6 +15936,7 @@ F: include/linux/in.h
 F:	include/linux/indirect_call_wrapper.h
 F:	include/linux/net.h
 F:	include/linux/netdevice.h
+F:	include/linux/skbuff.h
 F:	include/net/
 F:	include/uapi/linux/in.h
 F:	include/uapi/linux/net.h

@@ -18556,7 +18557,7 @@ F: drivers/usb/misc/qcom_eud.c
 QCOM IPA DRIVER
 M:	Alex Elder <elder@kernel.org>
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Maintained
 F:	drivers/net/ipa/

 QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
Makefile

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -534,8 +534,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)

 #define ioread16be(p) swab16(ioread16(p))
 #define ioread32be(p) swab32(ioread32(p))
+#define ioread64be(p) swab64(ioread64(p))
 #define iowrite16be(v,p) iowrite16(swab16(v), (p))
 #define iowrite32be(v,p) iowrite32(swab32(v), (p))
+#define iowrite64be(v,p) iowrite64(swab64(v), (p))

 #define inb_p		inb
 #define inw_p		inw

@@ -634,8 +636,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
  */
 #define ioread64 ioread64
 #define iowrite64 iowrite64
-#define ioread64be ioread64be
-#define iowrite64be iowrite64be
 #define ioread8_rep ioread8_rep
 #define ioread16_rep ioread16_rep
 #define ioread32_rep ioread32_rep
@@ -87,6 +87,7 @@ config ARM
 	select HAVE_ARCH_PFN_VALID
 	select HAVE_ARCH_SECCOMP
 	select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
+	select HAVE_ARCH_STACKLEAK
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE

@@ -116,6 +117,7 @@ config ARM
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
 	select HAVE_KRETPROBES if HAVE_KPROBES
+	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI
 	select HAVE_OPTPROBES if !THUMB2_KERNEL

@@ -736,7 +738,7 @@ config ARM_ERRATA_764319
 	bool "ARM errata: Read to DBGPRSR and DBGOSLSR may generate Undefined instruction"
 	depends on CPU_V7
 	help
-	  This option enables the workaround for the 764319 Cortex A-9 erratum.
+	  This option enables the workaround for the 764319 Cortex-A9 erratum.
 	  CP14 read accesses to the DBGPRSR and DBGOSLSR registers generate an
 	  unexpected Undefined Instruction exception when the DBGSWENABLE
 	  external pin is set to 0, even when the CP14 accesses are performed
@@ -9,6 +9,7 @@ OBJS =

 HEAD	= head.o
 OBJS	+= misc.o decompress.o
+CFLAGS_decompress.o += $(DISABLE_STACKLEAK_PLUGIN)
 ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
 OBJS	+= debug.o
 AFLAGS_head.o += -DDEBUG

@@ -125,7 +125,7 @@ SECTIONS

   . = BSS_START;
   __bss_start = .;
-  .bss			: { *(.bss) }
+  .bss			: { *(.bss .bss.*) }
   _end = .;

   . = ALIGN(8);		/* the stack must be 64-bit aligned */

@@ -157,7 +157,7 @@
 			clocks = <&xtal24mhz>;
 		};

-		pclk: clock-24000000 {
+		pclk: clock-pclk {
 			#clock-cells = <0>;
 			compatible = "fixed-factor-clock";
 			clock-div = <1>;
@@ -26,6 +26,13 @@ struct stackframe {
 #endif
 };

+static inline bool on_thread_stack(void)
+{
+	unsigned long delta = current_stack_pointer ^ (unsigned long)current->stack;
+
+	return delta < THREAD_SIZE;
+}
+
 static __always_inline
 void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
 {

@@ -42,7 +42,7 @@
 #define PROC_INFO							\
 		. = ALIGN(4);						\
 		__proc_info_begin = .;					\
-		*(.proc.info.init)					\
+		KEEP(*(.proc.info.init))				\
 		__proc_info_end = .;

 #define IDMAP_TEXT							\
@@ -1065,6 +1065,7 @@ vector_addrexcptn:
 	.globl	vector_fiq

 	.section .vectors, "ax", %progbits
+	.reloc	.text, R_ARM_NONE, .
 	W(b)	vector_rst
 	W(b)	vector_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi	)

@@ -1078,6 +1079,7 @@ THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi	)

 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
 	.section .vectors.bhb.loop8, "ax", %progbits
+	.reloc	.text, R_ARM_NONE, .
 	W(b)	vector_rst
 	W(b)	vector_bhb_loop8_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)

@@ -1090,6 +1092,7 @@ THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
 	W(b)	vector_bhb_loop8_fiq

 	.section .vectors.bhb.bpiall, "ax", %progbits
+	.reloc	.text, R_ARM_NONE, .
 	W(b)	vector_rst
 	W(b)	vector_bhb_bpiall_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
@@ -119,6 +119,9 @@ no_work_pending:

 	ct_user_enter save = 0

+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+	bl	stackleak_erase_on_task_stack
+#endif
 	restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user_from_irq)
 ENDPROC(ret_to_user)

@@ -395,11 +395,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	return 0;
 }

-struct mod_unwind_map {
-	const Elf_Shdr *unw_sec;
-	const Elf_Shdr *txt_sec;
-};
-
 static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
 	const Elf_Shdr *sechdrs, const char *name)
 {

@@ -85,8 +85,7 @@ static bool
 callchain_trace(void *data, unsigned long pc)
 {
 	struct perf_callchain_entry_ctx *entry = data;
-	perf_callchain_store(entry, pc);
-	return true;
+	return perf_callchain_store(entry, pc) == 0;
 }

 void
@@ -63,7 +63,7 @@ SECTIONS
 	. = ALIGN(4);
 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
 		__start___ex_table = .;
-		ARM_MMU_KEEP(*(__ex_table))
+		ARM_MMU_KEEP(KEEP(*(__ex_table)))
 		__stop___ex_table = .;
 	}

@@ -83,7 +83,7 @@ SECTIONS
 	}
 	.init.arch.info : {
 		__arch_info_begin = .;
-		*(.arch.info.init)
+		KEEP(*(.arch.info.init))
 		__arch_info_end = .;
 	}
 	.init.tagtable : {

@@ -74,7 +74,7 @@ SECTIONS
 	. = ALIGN(4);
 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
 		__start___ex_table = .;
-		ARM_MMU_KEEP(*(__ex_table))
+		ARM_MMU_KEEP(KEEP(*(__ex_table)))
 		__stop___ex_table = .;
 	}

@@ -99,7 +99,7 @@ SECTIONS
 	}
 	.init.arch.info : {
 		__arch_info_begin = .;
-		*(.arch.info.init)
+		KEEP(*(.arch.info.init))
 		__arch_info_end = .;
 	}
 	.init.tagtable : {

@@ -116,7 +116,7 @@ SECTIONS
 #endif
 	.init.pv_table : {
 		__pv_table_begin = .;
-		*(.pv_table)
+		KEEP(*(.pv_table))
 		__pv_table_end = .;
 	}

@@ -29,7 +29,7 @@ int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr)
 	/*
 	 * Set CPU resume address -
 	 * secure firmware running on boot will jump to this address
-	 * after setting proper CPU mode, and initialiing e.g. secure
+	 * after setting proper CPU mode, and initializing e.g. secure
 	 * regs (the same mode all CPUs are booted to - usually HYP)
 	 */
 	writel(phys_resume_addr,
@@ -17,7 +17,7 @@ void cpu_arm7tdmi_proc_init(void);
 __ADDRESSABLE(cpu_arm7tdmi_proc_init);
 void cpu_arm7tdmi_proc_fin(void);
 __ADDRESSABLE(cpu_arm7tdmi_proc_fin);
-void cpu_arm7tdmi_reset(void);
+void cpu_arm7tdmi_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm7tdmi_reset);
 int cpu_arm7tdmi_do_idle(void);
 __ADDRESSABLE(cpu_arm7tdmi_do_idle);

@@ -32,7 +32,7 @@ void cpu_arm720_proc_init(void);
 __ADDRESSABLE(cpu_arm720_proc_init);
 void cpu_arm720_proc_fin(void);
 __ADDRESSABLE(cpu_arm720_proc_fin);
-void cpu_arm720_reset(void);
+void cpu_arm720_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm720_reset);
 int cpu_arm720_do_idle(void);
 __ADDRESSABLE(cpu_arm720_do_idle);

@@ -49,7 +49,7 @@ void cpu_arm740_proc_init(void);
 __ADDRESSABLE(cpu_arm740_proc_init);
 void cpu_arm740_proc_fin(void);
 __ADDRESSABLE(cpu_arm740_proc_fin);
-void cpu_arm740_reset(void);
+void cpu_arm740_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm740_reset);
 int cpu_arm740_do_idle(void);
 __ADDRESSABLE(cpu_arm740_do_idle);

@@ -64,7 +64,7 @@ void cpu_arm9tdmi_proc_init(void);
 __ADDRESSABLE(cpu_arm9tdmi_proc_init);
 void cpu_arm9tdmi_proc_fin(void);
 __ADDRESSABLE(cpu_arm9tdmi_proc_fin);
-void cpu_arm9tdmi_reset(void);
+void cpu_arm9tdmi_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm9tdmi_reset);
 int cpu_arm9tdmi_do_idle(void);
 __ADDRESSABLE(cpu_arm9tdmi_do_idle);

@@ -79,7 +79,7 @@ void cpu_arm920_proc_init(void);
 __ADDRESSABLE(cpu_arm920_proc_init);
 void cpu_arm920_proc_fin(void);
 __ADDRESSABLE(cpu_arm920_proc_fin);
-void cpu_arm920_reset(void);
+void cpu_arm920_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm920_reset);
 int cpu_arm920_do_idle(void);
 __ADDRESSABLE(cpu_arm920_do_idle);

@@ -102,7 +102,7 @@ void cpu_arm922_proc_init(void);
 __ADDRESSABLE(cpu_arm922_proc_init);
 void cpu_arm922_proc_fin(void);
 __ADDRESSABLE(cpu_arm922_proc_fin);
-void cpu_arm922_reset(void);
+void cpu_arm922_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm922_reset);
 int cpu_arm922_do_idle(void);
 __ADDRESSABLE(cpu_arm922_do_idle);

@@ -119,7 +119,7 @@ void cpu_arm925_proc_init(void);
 __ADDRESSABLE(cpu_arm925_proc_init);
 void cpu_arm925_proc_fin(void);
 __ADDRESSABLE(cpu_arm925_proc_fin);
-void cpu_arm925_reset(void);
+void cpu_arm925_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm925_reset);
 int cpu_arm925_do_idle(void);
 __ADDRESSABLE(cpu_arm925_do_idle);

@@ -159,7 +159,7 @@ void cpu_arm940_proc_init(void);
 __ADDRESSABLE(cpu_arm940_proc_init);
 void cpu_arm940_proc_fin(void);
 __ADDRESSABLE(cpu_arm940_proc_fin);
-void cpu_arm940_reset(void);
+void cpu_arm940_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm940_reset);
 int cpu_arm940_do_idle(void);
 __ADDRESSABLE(cpu_arm940_do_idle);

@@ -174,7 +174,7 @@ void cpu_arm946_proc_init(void);
 __ADDRESSABLE(cpu_arm946_proc_init);
 void cpu_arm946_proc_fin(void);
 __ADDRESSABLE(cpu_arm946_proc_fin);
-void cpu_arm946_reset(void);
+void cpu_arm946_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_arm946_reset);
 int cpu_arm946_do_idle(void);
 __ADDRESSABLE(cpu_arm946_do_idle);

@@ -429,7 +429,7 @@ void cpu_v7_proc_init(void);
 __ADDRESSABLE(cpu_v7_proc_init);
 void cpu_v7_proc_fin(void);
 __ADDRESSABLE(cpu_v7_proc_fin);
-void cpu_v7_reset(void);
+void cpu_v7_reset(unsigned long addr, bool hvc);
 __ADDRESSABLE(cpu_v7_reset);
 int cpu_v7_do_idle(void);
 __ADDRESSABLE(cpu_v7_do_idle);
@@ -1069,18 +1069,28 @@ config ARM64_ERRATUM_3117295
 	  If unsure, say Y.

 config ARM64_ERRATUM_3194386
-	bool "Cortex-{A720,X4,X925}/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
+	bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
 	default y
 	help
 	  This option adds the workaround for the following errata:

+	  * ARM Cortex-A76 erratum 3324349
+	  * ARM Cortex-A77 erratum 3324348
+	  * ARM Cortex-A78 erratum 3324344
+	  * ARM Cortex-A78C erratum 3324346
+	  * ARM Cortex-A78C erratum 3324347
 	  * ARM Cortex-A710 erratam 3324338
 	  * ARM Cortex-A720 erratum 3456091
+	  * ARM Cortex-A725 erratum 3456106
+	  * ARM Cortex-X1 erratum 3324344
+	  * ARM Cortex-X1C erratum 3324346
 	  * ARM Cortex-X2 erratum 3324338
 	  * ARM Cortex-X3 erratum 3324335
 	  * ARM Cortex-X4 erratum 3194386
 	  * ARM Cortex-X925 erratum 3324334
+	  * ARM Neoverse-N1 erratum 3324349
 	  * ARM Neoverse N2 erratum 3324339
+	  * ARM Neoverse-V1 erratum 3324341
 	  * ARM Neoverse V2 erratum 3324336
 	  * ARM Neoverse-V3 erratum 3312417

@@ -1088,11 +1098,11 @@ config ARM64_ERRATUM_3194386
 	  subsequent speculative instructions, which may permit unexepected
 	  speculative store bypassing.

-	  Work around this problem by placing a speculation barrier after
-	  kernel changes to SSBS. The presence of the SSBS special-purpose
-	  register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
-	  that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
-	  SSBS.
+	  Work around this problem by placing a Speculation Barrier (SB) or
+	  Instruction Synchronization Barrier (ISB) after kernel changes to
+	  SSBS. The presence of the SSBS special-purpose register is hidden
+	  from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
+	  will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.

 	  If unsure, say Y.

@@ -86,12 +86,14 @@
 #define ARM_CPU_PART_CORTEX_X2		0xD48
 #define ARM_CPU_PART_NEOVERSE_N2	0xD49
 #define ARM_CPU_PART_CORTEX_A78C	0xD4B
+#define ARM_CPU_PART_CORTEX_X1C		0xD4C
 #define ARM_CPU_PART_CORTEX_X3		0xD4E
 #define ARM_CPU_PART_NEOVERSE_V2	0xD4F
 #define ARM_CPU_PART_CORTEX_A720	0xD81
 #define ARM_CPU_PART_CORTEX_X4		0xD82
 #define ARM_CPU_PART_NEOVERSE_V3	0xD84
 #define ARM_CPU_PART_CORTEX_X925	0xD85
+#define ARM_CPU_PART_CORTEX_A725	0xD87

 #define APM_CPU_PART_XGENE		0x000
 #define APM_CPU_VAR_POTENZA		0x00

@@ -165,12 +167,14 @@
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
 #define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
 #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
 #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
 #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
 #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
 #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 #include <asm/insn.h>

+#define HAVE_JUMP_LABEL_BATCH
 #define JUMP_LABEL_NOP_SIZE		AARCH64_INSN_SIZE

 #define JUMP_TABLE_ENTRY(key, label)	\

@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0

 syscall_abis_32 +=
-syscall_abis_64 += renameat newstat rlimit memfd_secret
+syscall_abis_64 += renameat rlimit memfd_secret

 syscalltbl = arch/arm64/tools/syscall_%.tbl
@@ -434,15 +434,24 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {

 #ifdef CONFIG_ARM64_ERRATUM_3194386
 static const struct midr_range erratum_spec_ssbs_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
-	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
 	{}
 };
 #endif
@@ -7,11 +7,12 @@
  */
 #include <linux/kernel.h>
 #include <linux/jump_label.h>
+#include <linux/smp.h>
 #include <asm/insn.h>
 #include <asm/patching.h>

-void arch_jump_label_transform(struct jump_entry *entry,
+bool arch_jump_label_transform_queue(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
 	void *addr = (void *)jump_entry_code(entry);
 	u32 insn;

@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	}

 	aarch64_insn_patch_text_nosync(addr, insn);
+	return true;
+}
+
+void arch_jump_label_transform_apply(void)
+{
+	kick_all_cpus_sync();
 }
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0

-syscall_abis_64 += newstat
+# No special ABIs on loongarch so far
+syscall_abis_64 +=

@@ -20,6 +20,7 @@ config PARISC
 	select ARCH_SUPPORTS_HUGETLBFS if PA20
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select ARCH_STACKWALK
+	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select HAVE_RELIABLE_STACKTRACE
 	select DMA_OPS
@@ -20,7 +20,16 @@

 #define SMP_CACHE_BYTES L1_CACHE_BYTES

-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#ifdef CONFIG_PA20
+#define ARCH_DMA_MINALIGN	128
+#else
+#define ARCH_DMA_MINALIGN	32
+#endif
+#define ARCH_KMALLOC_MINALIGN	16	/* ldcw requires 16-byte alignment */
+
+#define arch_slab_minalign()	((unsigned)dcache_stride)
+#define cache_line_size()	dcache_stride
+#define dma_get_cache_alignment cache_line_size

 #define __read_mostly __section(".data..read_mostly")

@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	jit_data->header =
 		bpf_jit_binary_alloc(prog_size + extable_size,
 				     &jit_data->image,
-				     sizeof(u32),
+				     sizeof(long),
 				     bpf_fill_ill_insns);
 	if (!jit_data->header) {
 		prog = orig_prog;
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0

 syscall_abis_32 += riscv memfd_secret
-syscall_abis_64 += riscv newstat rlimit memfd_secret
+syscall_abis_64 += riscv rlimit memfd_secret
@@ -432,28 +432,26 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
 		bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX);
 		for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) {
 			ext = riscv_get_isa_ext_data(bit);
-			if (!ext)
-				continue;

-			if (ext->validate) {
+			if (ext && ext->validate) {
 				ret = ext->validate(ext, resolved_isa);
 				if (ret == -EPROBE_DEFER) {
 					loop = true;
 					continue;
 				} else if (ret) {
 					/* Disable the extension entirely */
-					clear_bit(ext->id, source_isa);
+					clear_bit(bit, source_isa);
 					continue;
 				}
 			}

-			set_bit(ext->id, resolved_isa);
+			set_bit(bit, resolved_isa);
 			/* No need to keep it in source isa now that it is enabled */
-			clear_bit(ext->id, source_isa);
+			clear_bit(bit, source_isa);

 			/* Single letter extensions get set in hwcap */
-			if (ext->id < RISCV_ISA_EXT_BASE)
-				*this_hwcap |= isa2hwcap[ext->id];
+			if (bit < RISCV_ISA_EXT_BASE)
+				*this_hwcap |= isa2hwcap[bit];
 		}
 	} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
 }

@@ -71,7 +71,7 @@ void __init sbi_ipi_init(void)
 	 * the masking/unmasking of virtual IPIs is done
 	 * via generic IPI-Mux
 	 */
-	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+	cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
 			  "irqchip/sbi-ipi:starting",
 			  sbi_ipi_starting_cpu, NULL);

@@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)

 static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
 {
+	if (!user_mode(regs)) {
+		no_context(regs, addr);
+		return;
+	}
+
 	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, call the OOM killer, and return the userspace
 		 * (which will retry the fault, or kill us if we got oom-killed).
 		 */
-		if (!user_mode(regs)) {
-			no_context(regs, addr);
-			return;
-		}
 		pagefault_out_of_memory();
 		return;
 	} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
 		/* Kernel mode? Handle exceptions or die */
-		if (!user_mode(regs)) {
-			no_context(regs, addr);
-			return;
-		}
 		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
 		return;
+	} else if (fault & VM_FAULT_SIGSEGV) {
+		do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
+		return;
 	}

 	BUG();
 }

@@ -234,8 +234,6 @@ static void __init setup_bootmem(void)
 	 */
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

-	phys_ram_end = memblock_end_of_DRAM();
-
 	/*
 	 * Make sure we align the start of the memory on a PMD boundary so that
 	 * at worst, we map the linear mapping with PMD mappings.

@@ -250,6 +248,16 @@ static void __init setup_bootmem(void)
 	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
 		kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;

+	/*
+	 * The size of the linear page mapping may restrict the amount of
+	 * usable RAM.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT)) {
+		max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
+		memblock_cap_memory_range(phys_ram_base,
+					  max_mapped_addr - phys_ram_base);
+	}
+
 	/*
 	 * Reserve physical address space that would be mapped to virtual
 	 * addresses greater than (void *)(-PAGE_SIZE) because:

@@ -266,6 +274,7 @@ static void __init setup_bootmem(void)
 		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
 	}

+	phys_ram_end = memblock_end_of_DRAM();
 	min_low_pfn = PFN_UP(phys_ram_base);
 	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
 	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));

@@ -1284,8 +1293,6 @@ static void __init create_linear_mapping_page_table(void)
 		if (start <= __pa(PAGE_OFFSET) &&
 		    __pa(PAGE_OFFSET) < end)
 			start = __pa(PAGE_OFFSET);
-		if (end >= __pa(PAGE_OFFSET) + memory_limit)
-			end = __pa(PAGE_OFFSET) + memory_limit;

 		create_linear_mapping_range(start, end, 0, NULL);
 	}
@@ -7,6 +7,7 @@
  * Author: Li Zhengyu (lizhengyu3@huawei.com)
  *
  */
+#include <asm/asm.h>
 #include <linux/linkage.h>

 .text

@@ -34,6 +35,7 @@ SYM_CODE_END(purgatory_start)

 .data

+.align LGREG
 SYM_DATA(riscv_kernel_entry, .quad 0)

 .end
@@ -113,7 +113,7 @@ void load_fpu_state(struct fpu *state, int flags)
 	int mask;
 
 	if (flags & KERNEL_FPC)
-		fpu_lfpc(&state->fpc);
+		fpu_lfpc_safe(&state->fpc);
 	if (!cpu_has_vx()) {
 		if (flags & KERNEL_VXR_V0V7)
 			load_fp_regs_vx(state->vxrs);
@@ -59,14 +59,6 @@ SECTIONS
 	} :text = 0x0700
 
 	RO_DATA(PAGE_SIZE)
-	.data.rel.ro : {
-		*(.data.rel.ro .data.rel.ro.*)
-	}
-	.got : {
-		__got_start = .;
-		*(.got)
-		__got_end = .;
-	}
 
 	. = ALIGN(PAGE_SIZE);
 	_sdata = .;		/* Start of data section */
@@ -80,6 +72,15 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;
 
+	.data.rel.ro : {
+		*(.data.rel.ro .data.rel.ro.*)
+	}
+	.got : {
+		__got_start = .;
+		*(.got)
+		__got_end = .;
+	}
+
 	RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
 	.data.rel : {
 		*(.data.rel*)
@@ -3,6 +3,7 @@
 #include <linux/ptdump.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sort.h>
 #include <linux/mm.h>
 #include <linux/kfence.h>
 #include <linux/kasan.h>
@@ -15,13 +16,15 @@
 static unsigned long max_addr;
 
 struct addr_marker {
+	int is_start;
 	unsigned long start_address;
 	const char *name;
 };
 
 enum address_markers_idx {
-	IDENTITY_BEFORE_NR = 0,
-	IDENTITY_BEFORE_END_NR,
+	KVA_NR = 0,
+	LOWCORE_START_NR,
+	LOWCORE_END_NR,
 	AMODE31_START_NR,
 	AMODE31_END_NR,
 	KERNEL_START_NR,
@@ -30,8 +33,8 @@ enum address_markers_idx {
 	KFENCE_START_NR,
 	KFENCE_END_NR,
 #endif
-	IDENTITY_AFTER_NR,
-	IDENTITY_AFTER_END_NR,
+	IDENTITY_START_NR,
+	IDENTITY_END_NR,
 	VMEMMAP_NR,
 	VMEMMAP_END_NR,
 	VMALLOC_NR,
@@ -59,43 +62,44 @@ enum address_markers_idx {
 };
 
 static struct addr_marker address_markers[] = {
-	[IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"},
-	[IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
-	[AMODE31_START_NR] = {0, "Amode31 Area Start"},
-	[AMODE31_END_NR] = {0, "Amode31 Area End"},
-	[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
-	[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
+	[KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
+	[LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
+	[LOWCORE_END_NR] = {0, 0, "Lowcore End"},
+	[IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
+	[IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
+	[AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
+	[AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
+	[KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
+	[KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
 #ifdef CONFIG_KFENCE
-	[KFENCE_START_NR] = {0, "KFence Pool Start"},
-	[KFENCE_END_NR] = {0, "KFence Pool End"},
+	[KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
+	[KFENCE_END_NR] = {0, 0, "KFence Pool End"},
 #endif
-	[IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
-	[IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
-	[VMEMMAP_NR] = {0, "vmemmap Area Start"},
-	[VMEMMAP_END_NR] = {0, "vmemmap Area End"},
-	[VMALLOC_NR] = {0, "vmalloc Area Start"},
-	[VMALLOC_END_NR] = {0, "vmalloc Area End"},
+	[VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
+	[VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
+	[VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
+	[VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
 #ifdef CONFIG_KMSAN
-	[KMSAN_VMALLOC_SHADOW_START_NR] = {0, "Kmsan vmalloc Shadow Start"},
-	[KMSAN_VMALLOC_SHADOW_END_NR] = {0, "Kmsan vmalloc Shadow End"},
-	[KMSAN_VMALLOC_ORIGIN_START_NR] = {0, "Kmsan vmalloc Origins Start"},
-	[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, "Kmsan vmalloc Origins End"},
-	[KMSAN_MODULES_SHADOW_START_NR] = {0, "Kmsan Modules Shadow Start"},
-	[KMSAN_MODULES_SHADOW_END_NR] = {0, "Kmsan Modules Shadow End"},
-	[KMSAN_MODULES_ORIGIN_START_NR] = {0, "Kmsan Modules Origins Start"},
-	[KMSAN_MODULES_ORIGIN_END_NR] = {0, "Kmsan Modules Origins End"},
+	[KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
+	[KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
+	[KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
+	[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
+	[KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
+	[KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
+	[KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
+	[KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
 #endif
-	[MODULES_NR] = {0, "Modules Area Start"},
-	[MODULES_END_NR] = {0, "Modules Area End"},
-	[ABS_LOWCORE_NR] = {0, "Lowcore Area Start"},
-	[ABS_LOWCORE_END_NR] = {0, "Lowcore Area End"},
-	[MEMCPY_REAL_NR] = {0, "Real Memory Copy Area Start"},
-	[MEMCPY_REAL_END_NR] = {0, "Real Memory Copy Area End"},
+	[MODULES_NR] = {1, 0, "Modules Area Start"},
+	[MODULES_END_NR] = {0, 0, "Modules Area End"},
+	[ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
+	[ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
+	[MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
+	[MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
 #ifdef CONFIG_KASAN
-	[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
-	[KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
+	[KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
+	[KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
 #endif
-	{ -1, NULL }
+	{1, -1UL, NULL}
 };
 
 struct pg_state {
@@ -163,6 +167,19 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
 	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
 }
 
+static void note_page_update_state(struct pg_state *st, unsigned long addr, unsigned int prot, int level)
+{
+	struct seq_file *m = st->seq;
+
+	while (addr >= st->marker[1].start_address) {
+		st->marker++;
+		pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+	}
+	st->start_address = addr;
+	st->current_prot = prot;
+	st->level = level;
+}
+
 static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
 {
 	int width = sizeof(unsigned long) * 2;
@@ -186,9 +203,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
 		addr = max_addr;
 	if (st->level == -1) {
 		pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
-		st->start_address = addr;
-		st->current_prot = prot;
-		st->level = level;
+		note_page_update_state(st, addr, prot, level);
 	} else if (prot != st->current_prot || level != st->level ||
 		   addr >= st->marker[1].start_address) {
 		note_prot_wx(st, addr);
@@ -202,13 +217,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
 		}
 		pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
 		print_prot(m, st->current_prot, st->level);
-		while (addr >= st->marker[1].start_address) {
-			st->marker++;
-			pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
-		}
-		st->start_address = addr;
-		st->current_prot = prot;
-		st->level = level;
+		note_page_update_state(st, addr, prot, level);
 	}
 }
 
@@ -280,22 +289,25 @@ static int ptdump_show(struct seq_file *m, void *v)
 DEFINE_SHOW_ATTRIBUTE(ptdump);
 #endif /* CONFIG_PTDUMP_DEBUGFS */
 
-/*
- * Heapsort from lib/sort.c is not a stable sorting algorithm, do a simple
- * insertion sort to preserve the original order of markers with the same
- * start address.
- */
-static void sort_address_markers(void)
+static int ptdump_cmp(const void *a, const void *b)
 {
-	struct addr_marker tmp;
-	int i, j;
+	const struct addr_marker *ama = a;
+	const struct addr_marker *amb = b;
 
-	for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) {
-		tmp = address_markers[i];
-		for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--)
-			address_markers[j + 1] = address_markers[j];
-		address_markers[j + 1] = tmp;
-	}
+	if (ama->start_address > amb->start_address)
+		return 1;
+	if (ama->start_address < amb->start_address)
+		return -1;
+	/*
+	 * If the start addresses of two markers are identical consider the
+	 * marker which defines the start of an area higher than the one which
+	 * defines the end of an area. This keeps pairs of markers sorted.
+	 */
+	if (ama->is_start)
+		return 1;
+	if (amb->is_start)
+		return -1;
+	return 0;
 }
 
 static int pt_dump_init(void)
@@ -303,6 +315,8 @@ static int pt_dump_init(void)
 #ifdef CONFIG_KFENCE
 	unsigned long kfence_start = (unsigned long)__kfence_pool;
 #endif
+	unsigned long lowcore = (unsigned long)get_lowcore();
+
 	/*
 	 * Figure out the maximum virtual address being accessible with the
 	 * kernel ASCE. We need this to keep the page table walker functions
@@ -310,7 +324,10 @@ static int pt_dump_init(void)
 	 */
 	max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
 	max_addr = 1UL << (max_addr * 11 + 31);
-	address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
+	address_markers[LOWCORE_START_NR].start_address = lowcore;
+	address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
+	address_markers[IDENTITY_START_NR].start_address = __identity_base;
+	address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
 	address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
 	address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
 	address_markers[MODULES_NR].start_address = MODULES_VADDR;
@@ -337,7 +354,8 @@ static int pt_dump_init(void)
 	address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
 	address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
 #endif
-	sort_address_markers();
+	sort(address_markers, ARRAY_SIZE(address_markers) - 1,
+	     sizeof(address_markers[0]), ptdump_cmp, NULL);
 #ifdef CONFIG_PTDUMP_DEBUGFS
 	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
 #endif /* CONFIG_PTDUMP_DEBUGFS */
@@ -108,6 +108,8 @@ void mark_rodata_ro(void)
 {
 	unsigned long size = __end_ro_after_init - __start_ro_after_init;
 
+	if (MACHINE_HAS_NX)
+		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
 	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
 	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }
@@ -170,13 +172,6 @@ void __init mem_init(void)
 	setup_zero_pages();	/* Setup zeroed pages. */
 }
 
-void free_initmem(void)
-{
-	set_memory_rwnx((unsigned long)_sinittext,
-			(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
-	free_initmem_default(POISON_FREE_INITMEM);
-}
-
 unsigned long memory_block_size_bytes(void)
 {
 	/*
@@ -661,7 +661,6 @@ void __init vmem_map_init(void)
 {
 	__set_memory_rox(_stext, _etext);
 	__set_memory_ro(_etext, __end_rodata);
-	__set_memory_rox(_sinittext, _einittext);
 	__set_memory_rox(__stext_amode31, __etext_amode31);
 	/*
 	 * If the BEAR-enhancement facility is not installed the first
@@ -670,16 +669,8 @@ void __init vmem_map_init(void)
 	 */
 	if (!static_key_enabled(&cpu_has_bear))
 		set_memory_x(0, 1);
-	if (debug_pagealloc_enabled()) {
-		/*
-		 * Use RELOC_HIDE() as long as __va(0) translates to NULL,
-		 * since performing pointer arithmetic on a NULL pointer
-		 * has undefined behavior and generates compiler warnings.
-		 */
-		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
-	}
-	if (MACHINE_HAS_NX)
-		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
+	if (debug_pagealloc_enabled())
+		__set_memory_4k(__va(0), __va(0) + ident_map_size);
 	pr_info("Write protected kernel read-only data: %luk\n",
 		(unsigned long)(__end_rodata - _stext) >> 10);
 }
@@ -71,7 +71,9 @@ static struct mconsole_command *mconsole_parse(struct mc_request *req)
 	return NULL;
 }
 
+#ifndef MIN
 #define MIN(a,b) ((a)<(b) ? (a):(b))
+#endif
 
 #define STRINGX(x) #x
 #define STRING(x) STRINGX(x)
@@ -163,7 +163,7 @@ struct sev_config {
 	 */
 	use_cas : 1,
 
-	__reserved : 62;
+	__reserved : 61;
 };
 
 static struct sev_config sev_cfg __read_mostly;
@@ -344,6 +344,7 @@
 332	common	statx			sys_statx
 333	common	io_pgetevents		sys_io_pgetevents
 334	common	rseq			sys_rseq
+335	common	uretprobe		sys_uretprobe
 # don't use numbers 387 through 423, add new calls after the last
 # 'common' entry
 424	common	pidfd_send_signal	sys_pidfd_send_signal
@@ -385,7 +386,6 @@
 460	common	lsm_set_self_attr	sys_lsm_set_self_attr
 461	common	lsm_list_modules	sys_lsm_list_modules
 462	common	mseal			sys_mseal
-467	common	uretprobe		sys_uretprobe
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
@@ -1520,20 +1520,23 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 void perf_event_print_debug(void)
 {
 	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+	unsigned long *cntr_mask, *fixed_cntr_mask;
+	struct event_constraint *pebs_constraints;
+	struct cpu_hw_events *cpuc;
 	u64 pebs, debugctl;
-	int cpu = smp_processor_id();
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-	unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
-	unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
-	struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
-	unsigned long flags;
-	int idx;
+	int cpu, idx;
+
+	guard(irqsave)();
+
+	cpu = smp_processor_id();
+	cpuc = &per_cpu(cpu_hw_events, cpu);
+	cntr_mask = hybrid(cpuc->pmu, cntr_mask);
+	fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
+	pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
 
 	if (!*(u64 *)cntr_mask)
 		return;
 
-	local_irq_save(flags);
-
 	if (x86_pmu.version >= 2) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@@ -1577,7 +1580,6 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
 			cpu, idx, pmc_count);
 	}
-	local_irq_restore(flags);
 }
 
 void x86_pmu_stop(struct perf_event *event, int flags)
@@ -64,7 +64,7 @@
  *	perf code: 0x00
  *	Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
  *			 KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- *			 RPL,SPR,MTL,ARL,LNL
+ *			 RPL,SPR,MTL,ARL,LNL,SRF
  *	Scope: Package (physical package)
  * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
  *	perf code: 0x01
@@ -693,7 +693,8 @@ static const struct cstate_model srf_cstates __initconst = {
 	.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
 		       BIT(PERF_CSTATE_CORE_C6_RES),
 
-	.pkg_events = BIT(PERF_CSTATE_PKG_C6_RES),
+	.pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+		      BIT(PERF_CSTATE_PKG_C6_RES),
 
 	.module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
 };
@@ -2,6 +2,10 @@
 #ifndef _ASM_X86_CMDLINE_H
 #define _ASM_X86_CMDLINE_H
 
+#include <asm/setup.h>
+
+extern char builtin_cmdline[COMMAND_LINE_SIZE];
+
 int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
 int cmdline_find_option(const char *cmdline_ptr, const char *option,
 			char *buffer, int bufsize);
@@ -1305,6 +1305,7 @@ struct kvm_arch {
 	u8 vm_type;
 	bool has_private_mem;
 	bool has_protected_state;
+	bool pre_fault_allowed;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct list_head active_mmu_pages;
 	struct list_head zapped_obsolete_pages;
@@ -462,7 +462,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		switch (c->x86_model) {
 		case 0x00 ... 0x2f:
 		case 0x40 ... 0x4f:
-		case 0x70 ... 0x7f:
+		case 0x60 ... 0x7f:
 			setup_force_cpu_cap(X86_FEATURE_ZEN5);
 			break;
 		default:
@@ -306,7 +306,7 @@ static void freq_invariance_enable(void)
 		WARN_ON_ONCE(1);
 		return;
 	}
-	static_branch_enable(&arch_scale_freq_key);
+	static_branch_enable_cpuslocked(&arch_scale_freq_key);
 	register_freq_invariance_syscore_ops();
 	pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
 }
@@ -323,8 +323,10 @@ static void __init bp_init_freq_invariance(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return;
 
-	if (intel_set_max_freq_ratio())
+	if (intel_set_max_freq_ratio()) {
+		guard(cpus_read_lock)();
 		freq_invariance_enable();
+	}
 }
 
 static void disable_freq_invariance_workfn(struct work_struct *work)
@@ -164,7 +164,7 @@ unsigned long saved_video_mode;
 
 static char __initdata command_line[COMMAND_LINE_SIZE];
 #ifdef CONFIG_CMDLINE_BOOL
-static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+char builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 bool builtin_cmdline_added __ro_after_init;
 #endif
 
@@ -141,8 +141,8 @@ config KVM_AMD_SEV
 	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
 	select ARCH_HAS_CC_PLATFORM
 	select KVM_GENERIC_PRIVATE_MEM
-	select HAVE_KVM_GMEM_PREPARE
-	select HAVE_KVM_GMEM_INVALIDATE
+	select HAVE_KVM_ARCH_GMEM_PREPARE
+	select HAVE_KVM_ARCH_GMEM_INVALIDATE
 	help
 	  Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
 	  with Encrypted State (SEV-ES) on AMD processors.
@@ -1743,7 +1743,7 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
 	s64 min_period = min_timer_period_us * 1000LL;
 
 	if (apic->lapic_timer.period < min_period) {
-		pr_info_ratelimited(
+		pr_info_once(
 		    "vcpu %i: requested %lld ns "
 		    "lapic timer period limited to %lld ns\n",
 		    apic->vcpu->vcpu_id,
@@ -4335,7 +4335,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
 	if (req_max_level)
 		max_level = min(max_level, req_max_level);
 
-	return req_max_level;
+	return max_level;
 }
 
 static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
@@ -4743,6 +4743,9 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
 	u64 end;
 	int r;
 
+	if (!vcpu->kvm->arch.pre_fault_allowed)
+		return -EOPNOTSUPP;
+
 	/*
 	 * reload is efficient when called repeatedly, so we can do it on
 	 * every iteration.
@@ -7510,7 +7513,7 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 	const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
 
 	if (level == PG_LEVEL_2M)
-		return kvm_range_has_memory_attributes(kvm, start, end, attrs);
+		return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
 
 	for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
 		if (hugepage_test_mixed(slot, gfn, level - 1) ||
@@ -2279,18 +2279,11 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
 		bool assigned;
 		int level;
 
-		if (!kvm_mem_is_private(kvm, gfn)) {
-			pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
-				 __func__, gfn);
-			ret = -EINVAL;
-			goto err;
-		}
-
 		ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
 		if (ret || assigned) {
 			pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
 				 __func__, gfn, ret, assigned);
-			ret = -EINVAL;
+			ret = ret ? -EINVAL : -EEXIST;
 			goto err;
 		}
 
@@ -2549,6 +2542,14 @@ static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	data->gctx_paddr = __psp_pa(sev->snp_context);
 	ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error);
 
+	/*
+	 * Now that there will be no more SNP_LAUNCH_UPDATE ioctls, private pages
+	 * can be given to the guest simply by marking the RMP entry as private.
+	 * This can happen on first access and also with KVM_PRE_FAULT_MEMORY.
+	 */
+	if (!ret)
+		kvm->arch.pre_fault_allowed = true;
+
 	kfree(id_auth);
 
 e_free_id_block:
@@ -4949,6 +4949,7 @@ static int svm_vm_init(struct kvm *kvm)
 		to_kvm_sev_info(kvm)->need_init = true;
 
 		kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM);
+		kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem;
 	}
 
 	if (!pause_filter_count || !pause_filter_thresh)
@@ -12646,6 +12646,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.vm_type = type;
 	kvm->arch.has_private_mem =
 		(type == KVM_X86_SW_PROTECTED_VM);
+	/* Decided by the vendor code for other VM types. */
+	kvm->arch.pre_fault_allowed =
+		type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
 
 	ret = kvm_page_track_init(kvm);
 	if (ret)
@@ -13641,19 +13644,14 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
-#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
-bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
-{
-	return kvm->arch.vm_type == KVM_X86_SNP_VM;
-}
-
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
 {
 	return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
 }
 #endif
 
-#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
 {
 	kvm_x86_call(gmem_invalidate)(start, end);
@@ -207,18 +207,29 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
 
 int cmdline_find_option_bool(const char *cmdline, const char *option)
 {
-	if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
-		WARN_ON_ONCE(!builtin_cmdline_added);
+	int ret;
 
-	return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+	ret = __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+	if (ret > 0)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+		return __cmdline_find_option_bool(builtin_cmdline, COMMAND_LINE_SIZE, option);
+
+	return ret;
 }
 
 int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
 			int bufsize)
 {
-	if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
-		WARN_ON_ONCE(!builtin_cmdline_added);
+	int ret;
 
-	return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
-				     buffer, bufsize);
+	ret = __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
+	if (ret > 0)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+		return __cmdline_find_option(builtin_cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
+
+	return ret;
 }
@@ -88,12 +88,14 @@ SYM_FUNC_END(__get_user_4)
 EXPORT_SYMBOL(__get_user_4)
 
 SYM_FUNC_START(__get_user_8)
+#ifndef CONFIG_X86_64
+	xor %ecx,%ecx
+#endif
 	check_range size=8
 	ASM_STAC
 #ifdef CONFIG_X86_64
 	UACCESS movq (%_ASM_AX),%rdx
 #else
-	xor %ecx,%ecx
 	UACCESS movl (%_ASM_AX),%edx
 	UACCESS movl 4(%_ASM_AX),%ecx
 #endif
@@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 			 */
 			*target_pmd = *pmd;
 
-			addr += PMD_SIZE;
+			addr = round_up(addr + 1, PMD_SIZE);
 
 		} else if (level == PTI_CLONE_PTE) {
 
 			/* Walk the page-table down to the pte level */
 			pte = pte_offset_kernel(pmd, addr);
 			if (pte_none(*pte)) {
-				addr += PAGE_SIZE;
+				addr = round_up(addr + 1, PAGE_SIZE);
 				continue;
 			}
 
@@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 			/* Clone the PTE */
 			*target_pte = *pte;
 
-			addr += PAGE_SIZE;
+			addr = round_up(addr + 1, PAGE_SIZE);
 
 		} else {
 			BUG();
@@ -496,7 +496,7 @@ static void pti_clone_entry_text(void)
 {
 	pti_clone_pgtable((unsigned long) __entry_text_start,
 			  (unsigned long) __entry_text_end,
-			  PTI_CLONE_PMD);
+			  PTI_LEVEL_KERNEL_IMAGE);
 }
 
 /*
@@ -413,6 +413,7 @@ config BT_ATH3K
 config BT_MTKSDIO
 	tristate "MediaTek HCI SDIO driver"
 	depends on MMC
+	depends on USB || !BT_HCIBTUSB_MTK
 	select BT_MTK
 	help
 	  MediaTek Bluetooth HCI SDIO driver.
@@ -425,6 +426,7 @@ config BT_MTKSDIO
 config BT_MTKUART
 	tristate "MediaTek HCI UART driver"
 	depends on SERIAL_DEV_BUS
+	depends on USB || !BT_HCIBTUSB_MTK
 	select BT_MTK
 	help
 	  MediaTek Bluetooth HCI UART driver.
@@ -3085,6 +3085,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
 		btintel_set_dsm_reset_method(hdev, &ver_tlv);
 
 		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
+		if (err)
+			goto exit_error;
+
 		btintel_register_devcoredump_support(hdev);
 		btintel_print_fseq_info(hdev);
 		break;
@@ -437,6 +437,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(btmtk_process_coredump);
 
+#if IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK)
 static void btmtk_usb_wmt_recv(struct urb *urb)
 {
 	struct hci_dev *hdev = urb->context;
@@ -1262,7 +1263,8 @@ int btmtk_usb_suspend(struct hci_dev *hdev)
 	struct btmtk_data *btmtk_data = hci_get_priv(hdev);
 
 	/* Stop urb anchor for iso data transmission */
-	usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+	if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
+		usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
 
 	return 0;
 }
@@ -1487,6 +1489,7 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
+#endif
 
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");

drivers/cache/Kconfig
@@ -18,6 +18,7 @@ config STARFIVE_STARLINK_CACHE
	bool "StarFive StarLink Cache controller"
	depends on RISCV
	depends on ARCH_STARFIVE
+	depends on 64BIT
	select RISCV_DMA_NONCOHERENT
	select RISCV_NONSTANDARD_CACHE_OPS
	help
@@ -45,7 +45,6 @@
 #define I10NM_NUM_CHANNELS	MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
 #define I10NM_NUM_DIMMS		MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
 
-#define MAX(a, b)	((a) > (b) ? (a) : (b))
 #define NUM_IMC		MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
 #define NUM_CHANNELS	MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
 #define NUM_DIMMS	MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
@@ -27,7 +27,8 @@ cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
 cflags-$(CONFIG_ARM)	+= -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
			   -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
			   -DEFI_HAVE_STRCMP -fno-builtin -fpic \
-			   $(call cc-option,-mno-single-pic-base)
+			   $(call cc-option,-mno-single-pic-base) \
+			   $(DISABLE_STACKLEAK_PLUGIN)
 cflags-$(CONFIG_RISCV)	+= -fpic -DNO_ALTERNATIVE -mno-relax \
			   $(DISABLE_STACKLEAK_PLUGIN)
 cflags-$(CONFIG_LOONGARCH)	+= -fpie
@@ -57,6 +58,10 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
 # disable LTO
 KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
 
+# The .data section would be renamed to .data.efistub, therefore, remove
+# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
+KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
+
 lib-y	:= efi-stub-helper.o gop.o secureboot.o tpm.o \
	   file.o mem.o random.o randomalloc.o pci.o \
	   skip_spaces.o lib-cmdline.o lib-ctype.o \
@@ -268,6 +268,7 @@ config DRM_EXEC
 config DRM_GPUVM
	tristate
	depends on DRM
+	select DRM_EXEC
	help
	  GPU-VM representation providing helpers to manage a GPUs virtual
	  address space
@@ -1778,7 +1778,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va_mapping *mapping;
-	int r;
+	int i, r;
 
 	addr /= AMDGPU_GPU_PAGE_SIZE;
 
@@ -1793,13 +1793,13 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
 		return -EINVAL;
 
-	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
-		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
-		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
-		if (r)
-			return r;
-	}
+	(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+	for (i = 0; i < (*bo)->placement.num_placement; i++)
+		(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+	r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+	if (r)
+		return r;
 
 	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
 }
@@ -103,7 +103,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
 	if (!amdgpu_mes_log_enable)
 		return 0;
 
-	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
+	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
 				    AMDGPU_GEM_DOMAIN_GTT,
 				    &adev->mes.event_log_gpu_obj,
 				    &adev->mes.event_log_gpu_addr,
@@ -113,7 +113,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
+	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);
 
 	return 0;
 
@@ -1573,7 +1573,7 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
 	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
 
 	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
-		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
+		     mem, adev->mes.event_log_size, false);
 
 	return 0;
 }
@@ -52,7 +52,6 @@ enum amdgpu_mes_priority_level {
 
 #define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
 #define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
-#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximu log buffer size for MES */
 
 struct amdgpu_mes_funcs;
 
@@ -135,8 +134,9 @@ struct amdgpu_mes {
 	unsigned long *doorbell_bitmap;
 
 	/* MES event log buffer */
-	struct amdgpu_bo *event_log_gpu_obj;
-	uint64_t event_log_gpu_addr;
+	uint32_t event_log_size;
+	struct amdgpu_bo *event_log_gpu_obj;
+	uint64_t event_log_gpu_addr;
 	void *event_log_cpu_addr;
 
 	/* ip specific functions */
@@ -1163,6 +1163,8 @@ static int mes_v11_0_sw_init(void *handle)
 	adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
 	adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
 
+	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+
 	r = amdgpu_mes_init(adev);
 	if (r)
 		return r;
@@ -551,8 +551,10 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
 	mes_set_hw_res_pkt.oversubscription_timer = 50;
 	mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
 
-	mes_set_hw_res_pkt.enable_mes_event_int_logging = 0;
-	mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+	if (amdgpu_mes_log_enable) {
+		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+	}
 
 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
 			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
@@ -1237,6 +1239,8 @@ static int mes_v12_0_sw_init(void *handle)
 	adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
 	adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
 
+	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+
 	r = amdgpu_mes_init(adev);
 	if (r)
 		return r;
@@ -35,8 +35,10 @@
 #include "dc_stream_priv.h"
 
 #define DC_LOGGER dc->ctx->logger
+#ifndef MIN
 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
 #define MAX(x, y) ((x > y) ? x : y)
+#endif
 
 /*******************************************************************************
  * Private functions
@@ -25,7 +25,9 @@
 
 #include "hdcp.h"
 
+#ifndef MIN
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
 #define HDCP_I2C_ADDR 0x3a	/* 0x74 >> 1*/
 #define KSV_READ_SIZE 0xf	/* 0x6803b - 0x6802c */
 #define HDCP_MAX_AUX_TRANSACTION_SIZE 16
@@ -28,6 +28,9 @@
 
 #define MES_API_VERSION 1
 
+/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000
+
 /* Driver submits one API(cmd) as a single Frame and this command size is same
  * for all API to ease the debugging and parsing of ring buffer.
  */
@@ -28,6 +28,9 @@
 
 #define MES_API_VERSION 0x14
 
+/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG_12 */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0xC000
+
 /* Driver submits one API(cmd) as a single Frame and this command size is same for all API
  * to ease the debugging and parsing of ring buffer.
  */
@@ -618,7 +618,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int r = 0;
 
-	if (!pp_funcs || !pp_funcs->load_firmware || adev->flags & AMD_IS_APU)
+	if (!pp_funcs || !pp_funcs->load_firmware ||
+	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
 		return 0;
 
 	mutex_lock(&adev->pm.mutex);
@@ -22,12 +22,18 @@
  */
 #include <asm/div64.h>
 
-#define SHIFT_AMOUNT 16 /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */
+enum ppevvmath_constants {
+	/* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */
+	SHIFT_AMOUNT = 16,
 
-#define PRECISION 5 /* Change this value to change the number of decimal places in the final output - 5 is a good default */
+	/* Change this value to change the number of decimal places in the final output - 5 is a good default */
+	PRECISION = 5,
 
-#define SHIFTED_2 (2 << SHIFT_AMOUNT)
-#define MAX (1 << (SHIFT_AMOUNT - 1)) - 1 /* 32767 - Might change in the future */
+	SHIFTED_2 = (2 << SHIFT_AMOUNT),
+
+	/* 32767 - Might change in the future */
+	MAX = (1 << (SHIFT_AMOUNT - 1)) - 1,
+};
 
 /* -------------------------------------------------------------------------------
  * NEW TYPE - fINT
@@ -66,6 +66,7 @@
 
 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE	0x4000
 #define DEBUGSMC_MSG_Mode1Reset	2
+#define LINK_SPEED_MAX	3
 
 static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
@@ -221,7 +222,6 @@ static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COU
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,	WORKLOAD_PPLIB_WINDOW_3D_BIT),
 };
 
-#if 0
 static const uint8_t smu_v14_0_2_throttler_map[] = {
 	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
 	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
@@ -241,7 +241,6 @@ static const uint8_t smu_v14_0_2_throttler_map[] = {
 	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
 	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
 };
-#endif
 
 static int
 smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
@@ -1869,6 +1868,88 @@ static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
        return ret;
 }

+static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
+                                           void **table)
+{
+        struct smu_table_context *smu_table = &smu->smu_table;
+        struct gpu_metrics_v1_3 *gpu_metrics =
+                (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+        SmuMetricsExternal_t metrics_ext;
+        SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
+        int ret = 0;
+
+        ret = smu_cmn_get_metrics_table(smu,
+                                        &metrics_ext,
+                                        true);
+        if (ret)
+                return ret;
+
+        smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+
+        gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
+        gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
+        gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
+        gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
+        gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
+        gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
+                                             metrics->AvgTemperature[TEMP_VR_MEM1]);
+
+        gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
+        gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
+        gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
+                                               metrics->Vcn1ActivityPercentage);
+
+        gpu_metrics->average_socket_power = metrics->AverageSocketPower;
+        gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
+
+        if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+                gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
+        else
+                gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
+
+        if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+                gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
+        else
+                gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
+
+        gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
+        gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
+        gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
+        gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
+
+        gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
+        gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
+        gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
+        gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
+        gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
+        gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
+        gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];
+
+        gpu_metrics->throttle_status =
+                        smu_v14_0_2_get_throttler_status(metrics);
+        gpu_metrics->indep_throttle_status =
+                        smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
+                                                           smu_v14_0_2_throttler_map);
+
+        gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
+
+        gpu_metrics->pcie_link_width = metrics->PcieWidth;
+        if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
+                gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
+        else
+                gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
+
+        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+        gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
+        gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
+        gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];
+
+        *table = (void *)gpu_metrics;
+
+        return sizeof(struct gpu_metrics_v1_3);
+}
+
 static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
        .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
@@ -1905,6 +1986,7 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
        .enable_thermal_alert = smu_v14_0_enable_thermal_alert,
        .disable_thermal_alert = smu_v14_0_disable_thermal_alert,
        .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
+       .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
        .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
        .init_pptable_microcode = smu_v14_0_init_pptable_microcode,
        .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
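With .get_gpu_metrics wired up, the table built above is what userspace receives when it reads the amdgpu gpu_metrics file in sysfs. A minimal reader sketch; the sysfs path and the metrics_table_header layout are assumptions based on the usual amdgpu conventions, not something taken from this diff:

#include <stdio.h>
#include <stdint.h>

/* Common header layout advertised at the start of the gpu_metrics blob. */
struct metrics_table_header {
        uint16_t structure_size;
        uint8_t  format_revision;
        uint8_t  content_revision;
};

int main(void)
{
        /* hypothetical path; the card index depends on the system */
        FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
        struct metrics_table_header hdr;

        if (!f || fread(&hdr, sizeof(hdr), 1, f) != 1) {
                perror("gpu_metrics");
                return 1;
        }
        /* gpu_metrics_v1_3 is announced as format 1, content 3 */
        printf("size=%u format=%u content=%u\n",
               hdr.structure_size, hdr.format_revision, hdr.content_revision);
        fclose(f);
        return 0;
}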
@@ -794,7 +794,7 @@ static const char *smu_get_feature_name(struct smu_context *smu,
 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                                   char *buf)
 {
-       int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
+       int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
        uint64_t feature_mask;
        int i, feature_index;
        uint32_t count = 0;
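The change above swaps the type-checking max() helper for MAX() in an array bound: an array size must be an integer constant expression, which the statement-expression form of max() does not provide in that position. A minimal illustration with locally defined stand-ins (these are not the kernel's minmax.h definitions, and the feature counts are hypothetical):

#include <stdint.h>

/* Stand-in constant-expression MAX(); the kernel's version lives in
 * include/linux/minmax.h. */
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

#define SMU_FEATURE_COUNT       64
#define SMU_FEATURE_MAX         96

/* Valid here only because MAX() folds to an integer constant expression. */
static int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];

int main(void)
{
        return sizeof(sort_feature) == 96 ? 0 : 1;
}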
@@ -158,7 +158,14 @@ void ast_dp_launch(struct drm_device *dev)
                                          ASTDP_HOST_EDID_READ_DONE);
 }

+bool ast_dp_power_is_on(struct ast_device *ast)
+{
+        u8 vgacre3;
+
+        vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
+
+        return !(vgacre3 & AST_DP_PHY_SLEEP);
+}
+
 void ast_dp_power_on_off(struct drm_device *dev, bool on)
 {
@@ -391,6 +391,11 @@ static int ast_drm_freeze(struct drm_device *dev)

 static int ast_drm_thaw(struct drm_device *dev)
 {
+        struct ast_device *ast = to_ast_device(dev);
+
+        ast_enable_vga(ast->ioregs);
+        ast_open_key(ast->ioregs);
+        ast_enable_mmio(dev->dev, ast->ioregs);
        ast_post_gpu(dev);

        return drm_mode_config_helper_resume(dev);
@@ -472,6 +472,7 @@ void ast_init_3rdtx(struct drm_device *dev);
 bool ast_astdp_is_connected(struct ast_device *ast);
 int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
 void ast_dp_launch(struct drm_device *dev);
+bool ast_dp_power_is_on(struct ast_device *ast);
 void ast_dp_power_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
@@ -28,6 +28,7 @@
  * Authors: Dave Airlie <airlied@redhat.com>
  */

+#include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/pci.h>

@@ -1687,11 +1688,35 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
                                                  struct drm_modeset_acquire_ctx *ctx,
                                                  bool force)
 {
+        struct drm_device *dev = connector->dev;
        struct ast_device *ast = to_ast_device(connector->dev);
+        enum drm_connector_status status = connector_status_disconnected;
+        struct drm_connector_state *connector_state = connector->state;
+        bool is_active = false;
+
+        mutex_lock(&ast->modeset_lock);
+
+        if (connector_state && connector_state->crtc) {
+                struct drm_crtc_state *crtc_state = connector_state->crtc->state;
+
+                if (crtc_state && crtc_state->active)
+                        is_active = true;
+        }
+
+        if (!is_active && !ast_dp_power_is_on(ast)) {
+                ast_dp_power_on_off(dev, true);
+                msleep(50);
+        }
+
        if (ast_astdp_is_connected(ast))
-                return connector_status_connected;
-        return connector_status_disconnected;
+                status = connector_status_connected;
+
+        if (!is_active && status == connector_status_disconnected)
+                ast_dp_power_on_off(dev, false);
+
+        mutex_unlock(&ast->modeset_lock);
+
+        return status;
 }

 static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
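Seen from userspace, the effect of the new detect logic is only that probing an idle ASTDP connector may briefly wake the DP PHY before a status is reported. A small libdrm sketch that triggers such a probe and prints each connector's status; the /dev/dri/card0 path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drmMode.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);
        drmModeRes *res;
        int i;

        if (fd < 0 || !(res = drmModeGetResources(fd)))
                return 1;

        for (i = 0; i < res->count_connectors; i++) {
                /* drmModeGetConnector() forces a probe of the connector */
                drmModeConnector *conn = drmModeGetConnector(fd, res->connectors[i]);

                if (conn) {
                        printf("connector %u: %s\n", conn->connector_id,
                               conn->connection == DRM_MODE_CONNECTED ?
                               "connected" : "not connected");
                        drmModeFreeConnector(conn);
                }
        }
        drmModeFreeResources(res);
        close(fd);
        return 0;
}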
@@ -1070,7 +1070,10 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
                        break;
                }

-               if (async_flip && prop != config->prop_fb_id) {
+               if (async_flip &&
+                   prop != config->prop_fb_id &&
+                   prop != config->prop_in_fence_fd &&
+                   prop != config->prop_fb_damage_clips) {
                        ret = drm_atomic_plane_get_property(plane, plane_state,
                                                            prop, &old_val);
                        ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
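The relaxed check above means an atomic commit flagged DRM_MODE_PAGE_FLIP_ASYNC may now also change a plane's IN_FENCE_FD and FB_DAMAGE_CLIPS properties, not only FB_ID. A hedged userspace sketch of such a commit using libdrm; in a real client the property IDs are discovered via drmModeObjectGetProperties(), here they are simply passed in:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int async_flip_with_fence(int fd, uint32_t plane_id,
                          uint32_t prop_fb_id, uint32_t prop_in_fence_fd,
                          uint32_t new_fb, int acquire_fence)
{
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        int ret;

        if (!req)
                return -1;

        drmModeAtomicAddProperty(req, plane_id, prop_fb_id, new_fb);
        drmModeAtomicAddProperty(req, plane_id, prop_in_fence_fd, acquire_fence);

        /* Before this patch, changing IN_FENCE_FD here caused the async flip
         * to be rejected; with it, the commit is allowed. */
        ret = drmModeAtomicCommit(fd, req,
                                  DRM_MODE_ATOMIC_NONBLOCK |
                                  DRM_MODE_PAGE_FLIP_ASYNC, NULL);
        drmModeAtomicFree(req);
        return ret;
}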
@@ -355,7 +355,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,

 err_drm_gem_vmap_unlocked:
        drm_gem_unlock(gem);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(drm_client_buffer_vmap_local);

@@ -624,6 +624,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
 static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
                                 u32 width, u32 height)
 {
+        /*
+         * This function may be invoked by panic() to flush the frame
+         * buffer, where all CPUs except the panic CPU are stopped.
+         * During the following schedule_work(), the panic CPU needs
+         * the worker_pool lock, which might be held by a stopped CPU,
+         * causing schedule_work() and panic() to block. Return early on
+         * oops_in_progress to prevent this blocking.
+         */
+        if (oops_in_progress)
+                return;
+
        drm_fb_helper_add_damage_clip(helper, x, y, width, height);

        schedule_work(&helper->damage_work);
@@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
                },
                .driver_data = (void *)&lcd1600x2560_leftside_up,
+       }, {    /* OrangePi Neo */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
+               },
+               .driver_data = (void *)&lcd1200x1920_rightside_up,
        }, {    /* Samsung GalaxyBook 10.6 */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
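The new quirk entry matches on DMI vendor and product strings. To see which strings a given machine reports (and so whether an entry like the OrangePi Neo one would apply), the standard dmi-id sysfs files can be read; a small sketch, assuming those files exist on the platform:

#include <stdio.h>

static void print_dmi(const char *path)
{
        char buf[128];
        FILE *f = fopen(path, "r");

        if (f && fgets(buf, sizeof(buf), f))
                printf("%s: %s", path, buf);
        if (f)
                fclose(f);
}

int main(void)
{
        print_dmi("/sys/class/dmi/id/sys_vendor");     /* e.g. "OrangePi" */
        print_dmi("/sys/class/dmi/id/product_name");   /* e.g. "NEO-01" */
        return 0;
}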
@@ -1658,7 +1658,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
 }

 static int
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
+skl_ddi_calculate_wrpll(int clock,
                        int ref_clock,
                        struct skl_wrpll_params *wrpll_params)
 {
@@ -1683,7 +1683,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
        };
        unsigned int dco, d, i;
        unsigned int p0, p1, p2;
-       u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+       u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */

        for (d = 0; d < ARRAY_SIZE(dividers); d++) {
                for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
@@ -1808,7 +1808,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
        struct skl_wrpll_params wrpll_params = {};
        int ret;

-       ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+       ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
                                      i915->display.dpll.ref_clks.nssc, &wrpll_params);
        if (ret)
                return ret;
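These hunks pass port_clock in kHz and widen to u64 before multiplying, instead of handing the function a Hz value and computing clock * 5 in int. Worked numbers show why: a 594 MHz HDMI pixel clock is 594,000,000 Hz, and multiplying by 5 gives 2,970,000,000, which no longer fits in a 32-bit signed int (maximum 2,147,483,647). A compilable illustration of the two orderings; the clock value is an example only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int clock_khz = 594000;                 /* 594 MHz pixel clock, in kHz */

        /* old style: the int * int product overflows (undefined behaviour,
         * typically wraps) before the widening assignment */
        int clock_hz = clock_khz * 1000;        /* 594,000,000 still fits */
        int64_t bad = clock_hz * 5;             /* 2,970,000,000 does not */

        /* new style: widen first, then multiply entirely in 64 bits */
        uint64_t good = (uint64_t)clock_khz * 1000 * 5;

        printf("bad=%lld good=%llu\n", (long long)bad, (unsigned long long)good);
        return 0;
}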
@@ -251,7 +251,7 @@
 #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
                                        (TRANS_HDCP(dev_priv) ? \
                                         TRANS_HDCP2_STREAM_STATUS(trans) : \
-                                        PIPE_HDCP2_STREAM_STATUS(pipe))
+                                        PIPE_HDCP2_STREAM_STATUS(port))

 #define _PORTA_HDCP2_AUTH_STREAM               0x66F00
 #define _PORTB_HDCP2_AUTH_STREAM               0x66F04
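The macro's parameter is named port, but the fallback branch expanded PIPE_HDCP2_STREAM_STATUS(pipe), picking up whatever identifier named pipe happened to exist at the expansion site instead of the argument. A toy illustration of that hazard; the macros below are stand-ins, not the i915 register helpers:

#include <stdio.h>

#define REG_FOR(x)                      (0x1000 + (x) * 4)

/* buggy: the body references "pipe", not the "port" parameter */
#define STREAM_STATUS_BAD(port)         REG_FOR(pipe)
/* fixed: the body uses its own parameter */
#define STREAM_STATUS_GOOD(port)        REG_FOR(port)

int main(void)
{
        int pipe = 0;   /* happens to exist at the call site, masking the bug */
        int port = 2;

        printf("bad=%#x good=%#x\n",
               STREAM_STATUS_BAD(port), STREAM_STATUS_GOOD(port));
        return 0;
}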
Some files were not shown because too many files have changed in this diff.