Linux 5.17-rc2

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmH2lIMeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGWzkH/jmR4RpGgrV5SUHF
 8R6tsIjae1WItJ8NoaDgImWRxLKXFU7MM77HoFykuZx5Y5yz5Xf5zuJNXRPrHQbC
 lE/8wqWut89LX0+k/pKSzt8BQbyZzImfm5rOSBpMC9VkvMcbuQ8NSJ/YbJOD4iAp
 jfRRIkJtLQWL6YRc/xPlHCfVY5Qx+TkNZuFFckPtM2HKIEXS225VCESX9JcABDcQ
 j7zG5bq/H+8qGItD2iUg8Nw9hqrLutdKSOgP29wxVOajY5QWVczYr1J0nfyRSPMb
 /Xs1Oo1/+zO+dfCmR45puJZYua+e/iFTJv2RPFLRIE2AV1vMq4WNIgTJlq8DwfQK
 C7Jq080=
 =6u+c
 -----END PGP SIGNATURE-----

Merge tag 'v5.17-rc2' into char-misc-next

We need the char/misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2022-01-30 15:00:39 +01:00
commit 7ab004dbcb
433 changed files with 4638 additions and 2084 deletions


@@ -70,6 +70,7 @@ Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
 Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
+Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>


@@ -92,7 +92,8 @@ Triggers can be set on more than one psi metric and more than one trigger
 for the same psi metric can be specified. However for each trigger a separate
 file descriptor is required to be able to poll it separately from others,
 therefore for each trigger a separate open() syscall should be made even
-when opening the same psi interface file.
+when opening the same psi interface file. Write operations to a file descriptor
+with an already existing psi trigger will fail with EBUSY.
 
 Monitors activate only when system enters stall state for the monitored
 psi metric and deactivates upon exit from the stall state. While system is
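For context, the trigger interface touched above is driven from user space roughly as in the sketch below. It is an assumption-laden illustration based on the PSI documentation, not code from this patch: it assumes a kernel with PSI enabled and the documented /proc/pressure/memory interface, and it registers one trigger per file descriptor; a second write to the same descriptor is the case the new sentence says now fails with EBUSY.

```c
/* Register a "some" memory-stall trigger (150ms per 1s window) and poll it. */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000";
	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (write(fd, trig, strlen(trig) + 1) < 0) {
		perror("write");	/* EBUSY if this fd already carries a trigger */
		return 1;
	}
	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & POLLERR)
			break;		/* monitor is no longer valid */
		if (pfd.revents & POLLPRI)
			printf("memory pressure threshold crossed\n");
	}
	close(fd);
	return 0;
}
```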


@@ -10,6 +10,7 @@ gpio
 gpio-aggregator
 sysfs
 gpio-mockup
+gpio-sim
 
 .. only:: subproject and html


@@ -266,10 +266,12 @@ Avanta family
 -------------
 Flavors:
+- 88F6500
 - 88F6510
 - 88F6530P
 - 88F6550
 - 88F6560
+- 88F6601
 Homepage:
 https://web.archive.org/web/20181005145041/http://www.marvell.com/broadband/


@@ -52,6 +52,12 @@ stable kernels.
 | Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2064142        | ARM64_ERRATUM_2064142       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2038923        | ARM64_ERRATUM_2038923       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #1902691        | ARM64_ERRATUM_1902691       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
@@ -92,12 +98,18 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |


@@ -36,6 +36,7 @@ properties:
 - renesas,intc-ex-r8a77980 # R-Car V3H
 - renesas,intc-ex-r8a77990 # R-Car E3
 - renesas,intc-ex-r8a77995 # R-Car D3
+- renesas,intc-ex-r8a779a0 # R-Car V3U
 - const: renesas,irqc
 '#interrupt-cells':


@@ -62,6 +62,7 @@ properties:
   interrupts-extended:
     minItems: 1
+    maxItems: 15872
     description:
       Specifies which contexts are connected to the PLIC, with "-1" specifying
       that a context is not present. Each node pointed to should be a
@@ -90,12 +91,11 @@ examples:
     #interrupt-cells = <1>;
     compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
     interrupt-controller;
-    interrupts-extended = <
-      &cpu0_intc 11
-      &cpu1_intc 11 &cpu1_intc 9
-      &cpu2_intc 11 &cpu2_intc 9
-      &cpu3_intc 11 &cpu3_intc 9
-      &cpu4_intc 11 &cpu4_intc 9>;
+    interrupts-extended = <&cpu0_intc 11>,
+                          <&cpu1_intc 11>, <&cpu1_intc 9>,
+                          <&cpu2_intc 11>, <&cpu2_intc 9>,
+                          <&cpu3_intc 11>, <&cpu3_intc 9>,
+                          <&cpu4_intc 11>, <&cpu4_intc 9>;
     reg = <0xc000000 0x4000000>;
     riscv,ndev = <10>;
 };


@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
 #address-cells = <1>;
 #size-cells = <1>;
 spi-max-frequency = <10000000>;
-bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
+bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
 interrupt-parent = <&gpio1>;
 interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
 device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;


@@ -166,6 +166,7 @@ to ReStructured Text format, or are simply too old.
 .. toctree::
    :maxdepth: 2
 
+   tools/index
    staging/index
    watch_queue


@@ -295,7 +295,7 @@ Pete Zaitcev gives the following summary:
 - If you are in a process context (any syscall) and want to lock other
   process out, use a mutex. You can take a mutex and sleep
-  (``copy_from_user*(`` or ``kmalloc(x,GFP_KERNEL)``).
+  (``copy_from_user()`` or ``kmalloc(x,GFP_KERNEL)``).
 
 - Otherwise (== data can be touched in an interrupt), use
   spin_lock_irqsave() and


@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+Kernel tools
+============
+
+This book covers user-space tools that are shipped with the kernel source;
+more additions are needed here:
+
+.. toctree::
+   :maxdepth: 1
+
+   rtla/index
+
+.. only:: subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`


@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+The realtime Linux analysis tool
+================================
+
+RTLA provides a set of tools for the analysis of the kernel's realtime
+behavior on specific hardware.
+
+.. toctree::
+   :maxdepth: 1
+
+   rtla
+   rtla-osnoise
+   rtla-osnoise-hist
+   rtla-osnoise-top
+   rtla-timerlat
+   rtla-timerlat-hist
+   rtla-timerlat-top
+
+.. only:: subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`


@@ -3268,6 +3268,7 @@ number.
 :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
              KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+             KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device (no set)
 :Type: device ioctl, vm ioctl, vcpu ioctl
 :Parameters: struct kvm_device_attr
 :Returns: 0 on success, -1 on error
@@ -3302,7 +3303,8 @@ transferred is defined by the particular attribute.
 ------------------------
 :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
              KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+             KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device
 :Type: device ioctl, vm ioctl, vcpu ioctl
 :Parameters: struct kvm_device_attr
 :Returns: 0 on success, -1 on error
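The attribute interface that gains the system-device case above is exercised from user space roughly as follows. This is a hedged sketch rather than code from the patch: it assumes <linux/kvm.h> provides struct kvm_device_attr and the KVM_HAS_DEVICE_ATTR/KVM_GET_DEVICE_ATTR ioctls, and the group/attr values used here are placeholders (real values are architecture specific).

```c
/* Probe a hypothetical system-scoped attribute on the /dev/kvm fd. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	uint64_t value = 0;
	struct kvm_device_attr attr = {
		.group = 0,				/* placeholder group */
		.attr  = 0,				/* placeholder attribute id */
		.addr  = (uint64_t)(uintptr_t)&value,	/* where the value is stored */
	};
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	if (ioctl(kvm, KVM_HAS_DEVICE_ATTR, &attr) == 0 &&
	    ioctl(kvm, KVM_GET_DEVICE_ATTR, &attr) == 0)
		printf("attribute value: %llu\n", (unsigned long long)value);
	else
		perror("KVM_GET_DEVICE_ATTR");	/* ENXIO if the attribute is unsupported */
	return 0;
}
```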


@@ -9,7 +9,7 @@ Page Table Check
 Introduction
 ============
 
-Page table check allows to hardern the kernel by ensuring that some types of
+Page table check allows to harden the kernel by ensuring that some types of
 the memory corruptions are prevented.
 
 Page table check performs extra verifications at the time when new pages become


@@ -190,8 +190,9 @@ M: Johannes Berg <johannes@sipsolutions.net>
 L: linux-wireless@vger.kernel.org
 S: Maintained
 W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
 F: Documentation/driver-api/80211/cfg80211.rst
 F: Documentation/networking/regulatory.rst
 F: include/linux/ieee80211.h
@@ -7208,8 +7209,10 @@ F: drivers/net/mdio/of_mdio.c
 F: drivers/net/pcs/
 F: drivers/net/phy/
 F: include/dt-bindings/net/qca-ar803x.h
+F: include/linux/linkmode.h
 F: include/linux/*mdio*.h
 F: include/linux/mdio/*.h
+F: include/linux/mii.h
 F: include/linux/of_net.h
 F: include/linux/phy.h
 F: include/linux/phy_fixed.h
@@ -11366,8 +11369,9 @@ M: Johannes Berg <johannes@sipsolutions.net>
 L: linux-wireless@vger.kernel.org
 S: Maintained
 W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
 F: Documentation/networking/mac80211-injection.rst
 F: Documentation/networking/mac80211_hwsim/mac80211_hwsim.rst
 F: drivers/net/wireless/mac80211_hwsim.[ch]
@@ -13374,9 +13378,10 @@ NETWORKING DRIVERS (WIRELESS)
 M: Kalle Valo <kvalo@kernel.org>
 L: linux-wireless@vger.kernel.org
 S: Maintained
-Q: http://patchwork.kernel.org/project/linux-wireless/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
+W: https://wireless.wiki.kernel.org/
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
 F: Documentation/devicetree/bindings/net/wireless/
 F: drivers/net/wireless/
@@ -13449,7 +13454,11 @@ L: netdev@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 F: arch/x86/net/*
+F: include/linux/ip.h
+F: include/linux/ipv6*
+F: include/net/fib*
 F: include/net/ip*
+F: include/net/route.h
 F: net/ipv4/
 F: net/ipv6/
@@ -13510,10 +13519,6 @@ F: include/net/tls.h
 F: include/uapi/linux/tls.h
 F: net/tls/*
 
-NETWORKING [WIRELESS]
-L: linux-wireless@vger.kernel.org
-Q: http://patchwork.kernel.org/project/linux-wireless/list/
-
 NETXEN (1/10) GbE SUPPORT
 M: Manish Chopra <manishc@marvell.com>
 M: Rahul Verma <rahulv@marvell.com>
@@ -16532,8 +16537,9 @@ M: Johannes Berg <johannes@sipsolutions.net>
 L: linux-wireless@vger.kernel.org
 S: Maintained
 W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
 F: Documentation/ABI/stable/sysfs-class-rfkill
 F: Documentation/driver-api/rfkill.rst
 F: include/linux/rfkill.h


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Gobble Gobble
 
 # *DOCUMENTATION*


@@ -83,6 +83,7 @@ config ARM
 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
 select HAVE_CONTEXT_TRACKING
 select HAVE_C_RECORDMCOUNT
+select HAVE_BUILDTIME_MCOUNT_SORT
 select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
 select HAVE_DMA_CONTIGUOUS if MMU
 select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU


@@ -288,6 +288,7 @@
 */
 #define ALT_UP(instr...) \
 .pushsection ".alt.smp.init", "a" ;\
+.align 2 ;\
 .long 9998b - . ;\
 9997: instr ;\
 .if . - 9997b == 2 ;\
@@ -299,6 +300,7 @@
 .popsection
 #define ALT_UP_B(label) \
 .pushsection ".alt.smp.init", "a" ;\
+.align 2 ;\
 .long 9998b - . ;\
 W(b) . + (label - 9998b) ;\
 .popsection


@@ -96,6 +96,7 @@ unsigned long __get_wchan(struct task_struct *p);
 #define __ALT_SMP_ASM(smp, up) \
 "9998: " smp "\n" \
 " .pushsection \".alt.smp.init\", \"a\"\n" \
+" .align 2\n" \
 " .long 9998b - .\n" \
 " " up "\n" \
 " .popsection\n"


@@ -11,6 +11,7 @@
 #include <linux/string.h>
 #include <asm/memory.h>
 #include <asm/domain.h>
+#include <asm/unaligned.h>
 #include <asm/unified.h>
 #include <asm/compiler.h>
@@ -497,7 +498,10 @@ do { \
 } \
 default: __err = __get_user_bad(); break; \
 } \
-*(type *)(dst) = __val; \
+if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
+put_unaligned(__val, (type *)(dst)); \
+else \
+*(type *)(dst) = __val; /* aligned by caller */ \
 if (__err) \
 goto err_label; \
 } while (0)
@@ -507,7 +511,9 @@ do { \
 const type *__pk_ptr = (dst); \
 unsigned long __dst = (unsigned long)__pk_ptr; \
 int __err = 0; \
-type __val = *(type *)src; \
+type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+? get_unaligned((type *)(src)) \
+: *(type *)(src); /* aligned by caller */ \
 switch (sizeof(type)) { \
 case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
 case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
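The hunk above switches the nofault copy helpers to get_unaligned()/put_unaligned() when the kernel is built for CPUs with efficient unaligned access, because the caller's pointer may not be naturally aligned. The sketch below is a userspace illustration of the same idea, not the kernel macros: copying through memcpy() is the portable way to express an unaligned load, and the compiler lowers it to a plain load where that is cheap.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Works for any alignment; dereferencing a misaligned uint32_t* would be UB. */
static uint32_t get_unaligned_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };

	/* buf + 1 is not 4-byte aligned */
	printf("0x%08x\n", get_unaligned_u32(buf + 1));
	return 0;
}
```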


@@ -1,4 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
+KASAN_SANITIZE_actions-common.o := n
+KASAN_SANITIZE_actions-arm.o := n
+KASAN_SANITIZE_actions-thumb.o := n
 obj-$(CONFIG_KPROBES) += core.o actions-common.o checkers-common.o
 obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
 test-kprobes-objs := test-core.o


@@ -670,15 +670,25 @@ config ARM64_ERRATUM_1508412
 config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
   bool
 
+config ARM64_ERRATUM_2051678
+  bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
+  help
+    This option adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
+    Affected Cortex-A510 might not respect the ordering rules for
+    hardware update of the page table's dirty bit. The workaround
+    is to not enable the feature on affected CPUs.
+
+    If unsure, say Y.
+
 config ARM64_ERRATUM_2119858
-  bool "Cortex-A710: 2119858: workaround TRBE overwriting trace data in FILL mode"
+  bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
   default y
   depends on CORESIGHT_TRBE
   select ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
   help
-    This option adds the workaround for ARM Cortex-A710 erratum 2119858.
+    This option adds the workaround for ARM Cortex-A710/X2 erratum 2119858.
 
-    Affected Cortex-A710 cores could overwrite up to 3 cache lines of trace
+    Affected Cortex-A710/X2 cores could overwrite up to 3 cache lines of trace
     data at the base of the buffer (pointed to by TRBASER_EL1) in FILL mode in
     the event of a WRAP event.
@@ -761,14 +771,14 @@ config ARM64_ERRATUM_2253138
     If unsure, say Y.
 
 config ARM64_ERRATUM_2224489
-  bool "Cortex-A710: 2224489: workaround TRBE writing to address out-of-range"
+  bool "Cortex-A710/X2: 2224489: workaround TRBE writing to address out-of-range"
   depends on CORESIGHT_TRBE
   default y
   select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
   help
-    This option adds the workaround for ARM Cortex-A710 erratum 2224489.
+    This option adds the workaround for ARM Cortex-A710/X2 erratum 2224489.
 
-    Affected Cortex-A710 cores might write to an out-of-range address, not reserved
+    Affected Cortex-A710/X2 cores might write to an out-of-range address, not reserved
     for TRBE. Under some conditions, the TRBE might generate a write to the next
     virtually addressed page following the last page of the TRBE address space
     (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
@@ -778,6 +788,65 @@ config ARM64_ERRATUM_2224489
     If unsure, say Y.
 
+config ARM64_ERRATUM_2064142
+  bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
+  depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+  default y
+  help
+    This option adds the workaround for ARM Cortex-A510 erratum 2064142.
+
+    Affected Cortex-A510 core might fail to write into system registers after the
+    TRBE has been disabled. Under some conditions after the TRBE has been disabled
+    writes into TRBE registers TRBLIMITR_EL1, TRBPTR_EL1, TRBBASER_EL1, TRBSR_EL1,
+    and TRBTRG_EL1 will be ignored and will not take effect.
+
+    Work around this in the driver by executing TSB CSYNC and DSB after collection
+    is stopped and before performing a system register write to one of the affected
+    registers.
+
+    If unsure, say Y.
+
+config ARM64_ERRATUM_2038923
+  bool "Cortex-A510: 2038923: workaround TRBE corruption with enable"
+  depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+  default y
+  help
+    This option adds the workaround for ARM Cortex-A510 erratum 2038923.
+
+    Affected Cortex-A510 core might cause an inconsistent view on whether trace is
+    prohibited within the CPU. As a result, the trace buffer or trace buffer state
+    might be corrupted. This happens after TRBE buffer has been enabled by setting
+    TRBLIMITR_EL1.E, followed by just a single context synchronization event before
+    execution changes from a context, in which trace is prohibited to one where it
+    isn't, or vice versa. In these mentioned conditions, the view of whether trace
+    is prohibited is inconsistent between parts of the CPU, and the trace buffer or
+    the trace buffer state might be corrupted.
+
+    Work around this in the driver by preventing an inconsistent view of whether the
+    trace is prohibited or not based on TRBLIMITR_EL1.E by immediately following a
+    change to TRBLIMITR_EL1.E with at least one ISB instruction before an ERET, or
+    two ISB instructions if no ERET is to take place.
+
+    If unsure, say Y.
+
+config ARM64_ERRATUM_1902691
+  bool "Cortex-A510: 1902691: workaround TRBE trace corruption"
+  depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+  default y
+  help
+    This option adds the workaround for ARM Cortex-A510 erratum 1902691.
+
+    Affected Cortex-A510 core might cause trace data corruption, when being written
+    into the memory. Effectively TRBE is broken and hence cannot be used to capture
+    trace data.
+
+    Work around this problem in the driver by just preventing TRBE initialization on
+    affected cpus. The firmware must have disabled the access to TRBE for the kernel
+    on such implementations. This will cover the kernel for any firmware that doesn't
+    do this already.
+
+    If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
   bool "Cavium erratum 22375, 24313"
   default y


@@ -73,7 +73,9 @@
 #define ARM_CPU_PART_CORTEX_A76 0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1 0xD0C
 #define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A510 0xD46
 #define ARM_CPU_PART_CORTEX_A710 0xD47
+#define ARM_CPU_PART_CORTEX_X2 0xD48
 #define ARM_CPU_PART_NEOVERSE_N2 0xD49
 #define APM_CPU_PART_POTENZA 0x000
@@ -115,7 +117,9 @@
 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)


@@ -347,6 +347,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2119858
   MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+  MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
 #endif
   {},
 };
@@ -371,6 +372,7 @@ static struct midr_range trbe_write_out_of_range_cpus[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2224489
   MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+  MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
 #endif
   {},
 };
@@ -597,6 +599,33 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
     .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
     CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
   },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2064142
+  {
+    .desc = "ARM erratum 2064142",
+    .capability = ARM64_WORKAROUND_2064142,
+    /* Cortex-A510 r0p0 - r0p2 */
+    ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
+  },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2038923
+  {
+    .desc = "ARM erratum 2038923",
+    .capability = ARM64_WORKAROUND_2038923,
+    /* Cortex-A510 r0p0 - r0p2 */
+    ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
+  },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1902691
+  {
+    .desc = "ARM erratum 1902691",
+    .capability = ARM64_WORKAROUND_1902691,
+    /* Cortex-A510 r0p0 - r0p1 */
+    ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
+  },
+#endif
 #endif
   {
   }


@@ -1645,6 +1645,9 @@ static bool cpu_has_broken_dbm(void)
   MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
   /* Kryo4xx Silver (rdpe => r1p0) */
   MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2051678
+  MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
 #endif
   {},
 };


@@ -33,8 +33,8 @@
  */
-static void start_backtrace(struct stackframe *frame, unsigned long fp,
+static notrace void start_backtrace(struct stackframe *frame, unsigned long fp,
                             unsigned long pc)
 {
   frame->fp = fp;
   frame->pc = pc;
@@ -55,6 +55,7 @@ static void start_backtrace(struct stackframe *frame, unsigned long fp,
   frame->prev_fp = 0;
   frame->prev_type = STACK_TYPE_UNKNOWN;
 }
+NOKPROBE_SYMBOL(start_backtrace);
 
 /*
  * Unwind from one frame record (A) to the next frame record (B).


@@ -29,8 +29,11 @@ ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
 
+# -Wmissing-prototypes and -Wmissing-declarations are removed from
+# the CFLAGS of vgettimeofday.c to make possible to build the
+# kernel with CONFIG_WERROR enabled.
 CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
-                                $(CC_FLAGS_LTO)
+                                $(CC_FLAGS_LTO) -Wmissing-prototypes -Wmissing-declarations
 KASAN_SANITIZE := n
 KCSAN_SANITIZE := n
 UBSAN_SANITIZE := n


@@ -38,7 +38,10 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
 {
-  write_sysreg_el1(val, SYS_SPSR);
+  if (has_vhe())
+    write_sysreg_el1(val, SYS_SPSR);
+  else
+    __vcpu_sys_reg(vcpu, SPSR_EL1) = val;
 }
 
 static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)


@@ -983,13 +983,9 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
    */
   stage2_put_pte(ptep, mmu, addr, level, mm_ops);
 
-  if (need_flush) {
-    kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
-
-    dcache_clean_inval_poc((unsigned long)pte_follow,
-                           (unsigned long)pte_follow +
-                           kvm_granule_size(level));
-  }
+  if (need_flush && mm_ops->dcache_clean_inval_poc)
+    mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+                                   kvm_granule_size(level));
 
   if (childp)
     mm_ops->put_page(childp);
@@ -1151,15 +1147,13 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
   struct kvm_pgtable *pgt = arg;
   struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
   kvm_pte_t pte = *ptep;
-  kvm_pte_t *pte_follow;
 
   if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
     return 0;
 
-  pte_follow = kvm_pte_follow(pte, mm_ops);
-  dcache_clean_inval_poc((unsigned long)pte_follow,
-                         (unsigned long)pte_follow +
-                         kvm_granule_size(level));
+  if (mm_ops->dcache_clean_inval_poc)
+    mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+                                   kvm_granule_size(level));
   return 0;
 }


@@ -983,6 +983,9 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
   val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
   /* IDbits */
   val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
+  /* SEIS */
+  if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
+    val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
   /* A3V */
   val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
   /* EOImode */


@@ -609,6 +609,18 @@ static int __init early_gicv4_enable(char *buf)
 }
 early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
 
+static const struct midr_range broken_seis[] = {
+  MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+  MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+  {},
+};
+
+static bool vgic_v3_broken_seis(void)
+{
+  return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
+          is_midr_in_range_list(read_cpuid_id(), broken_seis));
+}
+
 /**
  * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
  * @info: pointer to the GIC description
@@ -676,9 +688,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
     group1_trap = true;
   }
 
-  if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
-    kvm_info("GICv3 with locally generated SEI\n");
+  if (vgic_v3_broken_seis()) {
+    kvm_info("GICv3 with broken locally generated SEI\n");
 
+    kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
     group0_trap = true;
     group1_trap = true;
     if (ich_vtr_el2 & ICH_VTR_TDS_MASK)


@@ -40,8 +40,8 @@ static bool
 ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
                                   struct pt_regs *regs)
 {
-  int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->type);
-  int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->type);
+  int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+  int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
   unsigned long data, addr, offset;
 
   addr = pt_regs_read_reg(regs, reg_addr);


@@ -55,6 +55,9 @@ WORKAROUND_1418040
 WORKAROUND_1463225
 WORKAROUND_1508412
 WORKAROUND_1542419
+WORKAROUND_2064142
+WORKAROUND_2038923
+WORKAROUND_1902691
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
 WORKAROUND_TRBE_WRITE_OUT_OF_RANGE


@@ -318,7 +318,7 @@ config ARCH_PROC_KCORE_TEXT
   depends on PROC_KCORE
 
 config IA64_MCA_RECOVERY
-  tristate "MCA recovery from errors other than TLB."
+  bool "MCA recovery from errors other than TLB."
 
 config IA64_PALINFO
   tristate "/proc/pal support"


@@ -76,5 +76,5 @@ static void pci_fixup_video(struct pci_dev *pdev)
     }
   }
 }
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
                               PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);


@@ -285,7 +285,7 @@ symbol = value
 #define PTR_SCALESHIFT 2
-#define PTR .word
+#define PTR_WD .word
 #define PTRSIZE 4
 #define PTRLOG 2
 #endif
@@ -310,7 +310,7 @@ symbol = value
 #define PTR_SCALESHIFT 3
-#define PTR .dword
+#define PTR_WD .dword
 #define PTRSIZE 8
 #define PTRLOG 3
 #endif


@@ -32,7 +32,7 @@ do { \
 ".previous\n" \
 \
 ".section\t__ex_table,\"a\"\n\t" \
-STR(PTR) "\t1b, 3b\n\t" \
+STR(PTR_WD) "\t1b, 3b\n\t" \
 ".previous\n" \
 \
 : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
@@ -54,7 +54,7 @@ do { \
 ".previous\n" \
 \
 ".section\t__ex_table,\"a\"\n\t"\
-STR(PTR) "\t1b, 3b\n\t" \
+STR(PTR_WD) "\t1b, 3b\n\t" \
 ".previous\n" \
 \
 : [tmp_err] "=r" (error) \


@@ -119,7 +119,7 @@ static inline void flush_scache_line(unsigned long addr)
 " j 2b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
-" "STR(PTR)" 1b, 3b \n" \
+" "STR(PTR_WD)" 1b, 3b \n" \
 " .previous" \
 : "+r" (__err) \
 : "i" (op), "r" (addr), "i" (-EFAULT)); \
@@ -142,7 +142,7 @@ static inline void flush_scache_line(unsigned long addr)
 " j 2b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
-" "STR(PTR)" 1b, 3b \n" \
+" "STR(PTR_WD)" 1b, 3b \n" \
 " .previous" \
 : "+r" (__err) \
 : "i" (op), "r" (addr), "i" (-EFAULT)); \


@ -20,8 +20,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -41,8 +41,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -74,10 +74,10 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -102,8 +102,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -125,8 +125,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -145,8 +145,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -178,10 +178,10 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -223,14 +223,14 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
STR(PTR)"\t5b, 11b\n\t" \ STR(PTR_WD)"\t5b, 11b\n\t" \
STR(PTR)"\t6b, 11b\n\t" \ STR(PTR_WD)"\t6b, 11b\n\t" \
STR(PTR)"\t7b, 11b\n\t" \ STR(PTR_WD)"\t7b, 11b\n\t" \
STR(PTR)"\t8b, 11b\n\t" \ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -255,8 +255,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=r" (res) \ : "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT));\ : "r" (value), "r" (addr), "i" (-EFAULT));\
@ -276,8 +276,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=r" (res) \ : "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \ : "r" (value), "r" (addr), "i" (-EFAULT)); \
@ -296,8 +296,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=r" (res) \ : "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \ : "r" (value), "r" (addr), "i" (-EFAULT)); \
@ -325,10 +325,10 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (res) \ : "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \ : "r" (value), "r" (addr), "i" (-EFAULT) \
@ -365,14 +365,14 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
STR(PTR)"\t5b, 11b\n\t" \ STR(PTR_WD)"\t5b, 11b\n\t" \
STR(PTR)"\t6b, 11b\n\t" \ STR(PTR_WD)"\t6b, 11b\n\t" \
STR(PTR)"\t7b, 11b\n\t" \ STR(PTR_WD)"\t7b, 11b\n\t" \
STR(PTR)"\t8b, 11b\n\t" \ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (res) \ : "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \ : "r" (value), "r" (addr), "i" (-EFAULT) \
@ -398,8 +398,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -419,8 +419,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -452,10 +452,10 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -481,8 +481,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -504,8 +504,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -524,8 +524,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -557,10 +557,10 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -602,14 +602,14 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
STR(PTR)"\t5b, 11b\n\t" \ STR(PTR_WD)"\t5b, 11b\n\t" \
STR(PTR)"\t6b, 11b\n\t" \ STR(PTR_WD)"\t6b, 11b\n\t" \
STR(PTR)"\t7b, 11b\n\t" \ STR(PTR_WD)"\t7b, 11b\n\t" \
STR(PTR)"\t8b, 11b\n\t" \ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (value), "=r" (res) \ : "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \ : "r" (addr), "i" (-EFAULT)); \
@ -632,8 +632,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=r" (res) \ : "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT));\ : "r" (value), "r" (addr), "i" (-EFAULT));\
@ -653,8 +653,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=r" (res) \ : "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \ : "r" (value), "r" (addr), "i" (-EFAULT)); \
@ -673,8 +673,8 @@ do { \
"j\t3b\n\t" \ "j\t3b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 4b\n\t" \ STR(PTR_WD)"\t1b, 4b\n\t" \
STR(PTR)"\t2b, 4b\n\t" \ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \ ".previous" \
: "=r" (res) \ : "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \ : "r" (value), "r" (addr), "i" (-EFAULT)); \
@ -703,10 +703,10 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (res) \ : "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \ : "r" (value), "r" (addr), "i" (-EFAULT) \
@ -743,14 +743,14 @@ do { \
"j\t10b\n\t" \ "j\t10b\n\t" \
".previous\n\t" \ ".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \ ".section\t__ex_table,\"a\"\n\t" \
STR(PTR)"\t1b, 11b\n\t" \ STR(PTR_WD)"\t1b, 11b\n\t" \
STR(PTR)"\t2b, 11b\n\t" \ STR(PTR_WD)"\t2b, 11b\n\t" \
STR(PTR)"\t3b, 11b\n\t" \ STR(PTR_WD)"\t3b, 11b\n\t" \
STR(PTR)"\t4b, 11b\n\t" \ STR(PTR_WD)"\t4b, 11b\n\t" \
STR(PTR)"\t5b, 11b\n\t" \ STR(PTR_WD)"\t5b, 11b\n\t" \
STR(PTR)"\t6b, 11b\n\t" \ STR(PTR_WD)"\t6b, 11b\n\t" \
STR(PTR)"\t7b, 11b\n\t" \ STR(PTR_WD)"\t7b, 11b\n\t" \
STR(PTR)"\t8b, 11b\n\t" \ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \ ".previous" \
: "=&r" (res) \ : "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \ : "r" (value), "r" (addr), "i" (-EFAULT) \


@ -1258,10 +1258,10 @@ fpu_emul:
" j 10b\n" " j 10b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -1333,10 +1333,10 @@ fpu_emul:
" j 10b\n" " j 10b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -1404,10 +1404,10 @@ fpu_emul:
" j 9b\n" " j 9b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -1474,10 +1474,10 @@ fpu_emul:
" j 9b\n" " j 9b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -1589,14 +1589,14 @@ fpu_emul:
" j 9b\n" " j 9b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
STR(PTR) " 5b,8b\n" STR(PTR_WD) " 5b,8b\n"
STR(PTR) " 6b,8b\n" STR(PTR_WD) " 6b,8b\n"
STR(PTR) " 7b,8b\n" STR(PTR_WD) " 7b,8b\n"
STR(PTR) " 0b,8b\n" STR(PTR_WD) " 0b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -1708,14 +1708,14 @@ fpu_emul:
" j 9b\n" " j 9b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
STR(PTR) " 5b,8b\n" STR(PTR_WD) " 5b,8b\n"
STR(PTR) " 6b,8b\n" STR(PTR_WD) " 6b,8b\n"
STR(PTR) " 7b,8b\n" STR(PTR_WD) " 7b,8b\n"
STR(PTR) " 0b,8b\n" STR(PTR_WD) " 0b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -1827,14 +1827,14 @@ fpu_emul:
" j 9b\n" " j 9b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
STR(PTR) " 5b,8b\n" STR(PTR_WD) " 5b,8b\n"
STR(PTR) " 6b,8b\n" STR(PTR_WD) " 6b,8b\n"
STR(PTR) " 7b,8b\n" STR(PTR_WD) " 7b,8b\n"
STR(PTR) " 0b,8b\n" STR(PTR_WD) " 0b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -1945,14 +1945,14 @@ fpu_emul:
" j 9b\n" " j 9b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
STR(PTR) " 1b,8b\n" STR(PTR_WD) " 1b,8b\n"
STR(PTR) " 2b,8b\n" STR(PTR_WD) " 2b,8b\n"
STR(PTR) " 3b,8b\n" STR(PTR_WD) " 3b,8b\n"
STR(PTR) " 4b,8b\n" STR(PTR_WD) " 4b,8b\n"
STR(PTR) " 5b,8b\n" STR(PTR_WD) " 5b,8b\n"
STR(PTR) " 6b,8b\n" STR(PTR_WD) " 6b,8b\n"
STR(PTR) " 7b,8b\n" STR(PTR_WD) " 7b,8b\n"
STR(PTR) " 0b,8b\n" STR(PTR_WD) " 0b,8b\n"
" .previous\n" " .previous\n"
" .set pop\n" " .set pop\n"
: "+&r"(rt), "=&r"(rs), : "+&r"(rt), "=&r"(rs),
@ -2007,7 +2007,7 @@ fpu_emul:
"j 2b\n" "j 2b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" ".section __ex_table,\"a\"\n"
STR(PTR) " 1b,3b\n" STR(PTR_WD) " 1b,3b\n"
".previous\n" ".previous\n"
: "=&r"(res), "+&r"(err) : "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV) : "r"(vaddr), "i"(SIGSEGV)
@ -2065,7 +2065,7 @@ fpu_emul:
"j 2b\n" "j 2b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" ".section __ex_table,\"a\"\n"
STR(PTR) " 1b,3b\n" STR(PTR_WD) " 1b,3b\n"
".previous\n" ".previous\n"
: "+&r"(res), "+&r"(err) : "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)); : "r"(vaddr), "i"(SIGSEGV));
@ -2126,7 +2126,7 @@ fpu_emul:
"j 2b\n" "j 2b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" ".section __ex_table,\"a\"\n"
STR(PTR) " 1b,3b\n" STR(PTR_WD) " 1b,3b\n"
".previous\n" ".previous\n"
: "=&r"(res), "+&r"(err) : "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV) : "r"(vaddr), "i"(SIGSEGV)
@ -2189,7 +2189,7 @@ fpu_emul:
"j 2b\n" "j 2b\n"
".previous\n" ".previous\n"
".section __ex_table,\"a\"\n" ".section __ex_table,\"a\"\n"
STR(PTR) " 1b,3b\n" STR(PTR_WD) " 1b,3b\n"
".previous\n" ".previous\n"
: "+&r"(res), "+&r"(err) : "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)); : "r"(vaddr), "i"(SIGSEGV));


@@ -23,14 +23,14 @@
 #define EX(a,b) \
 9: a,##b; \
 .section __ex_table,"a"; \
-PTR 9b,fault; \
+PTR_WD 9b,fault; \
 .previous
 
 #define EX2(a,b) \
 9: a,##b; \
 .section __ex_table,"a"; \
-PTR 9b,fault; \
-PTR 9b+4,fault; \
+PTR_WD 9b,fault; \
+PTR_WD 9b+4,fault; \
 .previous
 
 .set mips1


@ -31,7 +31,7 @@
.ex\@: \insn \reg, \src .ex\@: \insn \reg, \src
.set pop .set pop
.section __ex_table,"a" .section __ex_table,"a"
PTR .ex\@, fault PTR_WD .ex\@, fault
.previous .previous
.endm .endm


@ -147,10 +147,10 @@ LEAF(kexec_smp_wait)
kexec_args: kexec_args:
EXPORT(kexec_args) EXPORT(kexec_args)
arg0: PTR 0x0 arg0: PTR_WD 0x0
arg1: PTR 0x0 arg1: PTR_WD 0x0
arg2: PTR 0x0 arg2: PTR_WD 0x0
arg3: PTR 0x0 arg3: PTR_WD 0x0
.size kexec_args,PTRSIZE*4 .size kexec_args,PTRSIZE*4
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
@ -161,10 +161,10 @@ arg3: PTR 0x0
*/ */
secondary_kexec_args: secondary_kexec_args:
EXPORT(secondary_kexec_args) EXPORT(secondary_kexec_args)
s_arg0: PTR 0x0 s_arg0: PTR_WD 0x0
s_arg1: PTR 0x0 s_arg1: PTR_WD 0x0
s_arg2: PTR 0x0 s_arg2: PTR_WD 0x0
s_arg3: PTR 0x0 s_arg3: PTR_WD 0x0
.size secondary_kexec_args,PTRSIZE*4 .size secondary_kexec_args,PTRSIZE*4
kexec_flag: kexec_flag:
LONG 0x1 LONG 0x1
@ -173,17 +173,17 @@ kexec_flag:
kexec_start_address: kexec_start_address:
EXPORT(kexec_start_address) EXPORT(kexec_start_address)
PTR 0x0 PTR_WD 0x0
.size kexec_start_address, PTRSIZE .size kexec_start_address, PTRSIZE
kexec_indirection_page: kexec_indirection_page:
EXPORT(kexec_indirection_page) EXPORT(kexec_indirection_page)
PTR 0 PTR_WD 0
.size kexec_indirection_page, PTRSIZE .size kexec_indirection_page, PTRSIZE
relocate_new_kernel_end: relocate_new_kernel_end:
relocate_new_kernel_size: relocate_new_kernel_size:
EXPORT(relocate_new_kernel_size) EXPORT(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel PTR_WD relocate_new_kernel_end - relocate_new_kernel
.size relocate_new_kernel_size, PTRSIZE .size relocate_new_kernel_size, PTRSIZE


@ -72,10 +72,10 @@ loads_done:
.set pop .set pop
.section __ex_table,"a" .section __ex_table,"a"
PTR load_a4, bad_stack_a4 PTR_WD load_a4, bad_stack_a4
PTR load_a5, bad_stack_a5 PTR_WD load_a5, bad_stack_a5
PTR load_a6, bad_stack_a6 PTR_WD load_a6, bad_stack_a6
PTR load_a7, bad_stack_a7 PTR_WD load_a7, bad_stack_a7
.previous .previous
lw t0, TI_FLAGS($28) # syscall tracing enabled? lw t0, TI_FLAGS($28) # syscall tracing enabled?
@ -216,7 +216,7 @@ einval: li v0, -ENOSYS
#endif /* CONFIG_MIPS_MT_FPAFF */ #endif /* CONFIG_MIPS_MT_FPAFF */
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define __SYSCALL(nr, entry) PTR entry #define __SYSCALL(nr, entry) PTR_WD entry
.align 2 .align 2
.type sys_call_table, @object .type sys_call_table, @object
EXPORT(sys_call_table) EXPORT(sys_call_table)


@ -101,7 +101,7 @@ not_n32_scall:
END(handle_sysn32) END(handle_sysn32)
#define __SYSCALL(nr, entry) PTR entry #define __SYSCALL(nr, entry) PTR_WD entry
.type sysn32_call_table, @object .type sysn32_call_table, @object
EXPORT(sysn32_call_table) EXPORT(sysn32_call_table)
#include <asm/syscall_table_n32.h> #include <asm/syscall_table_n32.h>


@ -109,7 +109,7 @@ illegal_syscall:
j n64_syscall_exit j n64_syscall_exit
END(handle_sys64) END(handle_sys64)
#define __SYSCALL(nr, entry) PTR entry #define __SYSCALL(nr, entry) PTR_WD entry
.align 3 .align 3
.type sys_call_table, @object .type sys_call_table, @object
EXPORT(sys_call_table) EXPORT(sys_call_table)


@ -73,10 +73,10 @@ load_a7: lw a7, 28(t0) # argument #8 from usp
loads_done: loads_done:
.section __ex_table,"a" .section __ex_table,"a"
PTR load_a4, bad_stack_a4 PTR_WD load_a4, bad_stack_a4
PTR load_a5, bad_stack_a5 PTR_WD load_a5, bad_stack_a5
PTR load_a6, bad_stack_a6 PTR_WD load_a6, bad_stack_a6
PTR load_a7, bad_stack_a7 PTR_WD load_a7, bad_stack_a7
.previous .previous
li t1, _TIF_WORK_SYSCALL_ENTRY li t1, _TIF_WORK_SYSCALL_ENTRY
@ -214,7 +214,7 @@ einval: li v0, -ENOSYS
END(sys32_syscall) END(sys32_syscall)
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
#define __SYSCALL(nr, entry) PTR entry #define __SYSCALL(nr, entry) PTR_WD entry
.align 3 .align 3
.type sys32_call_table,@object .type sys32_call_table,@object
EXPORT(sys32_call_table) EXPORT(sys32_call_table)


@ -122,8 +122,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" j 3b \n" " j 3b \n"
" .previous \n" " .previous \n"
" .section __ex_table,\"a\" \n" " .section __ex_table,\"a\" \n"
" "STR(PTR)" 1b, 4b \n" " "STR(PTR_WD)" 1b, 4b \n"
" "STR(PTR)" 2b, 4b \n" " "STR(PTR_WD)" 2b, 4b \n"
" .previous \n" " .previous \n"
" .set pop \n" " .set pop \n"
: [old] "=&r" (old), : [old] "=&r" (old),
@ -152,8 +152,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" j 3b \n" " j 3b \n"
" .previous \n" " .previous \n"
" .section __ex_table,\"a\" \n" " .section __ex_table,\"a\" \n"
" "STR(PTR)" 1b, 5b \n" " "STR(PTR_WD)" 1b, 5b \n"
" "STR(PTR)" 2b, 5b \n" " "STR(PTR_WD)" 2b, 5b \n"
" .previous \n" " .previous \n"
" .set pop \n" " .set pop \n"
: [old] "=&r" (old), : [old] "=&r" (old),


@ -347,7 +347,7 @@ EXPORT_SYMBOL(csum_partial)
.if \mode == LEGACY_MODE; \ .if \mode == LEGACY_MODE; \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, .L_exc; \ PTR_WD 9b, .L_exc; \
.previous; \ .previous; \
/* This is enabled in EVA mode */ \ /* This is enabled in EVA mode */ \
.else; \ .else; \
@ -356,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \ ((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, .L_exc; \ PTR_WD 9b, .L_exc; \
.previous; \ .previous; \
.else; \ .else; \
/* EVA without exception */ \ /* EVA without exception */ \


@ -116,7 +116,7 @@
.if \mode == LEGACY_MODE; \ .if \mode == LEGACY_MODE; \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous; \ .previous; \
/* This is assembled in EVA mode */ \ /* This is assembled in EVA mode */ \
.else; \ .else; \
@ -125,7 +125,7 @@
((\to == USEROP) && (type == ST_INSN)); \ ((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous; \ .previous; \
.else; \ .else; \
/* \ /* \


@ -52,7 +52,7 @@
9: ___BUILD_EVA_INSN(insn, reg, addr); \ 9: ___BUILD_EVA_INSN(insn, reg, addr); \
.endif; \ .endif; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous .previous
.macro f_fill64 dst, offset, val, fixup, mode .macro f_fill64 dst, offset, val, fixup, mode


@ -15,7 +15,7 @@
#define EX(insn,reg,addr,handler) \ #define EX(insn,reg,addr,handler) \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous .previous
/* /*
@ -59,7 +59,7 @@ LEAF(__strncpy_from_user_asm)
jr ra jr ra
.section __ex_table,"a" .section __ex_table,"a"
PTR 1b, .Lfault PTR_WD 1b, .Lfault
.previous .previous
EXPORT_SYMBOL(__strncpy_from_user_asm) EXPORT_SYMBOL(__strncpy_from_user_asm)


@ -14,7 +14,7 @@
#define EX(insn,reg,addr,handler) \ #define EX(insn,reg,addr,handler) \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous .previous
/* /*


@ -3,7 +3,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <loongson.h> #include <loongson.h>
static void pci_fixup_radeon(struct pci_dev *pdev) static void pci_fixup_video(struct pci_dev *pdev)
{ {
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
@ -22,8 +22,7 @@ static void pci_fixup_radeon(struct pci_dev *pdev)
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW | res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED; IORESOURCE_PCI_FIXED;
dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n", dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n", res);
PCI_ROM_RESOURCE, res);
} }
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, 0x9615, DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, 0x9615,
PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon); PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);


@ -223,6 +223,8 @@ static __always_inline void update_user_segments(u32 val)
update_user_segment(15, val); update_user_segment(15, val);
} }
int __init find_free_bat(void);
unsigned int bat_block_size(unsigned long base, unsigned long top);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use /* We happily ignore the smaller BATs on 601, we don't actually use


@ -178,6 +178,7 @@ static inline bool pte_user(pte_t pte)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */


@ -1082,6 +1082,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p
return hash__map_kernel_page(ea, pa, prot); return hash__map_kernel_page(ea, pa, prot);
} }
void unmap_kernel_page(unsigned long va);
static inline int __meminit vmemmap_create_mapping(unsigned long start, static inline int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size, unsigned long page_size,
unsigned long phys) unsigned long phys)


@ -111,8 +111,10 @@ static inline void __set_fixmap(enum fixed_addresses idx,
BUILD_BUG_ON(idx >= __end_of_fixed_addresses); BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses)) else if (WARN_ON(idx >= __end_of_fixed_addresses))
return; return;
if (pgprot_val(flags))
map_kernel_page(__fix_to_virt(idx), phys, flags); map_kernel_page(__fix_to_virt(idx), phys, flags);
else
unmap_kernel_page(__fix_to_virt(idx));
} }
#define __early_set_fixmap __set_fixmap #define __early_set_fixmap __set_fixmap


@ -473,7 +473,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE); return !(regs->msr & MSR_EE);
} }
static inline bool should_hard_irq_enable(void) static __always_inline bool should_hard_irq_enable(void)
{ {
return false; return false;
} }


@ -39,7 +39,6 @@ struct kvm_nested_guest {
pgd_t *shadow_pgtable; /* our page table for this guest */ pgd_t *shadow_pgtable; /* our page table for this guest */
u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */ u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
u64 process_table; /* process table entry for this guest */ u64 process_table; /* process table entry for this guest */
u64 hfscr; /* HFSCR that the L1 requested for this nested guest */
long refcnt; /* number of pointers to this struct */ long refcnt; /* number of pointers to this struct */
struct mutex tlb_lock; /* serialize page faults and tlbies */ struct mutex tlb_lock; /* serialize page faults and tlbies */
struct kvm_nested_guest *next; struct kvm_nested_guest *next;


@ -818,6 +818,7 @@ struct kvm_vcpu_arch {
/* For support of nested guests */ /* For support of nested guests */
struct kvm_nested_guest *nested; struct kvm_nested_guest *nested;
u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id; u32 nested_vcpu_id;
gpa_t nested_io_gpr; gpa_t nested_io_gpr;
#endif #endif


@ -64,6 +64,7 @@ extern int icache_44x_need_flush;
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */


@ -308,6 +308,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __swp_entry_to_pte(x) __pte((x).val) #define __swp_entry_to_pte(x) __pte((x).val)
int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
extern int __meminit vmemmap_create_mapping(unsigned long start, extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size, unsigned long page_size,
unsigned long phys); unsigned long phys);


@ -500,6 +500,7 @@
#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i)) #define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))


@ -90,7 +90,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long val, mask = -1UL; unsigned long val, mask = -1UL;
unsigned int n = 6; unsigned int n = 6;
if (is_32bit_task()) if (is_tsk_32bit_task(task))
mask = 0xffffffff; mask = 0xffffffff;
while (n--) { while (n--) {
@ -105,7 +105,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
static inline int syscall_get_arch(struct task_struct *task) static inline int syscall_get_arch(struct task_struct *task)
{ {
if (is_32bit_task()) if (is_tsk_32bit_task(task))
return AUDIT_ARCH_PPC; return AUDIT_ARCH_PPC;
else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
return AUDIT_ARCH_PPC64LE; return AUDIT_ARCH_PPC64LE;


@ -168,8 +168,10 @@ static inline bool test_thread_local_flags(unsigned int flags)
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#define is_32bit_task() (test_thread_flag(TIF_32BIT)) #define is_32bit_task() (test_thread_flag(TIF_32BIT))
#define is_tsk_32bit_task(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT))
#else #else
#define is_32bit_task() (IS_ENABLED(CONFIG_PPC32)) #define is_32bit_task() (IS_ENABLED(CONFIG_PPC32))
#define is_tsk_32bit_task(tsk) (IS_ENABLED(CONFIG_PPC32))
#endif #endif
#if defined(CONFIG_PPC64) #if defined(CONFIG_PPC64)


@ -30,6 +30,7 @@ COMPAT_SYS_CALL_TABLE:
.ifc \srr,srr .ifc \srr,srr
mfspr r11,SPRN_SRR0 mfspr r11,SPRN_SRR0
ld r12,_NIP(r1) ld r12,_NIP(r1)
clrrdi r11,r11,2
clrrdi r12,r12,2 clrrdi r12,r12,2
100: tdne r11,r12 100: tdne r11,r12
EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
@ -40,6 +41,7 @@ COMPAT_SYS_CALL_TABLE:
.else .else
mfspr r11,SPRN_HSRR0 mfspr r11,SPRN_HSRR0
ld r12,_NIP(r1) ld r12,_NIP(r1)
clrrdi r11,r11,2
clrrdi r12,r12,2 clrrdi r12,r12,2
100: tdne r11,r12 100: tdne r11,r12
EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)


@ -649,8 +649,9 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
__this_cpu_inc(irq_stat.timer_irqs_event); __this_cpu_inc(irq_stat.timer_irqs_event);
} else { } else {
now = *next_tb - now; now = *next_tb - now;
if (now <= decrementer_max) if (now > decrementer_max)
set_dec_or_work(now); now = decrementer_max;
set_dec_or_work(now);
__this_cpu_inc(irq_stat.timer_irqs_others); __this_cpu_inc(irq_stat.timer_irqs_others);
} }


@ -1816,7 +1816,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{ {
struct kvm_nested_guest *nested = vcpu->arch.nested;
int r; int r;
int srcu_idx; int srcu_idx;
@ -1922,7 +1921,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
* it into a HEAI. * it into a HEAI.
*/ */
if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
(nested->hfscr & (1UL << cause))) { (vcpu->arch.nested_hfscr & (1UL << cause))) {
vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
/* /*

View File

@ -363,7 +363,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* set L1 state to L2 state */ /* set L1 state to L2 state */
vcpu->arch.nested = l2; vcpu->arch.nested = l2;
vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
l2->hfscr = l2_hv.hfscr; vcpu->arch.nested_hfscr = l2_hv.hfscr;
vcpu->arch.regs = l2_regs; vcpu->arch.regs = l2_regs;
/* Guest must always run with ME enabled, HV disabled. */ /* Guest must always run with ME enabled, HV disabled. */


@ -76,7 +76,7 @@ unsigned long p_block_mapped(phys_addr_t pa)
return 0; return 0;
} }
static int __init find_free_bat(void) int __init find_free_bat(void)
{ {
int b; int b;
int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
@ -100,7 +100,7 @@ static int __init find_free_bat(void)
* - block size has to be a power of two. This is calculated by finding the * - block size has to be a power of two. This is calculated by finding the
* highest bit set to 1. * highest bit set to 1.
*/ */
static unsigned int block_size(unsigned long base, unsigned long top) unsigned int bat_block_size(unsigned long base, unsigned long top)
{ {
unsigned int max_size = SZ_256M; unsigned int max_size = SZ_256M;
unsigned int base_shift = (ffs(base) - 1) & 31; unsigned int base_shift = (ffs(base) - 1) & 31;
@ -145,7 +145,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
int idx; int idx;
while ((idx = find_free_bat()) != -1 && base != top) { while ((idx = find_free_bat()) != -1 && base != top) {
unsigned int size = block_size(base, top); unsigned int size = bat_block_size(base, top);
if (size < 128 << 10) if (size < 128 << 10)
break; break;
@ -201,12 +201,12 @@ void mmu_mark_initmem_nx(void)
unsigned long size; unsigned long size;
for (i = 0; i < nb - 1 && base < top;) { for (i = 0; i < nb - 1 && base < top;) {
size = block_size(base, top); size = bat_block_size(base, top);
setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
base += size; base += size;
} }
if (base < top) { if (base < top) {
size = block_size(base, top); size = bat_block_size(base, top);
if ((top - base) > size) { if ((top - base) > size) {
size <<= 1; size <<= 1;
if (strict_kernel_rwx_enabled() && base + size > border) if (strict_kernel_rwx_enabled() && base + size > border)


@ -10,48 +10,51 @@ int __init kasan_init_region(void *start, size_t size)
{ {
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
unsigned long k_cur = k_start; unsigned long k_nobat = k_start;
int k_size = k_end - k_start; unsigned long k_cur;
int k_size_base = 1 << (ffs(k_size) - 1); phys_addr_t phys;
int ret; int ret;
void *block;
block = memblock_alloc(k_size, k_size_base); while (k_nobat < k_end) {
unsigned int k_size = bat_block_size(k_nobat, k_end);
int idx = find_free_bat();
if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) { if (idx == -1)
int shift = ffs(k_size - k_size_base); break;
int k_size_more = shift ? 1 << (shift - 1) : 0; if (k_size < SZ_128K)
break;
phys = memblock_phys_alloc_range(k_size, k_size, 0,
MEMBLOCK_ALLOC_ANYWHERE);
if (!phys)
break;
setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL); setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
if (k_size_more >= SZ_128K) k_nobat += k_size;
setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
k_size_more, PAGE_KERNEL);
if (v_block_mapped(k_start))
k_cur = k_start + k_size_base;
if (v_block_mapped(k_start + k_size_base))
k_cur = k_start + k_size_base + k_size_more;
update_bats();
} }
if (k_nobat != k_start)
update_bats();
if (!block) if (k_nobat < k_end) {
block = memblock_alloc(k_size, PAGE_SIZE); phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
if (!block) MEMBLOCK_ALLOC_ANYWHERE);
return -ENOMEM; if (!phys)
return -ENOMEM;
}
ret = kasan_init_shadow_page_tables(k_start, k_end); ret = kasan_init_shadow_page_tables(k_start, k_end);
if (ret) if (ret)
return ret; return ret;
kasan_update_early_region(k_start, k_cur, __pte(0)); kasan_update_early_region(k_start, k_nobat, __pte(0));
for (; k_cur < k_end; k_cur += PAGE_SIZE) { for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
pmd_t *pmd = pmd_off_k(k_cur); pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start; pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
} }
flush_tlb_kernel_range(k_start, k_end); flush_tlb_kernel_range(k_start, k_end);
memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
return 0; return 0;
} }


@ -206,6 +206,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
__set_pte_at(mm, addr, ptep, pte, 0); __set_pte_at(mm, addr, ptep, pte, 0);
} }
void unmap_kernel_page(unsigned long va)
{
pmd_t *pmdp = pmd_off_k(va);
pte_t *ptep = pte_offset_kernel(pmdp, va);
pte_clear(&init_mm, va, ptep);
flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
/* /*
* This is called when relaxing access to a PTE. It's also called in the page * This is called when relaxing access to a PTE. It's also called in the page
* fault path when we don't hit any of the major fault cases, ie, a minor * fault path when we don't hit any of the major fault cases, ie, a minor


@ -23,15 +23,15 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
memset32(area, BREAKPOINT_INSTRUCTION, size / 4); memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
} }
/* Fix the branch target addresses for subprog calls */ /* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image, static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
struct codegen_context *ctx, u32 *addrs) struct codegen_context *ctx, u32 *addrs)
{ {
const struct bpf_insn *insn = fp->insnsi; const struct bpf_insn *insn = fp->insnsi;
bool func_addr_fixed; bool func_addr_fixed;
u64 func_addr; u64 func_addr;
u32 tmp_idx; u32 tmp_idx;
int i, ret; int i, j, ret;
for (i = 0; i < fp->len; i++) { for (i = 0; i < fp->len; i++) {
/* /*
@ -66,6 +66,23 @@ static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
* of the JITed sequence remains unchanged. * of the JITed sequence remains unchanged.
*/ */
ctx->idx = tmp_idx; ctx->idx = tmp_idx;
} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
tmp_idx = ctx->idx;
ctx->idx = addrs[i] / 4;
#ifdef CONFIG_PPC32
PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
EMIT(PPC_RAW_NOP());
#else
func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
PPC_LI64(b2p[insn[i].dst_reg], func_addr);
/* overwrite rest with nops */
for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
EMIT(PPC_RAW_NOP());
#endif
ctx->idx = tmp_idx;
i++;
} }
} }
@ -200,13 +217,13 @@ skip_init_ctx:
/* /*
* Do not touch the prologue and epilogue as they will remain * Do not touch the prologue and epilogue as they will remain
* unchanged. Only fix the branch target address for subprog * unchanged. Only fix the branch target address for subprog
* calls in the body. * calls in the body, and ldimm64 instructions.
* *
* This does not change the offsets and lengths of the subprog * This does not change the offsets and lengths of the subprog
* call instruction sequences and hence, the size of the JITed * call instruction sequences and hence, the size of the JITed
* image as well. * image as well.
*/ */
bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs); bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
/* There is no need to perform the usual passes. */ /* There is no need to perform the usual passes. */
goto skip_codegen_passes; goto skip_codegen_passes;


@ -191,6 +191,9 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
if (image && rel < 0x2000000 && rel >= -0x2000000) { if (image && rel < 0x2000000 && rel >= -0x2000000) {
PPC_BL_ABS(func); PPC_BL_ABS(func);
EMIT(PPC_RAW_NOP());
EMIT(PPC_RAW_NOP());
EMIT(PPC_RAW_NOP());
} else { } else {
/* Load function address into r0 */ /* Load function address into r0 */
EMIT(PPC_RAW_LIS(_R0, IMM_H(func))); EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
@ -290,6 +293,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
bool func_addr_fixed; bool func_addr_fixed;
u64 func_addr; u64 func_addr;
u32 true_cond; u32 true_cond;
u32 tmp_idx;
int j;
/* /*
* addrs[] maps a BPF bytecode address into a real offset from * addrs[] maps a BPF bytecode address into a real offset from
@ -905,8 +910,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* 16 byte instruction that uses two 'struct bpf_insn' * 16 byte instruction that uses two 'struct bpf_insn'
*/ */
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
tmp_idx = ctx->idx;
PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm); PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
PPC_LI32(dst_reg, (u32)insn[i].imm); PPC_LI32(dst_reg, (u32)insn[i].imm);
/* padding to allow full 4 instructions for later patching */
for (j = ctx->idx - tmp_idx; j < 4; j++)
EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */ /* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4; addrs[++i] = ctx->idx * 4;
break; break;


@ -319,6 +319,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
u64 imm64; u64 imm64;
u32 true_cond; u32 true_cond;
u32 tmp_idx; u32 tmp_idx;
int j;
/* /*
* addrs[] maps a BPF bytecode address into a real offset from * addrs[] maps a BPF bytecode address into a real offset from
@ -633,17 +634,21 @@ bpf_alu32_trunc:
EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1])); EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
break; break;
case 64: case 64:
/* /* Store the value to stack and then use byte-reverse loads */
* Way easier and faster(?) to store the value
* into stack and then use ldbrx
*
* ctx->seen will be reliable in pass2, but
* the instructions generated will remain the
* same across all passes
*/
PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx))); EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); if (cpu_has_feature(CPU_FTR_ARCH_206)) {
EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
} else {
EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
}
break; break;
} }
break; break;
@ -848,9 +853,13 @@ emit_clear:
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
imm64 = ((u64)(u32) insn[i].imm) | imm64 = ((u64)(u32) insn[i].imm) |
(((u64)(u32) insn[i+1].imm) << 32); (((u64)(u32) insn[i+1].imm) << 32);
tmp_idx = ctx->idx;
PPC_LI64(dst_reg, imm64);
/* padding to allow full 5 instructions for later patching */
for (j = ctx->idx - tmp_idx; j < 5; j++)
EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */ /* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4; addrs[++i] = ctx->idx * 4;
PPC_LI64(dst_reg, imm64);
break; break;
/* /*


@ -776,6 +776,34 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC6, pmcs[5]); mtspr(SPRN_PMC6, pmcs[5]);
} }
/*
* If the perf subsystem wants performance monitor interrupts as soon as
* possible (e.g., to sample the instruction address and stack chain),
* this should return true. The IRQ masking code can then enable MSR[EE]
* in some places (e.g., interrupt handlers) that allows PMI interrupts
* through to improve accuracy of profiles, at the cost of some performance.
*
* The PMU counters can be enabled by other means (e.g., sysfs raw SPR
* access), but in that case there is no need for prompt PMI handling.
*
* This currently returns true if any perf counter is being used. It
* could possibly return false if only events are being counted rather than
* samples being taken, but for now this is good enough.
*/
bool power_pmu_wants_prompt_pmi(void)
{
struct cpu_hw_events *cpuhw;
/*
* This could simply test local_paca->pmcregs_in_use if that were not
* under ifdef KVM.
*/
if (!ppmu)
return false;
cpuhw = this_cpu_ptr(&cpu_hw_events);
return cpuhw->n_events;
}
#endif /* CONFIG_PPC64 */ #endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs); static void perf_event_interrupt(struct pt_regs *regs);
@ -1327,9 +1355,20 @@ static void power_pmu_disable(struct pmu *pmu)
* Otherwise provide a warning if there is PMI pending, but * Otherwise provide a warning if there is PMI pending, but
* no counter is found overflown. * no counter is found overflown.
*/ */
if (any_pmc_overflown(cpuhw)) if (any_pmc_overflown(cpuhw)) {
clear_pmi_irq_pending(); /*
else * Since power_pmu_disable runs under local_irq_save, it
* could happen that code hits a PMC overflow without PMI
* pending in paca. Hence only clear PMI pending if it was
* set.
*
* If a PMI is pending, then MSR[EE] must be disabled (because
* the masked PMI handler disabling EE). So it is safe to
* call clear_pmi_irq_pending().
*/
if (pmi_irq_pending())
clear_pmi_irq_pending();
} else
WARN_ON(pmi_irq_pending()); WARN_ON(pmi_irq_pending());
val = mmcra = cpuhw->mmcr.mmcra; val = mmcra = cpuhw->mmcr.mmcra;
@ -2438,36 +2477,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
perf_sample_event_took(sched_clock() - start_clock); perf_sample_event_took(sched_clock() - start_clock);
} }
/*
* If the perf subsystem wants performance monitor interrupts as soon as
* possible (e.g., to sample the instruction address and stack chain),
* this should return true. The IRQ masking code can then enable MSR[EE]
* in some places (e.g., interrupt handlers) that allows PMI interrupts
* though to improve accuracy of profiles, at the cost of some performance.
*
* The PMU counters can be enabled by other means (e.g., sysfs raw SPR
* access), but in that case there is no need for prompt PMI handling.
*
* This currently returns true if any perf counter is being used. It
* could possibly return false if only events are being counted rather than
* samples being taken, but for now this is good enough.
*/
bool power_pmu_wants_prompt_pmi(void)
{
struct cpu_hw_events *cpuhw;
/*
* This could simply test local_paca->pmcregs_in_use if that were not
* under ifdef KVM.
*/
if (!ppmu)
return false;
cpuhw = this_cpu_ptr(&cpu_hw_events);
return cpuhw->n_events;
}
static int power_pmu_prepare_cpu(unsigned int cpu) static int power_pmu_prepare_cpu(unsigned int cpu)
{ {
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);


@ -945,6 +945,9 @@ config S390_GUEST
endmenu endmenu
config S390_MODULES_SANITY_TEST_HELPERS
def_bool n
menu "Selftests" menu "Selftests"
config S390_UNWIND_SELFTEST config S390_UNWIND_SELFTEST
@ -971,4 +974,16 @@ config S390_KPROBES_SANITY_TEST
Say N if you are unsure. Say N if you are unsure.
config S390_MODULES_SANITY_TEST
def_tristate n
depends on KUNIT
default KUNIT_ALL_TESTS
prompt "Enable s390 specific modules tests"
select S390_MODULES_SANITY_TEST_HELPERS
help
This option enables an s390 specific modules test. This option is
not useful for distributions or general kernels, but only for
kernel developers working on architecture code.
Say N if you are unsure.
endmenu endmenu


@ -63,6 +63,7 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
CONFIG_KPROBES=y CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y CONFIG_STATIC_KEYS_SELFTEST=y
@ -96,7 +97,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_DEBUG=y CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y CONFIG_CMA_SYSFS=y
@ -109,6 +109,7 @@ CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y CONFIG_PERCPU_STATS=y
CONFIG_GUP_TEST=y CONFIG_GUP_TEST=y
CONFIG_ANON_VMA_NAME=y
CONFIG_NET=y CONFIG_NET=y
CONFIG_PACKET=y CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m CONFIG_PACKET_DIAG=m
@ -116,7 +117,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m CONFIG_NET_KEY=m
CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m CONFIG_SMC=m
CONFIG_SMC_DIAG=m CONFIG_SMC_DIAG=m
CONFIG_INET=y CONFIG_INET=y
@ -185,7 +185,6 @@ CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m CONFIG_NFT_NAT=m
@ -391,6 +390,7 @@ CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m CONFIG_NETLINK_DIAG=m
CONFIG_NET_SWITCHDEV=y
CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_PKTGEN=m CONFIG_NET_PKTGEN=m
CONFIG_PCI=y CONFIG_PCI=y
@ -400,6 +400,7 @@ CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_CONNECTOR=y CONFIG_CONNECTOR=y
CONFIG_ZRAM=y CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
@ -501,6 +502,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_HUAWEI is not set
@ -511,7 +513,6 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_EN=y
CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MICROSEMI is not set
@ -542,6 +543,7 @@ CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VERTEXCOM is not set
# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set # CONFIG_NET_VENDOR_XILINX is not set
@ -592,6 +594,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y
@ -756,9 +759,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_STATS=y
CONFIG_CRYPTO_LIB_BLAKE2S=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m CONFIG_ZCRYPT=m
CONFIG_PKEY=m CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_PAES_S390=m
@ -774,6 +774,8 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_CRC32_SELFTEST=y CONFIG_CRC32_SELFTEST=y
CONFIG_CRC4=m CONFIG_CRC4=m
CONFIG_CRC7=m CONFIG_CRC7=m
@ -807,7 +809,6 @@ CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
@ -819,12 +820,11 @@ CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y CONFIG_WQ_WATCHDOG=y
CONFIG_TEST_LOCKUP=m CONFIG_TEST_LOCKUP=m
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_PROVE_LOCKING=y CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
CONFIG_DEBUG_IRQFLAGS=y
CONFIG_DEBUG_SG=y CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_BUG_ON_DATA_CORRUPTION=y


@ -61,6 +61,7 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
CONFIG_KPROBES=y CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y CONFIG_JUMP_LABEL=y
# CONFIG_GCC_PLUGINS is not set # CONFIG_GCC_PLUGINS is not set
@ -91,7 +92,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_SYSFS=y CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7 CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y CONFIG_MEM_SOFT_DIRTY=y
@ -101,6 +101,7 @@ CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y CONFIG_PERCPU_STATS=y
CONFIG_ANON_VMA_NAME=y
CONFIG_NET=y CONFIG_NET=y
CONFIG_PACKET=y CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m CONFIG_PACKET_DIAG=m
@ -108,7 +109,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m CONFIG_NET_KEY=m
CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m CONFIG_SMC=m
CONFIG_SMC_DIAG=m CONFIG_SMC_DIAG=m
CONFIG_INET=y CONFIG_INET=y
@ -177,7 +177,6 @@ CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m CONFIG_NFT_NAT=m
@ -382,6 +381,7 @@ CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m CONFIG_NETLINK_DIAG=m
CONFIG_NET_SWITCHDEV=y
CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_PKTGEN=m CONFIG_NET_PKTGEN=m
CONFIG_PCI=y CONFIG_PCI=y
@ -391,6 +391,7 @@ CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_CONNECTOR=y CONFIG_CONNECTOR=y
CONFIG_ZRAM=y CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
@ -492,6 +493,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_HUAWEI is not set
@ -502,7 +504,6 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_EN=y
CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MICROSEMI is not set
@ -533,6 +534,7 @@ CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VERTEXCOM is not set
# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set # CONFIG_NET_VENDOR_XILINX is not set
@ -582,6 +584,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y
@ -743,9 +746,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_STATS=y
CONFIG_CRYPTO_LIB_BLAKE2S=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m CONFIG_ZCRYPT=m
CONFIG_PKEY=m CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_PAES_S390=m
@ -762,6 +762,8 @@ CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m CONFIG_PRIME_NUMBERS=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_CRC4=m CONFIG_CRC4=m
CONFIG_CRC7=m CONFIG_CRC7=m
CONFIG_CRC8=m CONFIG_CRC8=m


@ -1,6 +1,7 @@
# CONFIG_SWAP is not set # CONFIG_SWAP is not set
CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
# CONFIG_CPU_ISOLATION is not set # CONFIG_CPU_ISOLATION is not set
# CONFIG_UTS_NS is not set # CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set # CONFIG_TIME_NS is not set
@ -34,6 +35,7 @@ CONFIG_NET=y
# CONFIG_PCPU_DEV_REFCNT is not set # CONFIG_PCPU_DEV_REFCNT is not set
# CONFIG_ETHTOOL_NETLINK is not set # CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_DCSSBLK is not set # CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set # CONFIG_DASD is not set
@ -58,6 +60,7 @@ CONFIG_ZFCP=y
# CONFIG_HID is not set # CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set # CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set # CONFIG_VHOST_MENU is not set
# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set # CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set # CONFIG_INOTIFY_USER is not set


@ -20,6 +20,7 @@
static char local_guest[] = " "; static char local_guest[] = " ";
static char all_guests[] = "* "; static char all_guests[] = "* ";
static char *all_groups = all_guests;
static char *guest_query; static char *guest_query;
struct diag2fc_data { struct diag2fc_data {
@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
memcpy(parm_list.userid, query, NAME_LEN); memcpy(parm_list.userid, query, NAME_LEN);
ASCEBC(parm_list.userid, NAME_LEN); ASCEBC(parm_list.userid, NAME_LEN);
parm_list.addr = (unsigned long) addr ; memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
ASCEBC(parm_list.aci_grp, NAME_LEN);
parm_list.addr = (unsigned long)addr;
parm_list.size = size; parm_list.size = size;
parm_list.fmt = 0x02; parm_list.fmt = 0x02;
memset(parm_list.aci_grp, 0x40, NAME_LEN);
rc = -1; rc = -1;
diag_stat_inc(DIAG_STAT_X2FC); diag_stat_inc(DIAG_STAT_X2FC);


@ -47,8 +47,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
int __put_user_bad(void) __attribute__((noreturn)); int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn)); int __get_user_bad(void) __attribute__((noreturn));
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
union oac { union oac {
unsigned int val; unsigned int val;
struct { struct {
@ -71,6 +69,8 @@ union oac {
}; };
}; };
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
#define __put_get_user_asm(to, from, size, oac_spec) \ #define __put_get_user_asm(to, from, size, oac_spec) \
({ \ ({ \
int __rc; \ int __rc; \

View File

@ -33,7 +33,7 @@
#define DEBUGP(fmt , ...) #define DEBUGP(fmt , ...)
#endif #endif
#define PLT_ENTRY_SIZE 20 #define PLT_ENTRY_SIZE 22
void *module_alloc(unsigned long size) void *module_alloc(unsigned long size)
{ {
@ -341,27 +341,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) { if (info->plt_initialized == 0) {
unsigned int insn[5]; unsigned char insn[PLT_ENTRY_SIZE];
unsigned int *ip = me->core_layout.base + char *plt_base;
me->arch.plt_offset + char *ip;
info->plt_offset;
insn[0] = 0x0d10e310; /* basr 1,0 */ plt_base = me->core_layout.base + me->arch.plt_offset;
insn[1] = 0x100a0004; /* lg 1,10(1) */ ip = plt_base + info->plt_offset;
*(int *)insn = 0x0d10e310; /* basr 1,0 */
*(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
unsigned int *ij; char *jump_r1;
ij = me->core_layout.base +
me->arch.plt_offset + jump_r1 = plt_base + me->arch.plt_size -
me->arch.plt_size - PLT_ENTRY_SIZE; PLT_ENTRY_SIZE;
insn[2] = 0xa7f40000 + /* j __jump_r1 */ /* brcl 0xf,__jump_r1 */
(unsigned int)(u16) *(short *)&insn[8] = 0xc0f4;
(((unsigned long) ij - 8 - *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
(unsigned long) ip) / 2);
} else { } else {
insn[2] = 0x07f10000; /* br %r1 */ *(int *)&insn[8] = 0x07f10000; /* br %r1 */
} }
insn[3] = (unsigned int) (val >> 32); *(long *)&insn[14] = val;
insn[4] = (unsigned int) val;
write(ip, insn, sizeof(insn)); write(ip, insn, sizeof(insn));
info->plt_initialized = 1; info->plt_initialized = 1;

View File

@ -264,7 +264,14 @@ static int notrace s390_validate_registers(union mci mci, int umode)
/* Validate vector registers */ /* Validate vector registers */
union ctlreg0 cr0; union ctlreg0 cr0;
if (!mci.vr) { /*
* The vector validity must only be checked if not running a
* KVM guest. For KVM guests the machine check is forwarded by
* KVM and it is the responsibility of the guest to take
* appropriate actions. The host vector or FPU values have been
* saved by KVM and will be restored by KVM.
*/
if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) {
/* /*
* Vector registers can't be restored. If the kernel * Vector registers can't be restored. If the kernel
* currently uses vector registers the system is * currently uses vector registers the system is
@ -307,11 +314,21 @@ static int notrace s390_validate_registers(union mci mci, int umode)
if (cr2.gse) { if (cr2.gse) {
if (!mci.gs) { if (!mci.gs) {
/* /*
* Guarded storage register can't be restored and * 2 cases:
* the current processes uses guarded storage. * - machine check in kernel or userspace
* It has to be terminated. * - machine check while running SIE (KVM guest)
* For kernel or userspace the userspace values of
* guarded storage control can not be recreated, the
* process must be terminated.
* For SIE the guest values of guarded storage can not
* be recreated. This is either due to a bug or due to
* GS being disabled in the guest. The guest will be
* notified by KVM code and the guests machine check
* handling must take care of this. The host values
* are saved by KVM and are not affected.
*/ */
kill_task = 1; if (!test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
} else { } else {
load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area); load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
} }


@ -17,4 +17,7 @@ KASAN_SANITIZE_uaccess.o := n
obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o
CFLAGS_test_unwind.o += -fno-optimize-sibling-calls CFLAGS_test_unwind.o += -fno-optimize-sibling-calls
obj-$(CONFIG_S390_MODULES_SANITY_TEST) += test_modules.o
obj-$(CONFIG_S390_MODULES_SANITY_TEST_HELPERS) += test_modules_helpers.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o


@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0+
#include <kunit/test.h>
#include <linux/module.h>
#include "test_modules.h"
#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
REPEAT_10000(DECLARE_RETURN);
/*
* Test that modules with many relocations are loaded properly.
*/
static void test_modules_many_vmlinux_relocs(struct kunit *test)
{
int result = 0;
#define CALL_RETURN(i) result += test_modules_return_ ## i()
REPEAT_10000(CALL_RETURN);
KUNIT_ASSERT_EQ(test, result, 49995000);
}
static struct kunit_case modules_testcases[] = {
KUNIT_CASE(test_modules_many_vmlinux_relocs),
{}
};
static struct kunit_suite modules_test_suite = {
.name = "modules_test_s390",
.test_cases = modules_testcases,
};
kunit_test_suites(&modules_test_suite);
MODULE_LICENSE("GPL");


@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef TEST_MODULES_H
#define TEST_MODULES_H
#define __REPEAT_10000_3(f, x) \
f(x ## 0); \
f(x ## 1); \
f(x ## 2); \
f(x ## 3); \
f(x ## 4); \
f(x ## 5); \
f(x ## 6); \
f(x ## 7); \
f(x ## 8); \
f(x ## 9)
#define __REPEAT_10000_2(f, x) \
__REPEAT_10000_3(f, x ## 0); \
__REPEAT_10000_3(f, x ## 1); \
__REPEAT_10000_3(f, x ## 2); \
__REPEAT_10000_3(f, x ## 3); \
__REPEAT_10000_3(f, x ## 4); \
__REPEAT_10000_3(f, x ## 5); \
__REPEAT_10000_3(f, x ## 6); \
__REPEAT_10000_3(f, x ## 7); \
__REPEAT_10000_3(f, x ## 8); \
__REPEAT_10000_3(f, x ## 9)
#define __REPEAT_10000_1(f, x) \
__REPEAT_10000_2(f, x ## 0); \
__REPEAT_10000_2(f, x ## 1); \
__REPEAT_10000_2(f, x ## 2); \
__REPEAT_10000_2(f, x ## 3); \
__REPEAT_10000_2(f, x ## 4); \
__REPEAT_10000_2(f, x ## 5); \
__REPEAT_10000_2(f, x ## 6); \
__REPEAT_10000_2(f, x ## 7); \
__REPEAT_10000_2(f, x ## 8); \
__REPEAT_10000_2(f, x ## 9)
#define REPEAT_10000(f) \
__REPEAT_10000_1(f, 0); \
__REPEAT_10000_1(f, 1); \
__REPEAT_10000_1(f, 2); \
__REPEAT_10000_1(f, 3); \
__REPEAT_10000_1(f, 4); \
__REPEAT_10000_1(f, 5); \
__REPEAT_10000_1(f, 6); \
__REPEAT_10000_1(f, 7); \
__REPEAT_10000_1(f, 8); \
__REPEAT_10000_1(f, 9)
#endif


@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/export.h>
#include "test_modules.h"
#define DEFINE_RETURN(i) \
int test_modules_return_ ## i(void) \
{ \
return 1 ## i - 10000; \
} \
EXPORT_SYMBOL_GPL(test_modules_return_ ## i)
REPEAT_10000(DEFINE_RETURN);


@ -186,6 +186,7 @@ config X86
select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT select HAVE_C_RECORDMCOUNT
select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION
select HAVE_BUILDTIME_MCOUNT_SORT
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE


@ -6236,6 +6236,19 @@ __init int intel_pmu_init(void)
pmu->num_counters = x86_pmu.num_counters; pmu->num_counters = x86_pmu.num_counters;
pmu->num_counters_fixed = x86_pmu.num_counters_fixed; pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
} }
/*
* Quirk: For some Alder Lake machine, when all E-cores are disabled in
* a BIOS, the leaf 0xA will enumerate all counters of P-cores. However,
* the X86_FEATURE_HYBRID_CPU is still set. The above codes will
* mistakenly add extra counters for P-cores. Correct the number of
* counters here.
*/
if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
pmu->num_counters = x86_pmu.num_counters;
pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
}
pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
pmu->unconstrained = (struct event_constraint) pmu->unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
@ -6340,6 +6353,8 @@ __init int intel_pmu_init(void)
} }
if (x86_pmu.lbr_nr) { if (x86_pmu.lbr_nr) {
intel_pmu_lbr_init();
pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
/* only support branch_stack snapshot for perfmon >= v2 */ /* only support branch_stack snapshot for perfmon >= v2 */


@ -8,14 +8,6 @@
#include "../perf_event.h" #include "../perf_event.h"
static const enum {
LBR_EIP_FLAGS = 1,
LBR_TSX = 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
[LBR_FORMAT_EIP_FLAGS] = LBR_EIP_FLAGS,
[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};
/* /*
* Intel LBR_SELECT bits * Intel LBR_SELECT bits
* Intel Vol3a, April 2011, Section 16.7 Table 16-10 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
@ -243,7 +235,7 @@ void intel_pmu_lbr_reset_64(void)
for (i = 0; i < x86_pmu.lbr_nr; i++) { for (i = 0; i < x86_pmu.lbr_nr; i++) {
wrmsrl(x86_pmu.lbr_from + i, 0); wrmsrl(x86_pmu.lbr_from + i, 0);
wrmsrl(x86_pmu.lbr_to + i, 0); wrmsrl(x86_pmu.lbr_to + i, 0);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) if (x86_pmu.lbr_has_info)
wrmsrl(x86_pmu.lbr_info + i, 0); wrmsrl(x86_pmu.lbr_info + i, 0);
} }
} }
@ -305,11 +297,10 @@ enum {
*/ */
static inline bool lbr_from_signext_quirk_needed(void) static inline bool lbr_from_signext_quirk_needed(void)
{ {
int lbr_format = x86_pmu.intel_cap.lbr_format;
bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) || bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
boot_cpu_has(X86_FEATURE_RTM); boot_cpu_has(X86_FEATURE_RTM);
return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX); return !tsx_support && x86_pmu.lbr_has_tsx;
} }
static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key); static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@ -427,12 +418,12 @@ rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
void intel_pmu_lbr_restore(void *ctx)
{
- bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx = ctx;
- int i;
- unsigned lbr_idx, mask;
+ bool need_info = x86_pmu.lbr_has_info;
u64 tos = task_ctx->tos;
+ unsigned lbr_idx, mask;
+ int i;
mask = x86_pmu.lbr_nr - 1;
for (i = 0; i < task_ctx->valid_lbrs; i++) {
@ -444,7 +435,7 @@ void intel_pmu_lbr_restore(void *ctx)
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, 0);
wrlbr_to(lbr_idx, 0);
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+ if (need_info)
wrlbr_info(lbr_idx, 0);
}
@ -519,9 +510,9 @@ static void __intel_pmu_lbr_restore(void *ctx)
void intel_pmu_lbr_save(void *ctx)
{
- bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx = ctx;
+ bool need_info = x86_pmu.lbr_has_info;
unsigned lbr_idx, mask;
u64 tos;
int i;
@ -816,7 +807,6 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
- int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
int i;
int out = 0;
@ -831,9 +821,7 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
for (i = 0; i < num; i++) {
unsigned long lbr_idx = (tos - i) & mask;
u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
- int skip = 0;
u16 cycles = 0;
- int lbr_flags = lbr_desc[lbr_format];
from = rdlbr_from(lbr_idx, NULL);
to = rdlbr_to(lbr_idx, NULL);
@ -845,38 +833,40 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
if (call_stack && !from)
break;
- if (lbr_format == LBR_FORMAT_INFO && need_info) {
- u64 info;
- info = rdlbr_info(lbr_idx, NULL);
- mis = !!(info & LBR_INFO_MISPRED);
- pred = !mis;
- in_tx = !!(info & LBR_INFO_IN_TX);
- abort = !!(info & LBR_INFO_ABORT);
- cycles = (info & LBR_INFO_CYCLES);
- }
- if (lbr_format == LBR_FORMAT_TIME) {
- mis = !!(from & LBR_FROM_FLAG_MISPRED);
- pred = !mis;
- skip = 1;
- cycles = ((to >> 48) & LBR_INFO_CYCLES);
- to = (u64)((((s64)to) << 16) >> 16);
- }
- if (lbr_flags & LBR_EIP_FLAGS) {
- mis = !!(from & LBR_FROM_FLAG_MISPRED);
- pred = !mis;
- skip = 1;
- }
- if (lbr_flags & LBR_TSX) {
- in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
- abort = !!(from & LBR_FROM_FLAG_ABORT);
- skip = 3;
- }
- from = (u64)((((s64)from) << skip) >> skip);
+ if (x86_pmu.lbr_has_info) {
+ if (need_info) {
+ u64 info;
+ info = rdlbr_info(lbr_idx, NULL);
+ mis = !!(info & LBR_INFO_MISPRED);
+ pred = !mis;
+ cycles = (info & LBR_INFO_CYCLES);
+ if (x86_pmu.lbr_has_tsx) {
+ in_tx = !!(info & LBR_INFO_IN_TX);
+ abort = !!(info & LBR_INFO_ABORT);
+ }
+ }
+ } else {
+ int skip = 0;
+ if (x86_pmu.lbr_from_flags) {
+ mis = !!(from & LBR_FROM_FLAG_MISPRED);
+ pred = !mis;
+ skip = 1;
+ }
+ if (x86_pmu.lbr_has_tsx) {
+ in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
+ abort = !!(from & LBR_FROM_FLAG_ABORT);
+ skip = 3;
+ }
+ from = (u64)((((s64)from) << skip) >> skip);
+ if (x86_pmu.lbr_to_cycles) {
+ cycles = ((to >> 48) & LBR_INFO_CYCLES);
+ to = (u64)((((s64)to) << 16) >> 16);
+ }
+ }
/*
 * Some CPUs report duplicated abort records,
 * with the second entry not having an abort bit set.
@ -903,37 +893,40 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
cpuc->lbr_stack.hw_idx = tos;
}

+ static DEFINE_STATIC_KEY_FALSE(x86_lbr_mispred);
+ static DEFINE_STATIC_KEY_FALSE(x86_lbr_cycles);
+ static DEFINE_STATIC_KEY_FALSE(x86_lbr_type);

static __always_inline int get_lbr_br_type(u64 info)
{
- if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
- return 0;
- return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
+ int type = 0;
+ if (static_branch_likely(&x86_lbr_type))
+ type = (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
+ return type;
}

static __always_inline bool get_lbr_mispred(u64 info)
{
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
- return 0;
- return !!(info & LBR_INFO_MISPRED);
- }
- static __always_inline bool get_lbr_predicted(u64 info)
- {
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
- return 0;
- return !(info & LBR_INFO_MISPRED);
+ bool mispred = 0;
+ if (static_branch_likely(&x86_lbr_mispred))
+ mispred = !!(info & LBR_INFO_MISPRED);
+ return mispred;
}

static __always_inline u16 get_lbr_cycles(u64 info)
{
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
- !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
- return 0;
- return info & LBR_INFO_CYCLES;
+ u16 cycles = info & LBR_INFO_CYCLES;
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
+ (!static_branch_likely(&x86_lbr_cycles) ||
+ !(info & LBR_INFO_CYC_CNT_VALID)))
+ cycles = 0;
+ return cycles;
}

static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
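The helpers above move per-sample capability checks behind jump labels, so the common case costs a patched no-op rather than a load and a compare. A self-contained sketch of the static-key pattern being relied on (a generic kernel-style example, not the perf code itself):

#include <linux/jump_label.h>
#include <linux/types.h>

static DEFINE_STATIC_KEY_FALSE(example_feature);

/* Called once at init when the hardware reports the capability. */
static void example_detect(bool has_feature)
{
	if (has_feature)
		static_branch_enable(&example_feature);
}

/* Hot path: compiles to a patched jump, no runtime flag test. */
static inline u64 example_decode(u64 info)
{
	if (static_branch_likely(&example_feature))
		return info & 0xffffULL;
	return 0;
}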
@ -961,7 +954,7 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
e->from = from;
e->to = to;
e->mispred = get_lbr_mispred(info);
- e->predicted = get_lbr_predicted(info);
+ e->predicted = !e->mispred;
e->in_tx = !!(info & LBR_INFO_IN_TX);
e->abort = !!(info & LBR_INFO_ABORT);
e->cycles = get_lbr_cycles(info);
@ -1120,7 +1113,7 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
(br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
- (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
+ x86_pmu.lbr_has_info)
reg->config |= LBR_NO_INFO;
return 0;
@ -1706,6 +1699,38 @@ void intel_pmu_lbr_init_knl(void)
x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}
void intel_pmu_lbr_init(void)
{
switch (x86_pmu.intel_cap.lbr_format) {
case LBR_FORMAT_EIP_FLAGS2:
x86_pmu.lbr_has_tsx = 1;
fallthrough;
case LBR_FORMAT_EIP_FLAGS:
x86_pmu.lbr_from_flags = 1;
break;
case LBR_FORMAT_INFO:
x86_pmu.lbr_has_tsx = 1;
fallthrough;
case LBR_FORMAT_INFO2:
x86_pmu.lbr_has_info = 1;
break;
case LBR_FORMAT_TIME:
x86_pmu.lbr_from_flags = 1;
x86_pmu.lbr_to_cycles = 1;
break;
}
if (x86_pmu.lbr_has_info) {
/*
* Only used in combination with baseline pebs.
*/
static_branch_enable(&x86_lbr_mispred);
static_branch_enable(&x86_lbr_cycles);
}
}
/*
 * LBR state size is variable based on the max number of registers.
 * This calculates the expected state size, which should match
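The format value that the new intel_pmu_lbr_init() switches on comes from IA32_PERF_CAPABILITIES bits 5:0. A minimal userspace sketch for inspecting it on a given machine (assumes root and the msr driver loaded; this is an editorial illustration, not part of the patch):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_PERF_CAPABILITIES 0x345

int main(void)
{
	uint64_t caps;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pread(fd, &caps, sizeof(caps), MSR_IA32_PERF_CAPABILITIES) != sizeof(caps)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);
	/* Bits 5:0 encode the LBR record format (e.g. 0x05 = LBR_FORMAT_INFO). */
	printf("LBR format: 0x%02llx\n", (unsigned long long)(caps & 0x3f));
	return 0;
}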
@ -1726,6 +1751,9 @@ static bool is_arch_lbr_xsave_available(void)
 * Check the LBR state with the corresponding software structure.
 * Disable LBR XSAVES support if the size doesn't match.
 */
if (xfeature_size(XFEATURE_LBR) == 0)
return false;
if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
return false;
@ -1765,6 +1793,12 @@ void __init intel_pmu_arch_lbr_init(void)
x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
x86_pmu.lbr_nr = lbr_nr;
if (x86_pmu.lbr_mispred)
static_branch_enable(&x86_lbr_mispred);
if (x86_pmu.lbr_timed_lbr)
static_branch_enable(&x86_lbr_cycles);
if (x86_pmu.lbr_br_type)
static_branch_enable(&x86_lbr_type);
arch_lbr_xsave = is_arch_lbr_xsave_available();
if (arch_lbr_xsave) {

View File

@ -1762,7 +1762,7 @@ static const struct intel_uncore_init_fun rkl_uncore_init __initconst = {
static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
.cpu_init = adl_uncore_cpu_init,
- .mmio_init = tgl_uncore_mmio_init,
+ .mmio_init = adl_uncore_mmio_init,
};
static const struct intel_uncore_init_fun icx_uncore_init __initconst = {

View File

@ -584,10 +584,11 @@ void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
- void adl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
+ void adl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
+ void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);
/* uncore_snbep.c */

View File

@ -494,8 +494,8 @@ void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
writel(0, box->io_addr);
}
- static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
+ void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;

View File

@ -139,6 +139,8 @@ void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box);
void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box);
void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
struct perf_event *event);
void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
struct perf_event *event);
void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box);
void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box);

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
#include "uncore_discovery.h"
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
@ -64,6 +65,20 @@
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC 0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC 0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC 0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC 0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC 0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC 0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC 0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC 0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC 0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC 0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC 0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC 0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC 0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC 0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC 0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC 0x4670
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@ -155,6 +170,7 @@
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
@ -1334,6 +1350,62 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_3_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_4_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_5_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_6_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_7_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_8_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_9_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_10_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_11_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_12_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_13_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_14_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_15_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_16_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* end: all zeroes */ }
};
@ -1390,7 +1462,8 @@ static struct pci_dev *tgl_uncore_get_mc_dev(void)
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000
- static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+ static void __uncore_imc_init_box(struct intel_uncore_box *box,
+ unsigned int base_offset)
{
struct pci_dev *pdev = tgl_uncore_get_mc_dev();
struct intel_uncore_pmu *pmu = box->pmu;
@ -1417,11 +1490,17 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
addr |= ((resource_size_t)mch_bar << 32);
#endif
addr += base_offset;
box->io_addr = ioremap(addr, type->mmio_map_size);
if (!box->io_addr)
pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}
static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
__uncore_imc_init_box(box, 0);
}
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
.init_box = tgl_uncore_imc_freerunning_init_box,
.exit_box = uncore_mmio_exit_box,
@ -1469,3 +1548,136 @@ void tgl_uncore_mmio_init(void)
}
/* end of Tiger Lake MMIO uncore support */
/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE 0xd900
#define ADL_UNCORE_IMC_MAP_SIZE 0x200
#define ADL_UNCORE_IMC_CTR 0xe8
#define ADL_UNCORE_IMC_CTRL 0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL 0xc0
#define ADL_UNCORE_IMC_BOX_CTL 0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE 0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE 0x100
#define ADL_UNCORE_IMC_CTL_FRZ (1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL (1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS (1 << 2)
#define ADL_UNCORE_IMC_CTL_INT (ADL_UNCORE_IMC_CTL_RST_CTRL | \
ADL_UNCORE_IMC_CTL_RST_CTRS)
static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);
/* The global control in MC1 can control both MCs. */
if (box->io_addr && (box->pmu->pmu_idx == 1))
writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}
static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
if (!box->io_addr)
return;
writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}
static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
if (!box->io_addr)
return;
writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}
static struct intel_uncore_ops adl_uncore_mmio_ops = {
.init_box = adl_uncore_imc_init_box,
.exit_box = uncore_mmio_exit_box,
.disable_box = adl_uncore_mmio_disable_box,
.enable_box = adl_uncore_mmio_enable_box,
.disable_event = intel_generic_uncore_mmio_disable_event,
.enable_event = intel_generic_uncore_mmio_enable_event,
.read_counter = uncore_mmio_read_counter,
};
#define ADL_UNC_CTL_CHMASK_MASK 0x00000f00
#define ADL_UNC_IMC_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
ADL_UNC_CTL_CHMASK_MASK | \
SNB_UNC_CTL_EDGE_DET)
static struct attribute *adl_uncore_imc_formats_attr[] = {
&format_attr_event.attr,
&format_attr_chmask.attr,
&format_attr_edge.attr,
NULL,
};
static const struct attribute_group adl_uncore_imc_format_group = {
.name = "format",
.attrs = adl_uncore_imc_formats_attr,
};
static struct intel_uncore_type adl_uncore_imc = {
.name = "imc",
.num_counters = 5,
.num_boxes = 2,
.perf_ctr_bits = 64,
.perf_ctr = ADL_UNCORE_IMC_CTR,
.event_ctl = ADL_UNCORE_IMC_CTRL,
.event_mask = ADL_UNC_IMC_EVENT_MASK,
.box_ctl = ADL_UNCORE_IMC_BOX_CTL,
.mmio_offset = 0,
.mmio_map_size = ADL_UNCORE_IMC_MAP_SIZE,
.ops = &adl_uncore_mmio_ops,
.format_group = &adl_uncore_imc_format_group,
};
enum perf_adl_uncore_imc_freerunning_types {
ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
ADL_MMIO_UNCORE_IMC_DATA_READ,
ADL_MMIO_UNCORE_IMC_DATA_WRITE,
ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};
static struct freerunning_counters adl_uncore_imc_freerunning[] = {
[ADL_MMIO_UNCORE_IMC_DATA_TOTAL] = { 0x40, 0x0, 0x0, 1, 64 },
[ADL_MMIO_UNCORE_IMC_DATA_READ] = { 0x58, 0x0, 0x0, 1, 64 },
[ADL_MMIO_UNCORE_IMC_DATA_WRITE] = { 0xA0, 0x0, 0x0, 1, 64 },
};
static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}
static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
.init_box = adl_uncore_imc_freerunning_init_box,
.exit_box = uncore_mmio_exit_box,
.read_counter = uncore_mmio_read_counter,
.hw_config = uncore_freerunning_hw_config,
};
static struct intel_uncore_type adl_uncore_imc_free_running = {
.name = "imc_free_running",
.num_counters = 3,
.num_boxes = 2,
.num_freerunning_types = ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
.mmio_map_size = ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
.freerunning = adl_uncore_imc_freerunning,
.ops = &adl_uncore_imc_freerunning_ops,
.event_descs = tgl_uncore_imc_events,
.format_group = &tgl_uncore_imc_format_group,
};
static struct intel_uncore_type *adl_mmio_uncores[] = {
&adl_uncore_imc,
&adl_uncore_imc_free_running,
NULL
};
void adl_uncore_mmio_init(void)
{
uncore_mmio_uncores = adl_mmio_uncores;
}
/* end of Alder Lake MMIO uncore support */
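The adl_uncore_imc_freerunning table above describes counters that cannot be started, stopped or reset; the core uncore code samples them periodically and accumulates deltas, wrapping at the counter width. A standalone illustration of that accumulation step (an editorial sketch, not the kernel helper itself):

#include <stdint.h>

/*
 * Delta between two samples of a free-running counter that is
 * 'width' bits wide, accounting for wraparound.
 */
static uint64_t freerunning_delta(uint64_t prev, uint64_t now, unsigned int width)
{
	uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);

	return (now - prev) & mask;
}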

View File

@ -5482,7 +5482,7 @@ static struct intel_uncore_type icx_uncore_imc = {
.fixed_ctr_bits = 48,
.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
- .event_descs = hswep_uncore_imc_events,
+ .event_descs = snr_uncore_imc_events,
.perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
.event_ctl = SNR_IMC_MMIO_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,

View File

@ -215,7 +215,8 @@ enum {
LBR_FORMAT_EIP_FLAGS2 = 0x04,
LBR_FORMAT_INFO = 0x05,
LBR_FORMAT_TIME = 0x06,
- LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_TIME,
+ LBR_FORMAT_INFO2 = 0x07,
+ LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_INFO2,
};
enum {
@ -840,6 +841,11 @@ struct x86_pmu {
bool lbr_double_abort; /* duplicated lbr aborts */
bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
unsigned int lbr_has_info:1;
unsigned int lbr_has_tsx:1;
unsigned int lbr_from_flags:1;
unsigned int lbr_to_cycles:1;
/*
 * Intel Architectural LBR CPUID Enumeration
 */
@ -1392,6 +1398,8 @@ void intel_pmu_lbr_init_skl(void);
void intel_pmu_lbr_init_knl(void);
void intel_pmu_lbr_init(void);
void intel_pmu_arch_lbr_init(void);
void intel_pmu_pebs_data_source_nhm(void);

View File

@ -536,11 +536,14 @@ static struct perf_msr intel_rapl_spr_msrs[] = {
 * - perf_msr_probe(PERF_RAPL_MAX)
 * - want to use same event codes across both architectures
 */
- static struct perf_msr amd_rapl_msrs[PERF_RAPL_MAX] = {
- [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
+ static struct perf_msr amd_rapl_msrs[] = {
+ [PERF_RAPL_PP0] = { 0, &rapl_events_cores_group, 0, false, 0 },
+ [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_RAM] = { 0, &rapl_events_ram_group, 0, false, 0 },
+ [PERF_RAPL_PP1] = { 0, &rapl_events_gpu_group, 0, false, 0 },
+ [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, 0, false, 0 },
};
static int rapl_cpu_offline(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
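The AMD table now lists every PERF_RAPL_* slot explicitly; with a sparse designated initializer the untouched slots are zero-filled, which the probing code then has to recognise as "not present". A small standalone illustration of that C zero-fill behaviour (nothing here is kernel code; the MSR number is an arbitrary example value):

#include <stdio.h>

enum { SLOT_CORES, SLOT_PKG, SLOT_RAM, SLOT_MAX };

struct entry {
	unsigned long msr;
	const char *name;
};

/* Only SLOT_PKG is initialized; the other slots become all-zero entries. */
static struct entry table[SLOT_MAX] = {
	[SLOT_PKG] = { 0x123, "pkg-energy" },	/* 0x123: arbitrary example MSR */
};

int main(void)
{
	for (int i = 0; i < SLOT_MAX; i++)
		printf("slot %d: msr=%#lx name=%s\n", i, table[i].msr,
		       table[i].name ? table[i].name : "(none)");
	return 0;
}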

Some files were not shown because too many files have changed in this diff.