Merge tag 'kvm-s390-master-6.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

PCI interpretation compile fixes

commit 29250ba51b
@@ -1,2 +1,4 @@
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Christoph Hellwig <hch@lst.de>
Marc Gonzalez <marc.w.gonzalez@free.fr>

.mailmap

@@ -98,8 +98,7 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christian Marangi <ansuelsmth@gmail.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
Colin Ian King <colin.king@intel.com> <colin.i.king@gmail.com>
Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
Corey Minyard <minyard@acm.org>
Damian Hobson-Garcia <dhobsong@igel.co.jp>
Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
@@ -150,6 +149,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com>
Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com>
Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
@@ -253,6 +254,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
@@ -523,6 +523,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
/sys/devices/system/cpu/vulnerabilities/retbleed
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities

@@ -230,6 +230,20 @@ The possible values in this file are:
* - 'Mitigation: Clear CPU buffers'
- The processor is vulnerable and the CPU buffer clearing mitigation is
enabled.
* - 'Unknown: No mitigations'
- The processor vulnerability status is unknown because it is
out of Servicing period. Mitigation is not attempted.

Definitions:
------------

Servicing period: The process of providing functional and security updates to
Intel processors or platforms, utilizing the Intel Platform Update (IPU)
process or other similar mechanisms.

End of Servicing Updates (ESU): ESU is the date at which Intel will no
longer provide Servicing, such as through IPU or other similar update
processes. ESU dates will typically be aligned to end of quarter.

If the processor is vulnerable then the following information is appended to
the above information:
@@ -5331,6 +5331,8 @@
rodata= [KNL]
	on	Mark read-only kernel memory as read-only (default).
	off	Leave read-only kernel memory writable for debugging.
	full	Mark read-only kernel memory and aliases as read-only
		[arm64]

rockchip.usb_uart
	Enable the uart passthrough on the designated usb port
@@ -242,44 +242,34 @@ HWCAP2_MTE3
by Documentation/arm64/memory-tagging-extension.rst.

HWCAP2_SME

Functionality implied by ID_AA64PFR1_EL1.SME == 0b0001, as described
by Documentation/arm64/sme.rst.

HWCAP2_SME_I16I64

Functionality implied by ID_AA64SMFR0_EL1.I16I64 == 0b1111.

HWCAP2_SME_F64F64

Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.

HWCAP2_SME_I8I32

Functionality implied by ID_AA64SMFR0_EL1.I8I32 == 0b1111.

HWCAP2_SME_F16F32

Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1.

HWCAP2_SME_B16F32

Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1.

HWCAP2_SME_F32F32

Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1.

HWCAP2_SME_FA64

Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1.

HWCAP2_WFXT

Functionality implied by ID_AA64ISAR2_EL1.WFXT == 0b0010.

HWCAP2_EBF16

Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.

4. Unused AT_HWCAP bits
@@ -52,6 +52,8 @@ stable kernels.
| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2038923 | ARM64_ERRATUM_2038923 |
@@ -58,13 +58,11 @@ Like with atomic_t, the rule of thumb is:

- RMW operations that have a return value are fully ordered.

- RMW operations that are conditional are unordered on FAILURE,
otherwise the above rules apply. In the case of test_and_{}_bit() operations,
if the bit in memory is unchanged by the operation then it is deemed to have
failed.
- RMW operations that are conditional are fully ordered.

Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
clear_bit_unlock() which has RELEASE semantics.
Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics,
clear_bit_unlock() which has RELEASE semantics and test_bit_acquire which has
ACQUIRE semantics.

Since a platform only has a single means of achieving atomic operations
the same barriers as for atomic_t are used, see atomic_t.txt.
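A minimal sketch of the ordering rules above, assuming the generic <linux/bitops.h> API (test_and_set_bit_lock(), clear_bit_unlock() and the test_bit_acquire() helper this hunk documents); the lock word and data names are illustrative only, not taken from the patch:

#include <linux/bitops.h>
#include <linux/processor.h>

static unsigned long lockword;	/* bit 0 acts as a lock */
static int protected_data;

static void writer(void)
{
	/* A successful test_and_set_bit_lock() has ACQUIRE semantics:
	 * the critical section cannot be reordered before it. */
	while (test_and_set_bit_lock(0, &lockword))
		cpu_relax();

	protected_data++;

	/* clear_bit_unlock() has RELEASE semantics: the store above is
	 * visible before another CPU can observe the bit clear. */
	clear_bit_unlock(0, &lockword);
}

static bool reader_sees_busy(void)
{
	/* test_bit_acquire(): the load of the bit has ACQUIRE semantics,
	 * so later reads are not speculated ahead of the bit test. */
	return test_bit_acquire(0, &lockword);
}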
@ -233,6 +233,7 @@ allOf:
|
||||
- allwinner,sun8i-a83t-tcon-lcd
|
||||
- allwinner,sun8i-v3s-tcon
|
||||
- allwinner,sun9i-a80-tcon-lcd
|
||||
- allwinner,sun20i-d1-tcon-lcd
|
||||
|
||||
then:
|
||||
properties:
|
||||
@ -252,6 +253,7 @@ allOf:
|
||||
- allwinner,sun8i-a83t-tcon-tv
|
||||
- allwinner,sun8i-r40-tcon-tv
|
||||
- allwinner,sun9i-a80-tcon-tv
|
||||
- allwinner,sun20i-d1-tcon-tv
|
||||
|
||||
then:
|
||||
properties:
|
||||
@ -278,6 +280,7 @@ allOf:
|
||||
- allwinner,sun9i-a80-tcon-lcd
|
||||
- allwinner,sun4i-a10-tcon
|
||||
- allwinner,sun8i-a83t-tcon-lcd
|
||||
- allwinner,sun20i-d1-tcon-lcd
|
||||
|
||||
then:
|
||||
required:
|
||||
@ -294,6 +297,7 @@ allOf:
|
||||
- allwinner,sun8i-a23-tcon
|
||||
- allwinner,sun8i-a33-tcon
|
||||
- allwinner,sun8i-a83t-tcon-lcd
|
||||
- allwinner,sun20i-d1-tcon-lcd
|
||||
|
||||
then:
|
||||
properties:
|
||||
|
@ -14,7 +14,7 @@ MAC node:
|
||||
- mac-address : The 6-byte MAC address. If present, it is the default
|
||||
MAC address.
|
||||
- internal-phy : phandle to the internal PHY node
|
||||
- phy-handle : phandle the external PHY node
|
||||
- phy-handle : phandle to the external PHY node
|
||||
|
||||
Internal PHY node:
|
||||
- compatible : Should be "qcom,fsm9900-emac-sgmii" or "qcom,qdf2432-emac-sgmii".
|
||||
|
@ -47,12 +47,6 @@ properties:
|
||||
description:
|
||||
Properties for single LDO regulator.
|
||||
|
||||
properties:
|
||||
regulator-name:
|
||||
pattern: "^LDO[1-5]$"
|
||||
description:
|
||||
should be "LDO1", ..., "LDO5"
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
"^BUCK[1-6]$":
|
||||
@ -62,11 +56,6 @@ properties:
|
||||
Properties for single BUCK regulator.
|
||||
|
||||
properties:
|
||||
regulator-name:
|
||||
pattern: "^BUCK[1-6]$"
|
||||
description:
|
||||
should be "BUCK1", ..., "BUCK6"
|
||||
|
||||
nxp,dvs-run-voltage:
|
||||
$ref: "/schemas/types.yaml#/definitions/uint32"
|
||||
minimum: 600000
|
||||
|
@ -10,7 +10,7 @@ description:
|
||||
See spi-peripheral-props.yaml for more info.
|
||||
|
||||
maintainers:
|
||||
- Pratyush Yadav <p.yadav@ti.com>
|
||||
- Vaishnav Achath <vaishnav.a@ti.com>
|
||||
|
||||
properties:
|
||||
# cdns,qspi-nor.yaml
|
||||
|
@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
title: Cadence Quad SPI controller
|
||||
|
||||
maintainers:
|
||||
- Pratyush Yadav <p.yadav@ti.com>
|
||||
- Vaishnav Achath <vaishnav.a@ti.com>
|
||||
|
||||
allOf:
|
||||
- $ref: spi-controller.yaml#
|
||||
|
@ -16,7 +16,7 @@ description:
|
||||
their own separate schema that should be referenced from here.
|
||||
|
||||
maintainers:
|
||||
- Pratyush Yadav <p.yadav@ti.com>
|
||||
- Mark Brown <broonie@kernel.org>
|
||||
|
||||
properties:
|
||||
reg:
|
||||
|
@ -42,7 +42,7 @@ properties:
|
||||
description:
|
||||
Address ranges of the thermal registers. If more then one range is given
|
||||
the first one must be the common registers followed by each sensor
|
||||
according the datasheet.
|
||||
according to the datasheet.
|
||||
minItems: 1
|
||||
maxItems: 4
|
||||
|
||||
|
@ -214,6 +214,7 @@ patternProperties:
|
||||
- polling-delay
|
||||
- polling-delay-passive
|
||||
- thermal-sensors
|
||||
- trips
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
|
@@ -525,8 +525,8 @@ followed by a test macro::
If you need to expose a compiler capability to makefiles and/or C source files,
`CC_HAS_` is the recommended prefix for the config option::

config CC_HAS_ASM_GOTO
	def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
config CC_HAS_FOO
	def_bool $(success,$(srctree)/scripts/cc-check-foo.sh $(CC))

Build as module only
~~~~~~~~~~~~~~~~~~~~
@@ -33,7 +33,7 @@ EXAMPLE
=======
In the example below, **rtla timerlat hist** is set to run for *10* minutes,
in the cpus *0-4*, *skipping zero* only lines. Moreover, **rtla timerlat
hist** will change the priority of the *timelat* threads to run under
hist** will change the priority of the *timerlat* threads to run under
*SCHED_DEADLINE* priority, with a *10us* runtime every *1ms* period. The
*1ms* period is also passed to the *timerlat* tracer::
@ -2178,7 +2178,7 @@ M: Jean-Marie Verdun <verdun@hpe.com>
|
||||
M: Nick Hawkins <nick.hawkins@hpe.com>
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/arm/hpe,gxp.yaml
|
||||
F: Documentation/devicetree/bindings/spi/hpe,gxp-spi.yaml
|
||||
F: Documentation/devicetree/bindings/spi/hpe,gxp-spifi.yaml
|
||||
F: Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
|
||||
F: arch/arm/boot/dts/hpe-bmc*
|
||||
F: arch/arm/boot/dts/hpe-gxp*
|
||||
@ -3612,6 +3612,7 @@ F: include/linux/find.h
|
||||
F: include/linux/nodemask.h
|
||||
F: lib/bitmap.c
|
||||
F: lib/cpumask.c
|
||||
F: lib/cpumask_kunit.c
|
||||
F: lib/find_bit.c
|
||||
F: lib/find_bit_benchmark.c
|
||||
F: lib/test_bitmap.c
|
||||
@ -3679,6 +3680,7 @@ F: Documentation/networking/bonding.rst
|
||||
F: drivers/net/bonding/
|
||||
F: include/net/bond*
|
||||
F: include/uapi/linux/if_bonding.h
|
||||
F: tools/testing/selftests/drivers/net/bonding/
|
||||
|
||||
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
|
||||
M: Dan Robertson <dan@dlrobertson.com>
|
||||
@ -5145,6 +5147,7 @@ T: git git://git.samba.org/sfrench/cifs-2.6.git
|
||||
F: Documentation/admin-guide/cifs/
|
||||
F: fs/cifs/
|
||||
F: fs/smbfs_common/
|
||||
F: include/uapi/linux/cifs
|
||||
|
||||
COMPACTPCI HOTPLUG CORE
|
||||
M: Scott Murray <scott@spiteful.org>
|
||||
@ -9780,7 +9783,7 @@ M: Christian Brauner <brauner@kernel.org>
|
||||
M: Seth Forshee <sforshee@kernel.org>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
|
||||
T: git://git.kernel.org/pub/scm/linux/kernel/git/vfs/idmapping.git
|
||||
F: Documentation/filesystems/idmappings.rst
|
||||
F: tools/testing/selftests/mount_setattr/
|
||||
F: include/linux/mnt_idmapping.h
|
||||
@ -10657,6 +10660,7 @@ T: git git://git.kernel.dk/linux-block
|
||||
T: git git://git.kernel.dk/liburing
|
||||
F: io_uring/
|
||||
F: include/linux/io_uring.h
|
||||
F: include/linux/io_uring_types.h
|
||||
F: include/uapi/linux/io_uring.h
|
||||
F: tools/io_uring/
|
||||
|
||||
|
Makefile
@ -2,7 +2,7 @@
|
||||
VERSION = 6
|
||||
PATCHLEVEL = 0
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc1
|
||||
EXTRAVERSION = -rc3
|
||||
NAME = Hurr durr I'ma ninja sloth
|
||||
|
||||
# *DOCUMENTATION*
|
||||
@ -1113,13 +1113,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
|
||||
$(patsubst %/,%,$(filter %/, $(core-) \
|
||||
$(drivers-) $(libs-))))
|
||||
|
||||
subdir-modorder := $(addsuffix modules.order,$(filter %/, \
|
||||
$(core-y) $(core-m) $(libs-y) $(libs-m) \
|
||||
$(drivers-y) $(drivers-m)))
|
||||
|
||||
build-dirs := $(vmlinux-dirs)
|
||||
clean-dirs := $(vmlinux-alldirs)
|
||||
|
||||
subdir-modorder := $(addsuffix /modules.order, $(build-dirs))
|
||||
|
||||
# Externally visible symbols (used by link-vmlinux.sh)
|
||||
KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
|
||||
KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
|
||||
|
@ -53,7 +53,6 @@ config KPROBES
|
||||
config JUMP_LABEL
|
||||
bool "Optimize very unlikely/likely branches"
|
||||
depends on HAVE_ARCH_JUMP_LABEL
|
||||
depends on CC_HAS_ASM_GOTO
|
||||
select OBJTOOL if HAVE_JUMP_LABEL_HACK
|
||||
help
|
||||
This option enables a transparent branch optimization that
|
||||
@ -1361,7 +1360,7 @@ config HAVE_PREEMPT_DYNAMIC_CALL
|
||||
|
||||
config HAVE_PREEMPT_DYNAMIC_KEY
|
||||
bool
|
||||
depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
|
||||
depends on HAVE_ARCH_JUMP_LABEL
|
||||
select HAVE_PREEMPT_DYNAMIC
|
||||
help
|
||||
An architecture should select this if it can handle the preemption
|
||||
|
@ -283,11 +283,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
|
||||
}
|
||||
#define arch_test_bit generic_test_bit
|
||||
#define arch_test_bit_acquire generic_test_bit_acquire
|
||||
|
||||
/*
|
||||
* ffz = Find First Zero in word. Undefined if no zero exists,
|
||||
|
@ -917,6 +917,23 @@ config ARM64_ERRATUM_1902691
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config ARM64_ERRATUM_2457168
|
||||
bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
|
||||
depends on ARM64_AMU_EXTN
|
||||
default y
|
||||
help
|
||||
This option adds the workaround for ARM Cortex-A510 erratum 2457168.
|
||||
|
||||
The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
|
||||
as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
|
||||
incorrectly giving a significantly higher output value.
|
||||
|
||||
Work around this problem by returning 0 when reading the affected counter in
|
||||
key locations that results in disabling all users of this counter. This effect
|
||||
is the same to firmware disabling affected counters.
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config CAVIUM_ERRATUM_22375
|
||||
bool "Cavium erratum 22375, 24313"
|
||||
default y
|
||||
|
@ -71,7 +71,7 @@ static __always_inline int icache_is_vpipt(void)
|
||||
|
||||
static inline u32 cache_type_cwg(void)
|
||||
{
|
||||
return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
|
||||
return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
|
||||
}
|
||||
|
||||
#define __read_mostly __section(".data..read_mostly")
|
||||
|
@ -153,7 +153,7 @@ struct vl_info {
|
||||
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
|
||||
extern void sve_alloc(struct task_struct *task);
|
||||
extern void sve_alloc(struct task_struct *task, bool flush);
|
||||
extern void fpsimd_release_task(struct task_struct *task);
|
||||
extern void fpsimd_sync_to_sve(struct task_struct *task);
|
||||
extern void fpsimd_force_sync_to_sve(struct task_struct *task);
|
||||
@ -256,7 +256,7 @@ size_t sve_state_size(struct task_struct const *task);
|
||||
|
||||
#else /* ! CONFIG_ARM64_SVE */
|
||||
|
||||
static inline void sve_alloc(struct task_struct *task) { }
|
||||
static inline void sve_alloc(struct task_struct *task, bool flush) { }
|
||||
static inline void fpsimd_release_task(struct task_struct *task) { }
|
||||
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
|
||||
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
|
||||
|
@ -3,6 +3,8 @@
|
||||
#ifndef __ARM64_ASM_SETUP_H
|
||||
#define __ARM64_ASM_SETUP_H
|
||||
|
||||
#include <linux/string.h>
|
||||
|
||||
#include <uapi/asm/setup.h>
|
||||
|
||||
void *get_early_fdt_ptr(void);
|
||||
@ -14,4 +16,19 @@ void early_fdt_map(u64 dt_phys);
|
||||
extern phys_addr_t __fdt_pointer __initdata;
|
||||
extern u64 __cacheline_aligned boot_args[4];
|
||||
|
||||
static inline bool arch_parse_debug_rodata(char *arg)
|
||||
{
|
||||
extern bool rodata_enabled;
|
||||
extern bool rodata_full;
|
||||
|
||||
if (arg && !strcmp(arg, "full")) {
|
||||
rodata_enabled = true;
|
||||
rodata_full = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
#define arch_parse_debug_rodata arch_parse_debug_rodata
|
||||
|
||||
#endif
|
||||
|
@ -1116,6 +1116,7 @@
|
||||
|
||||
#else
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/build_bug.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/alternative.h>
|
||||
@ -1209,8 +1210,6 @@
|
||||
par; \
|
||||
})
|
||||
|
||||
#endif
|
||||
|
||||
#define SYS_FIELD_GET(reg, field, val) \
|
||||
FIELD_GET(reg##_##field##_MASK, val)
|
||||
|
||||
@ -1220,4 +1219,6 @@
|
||||
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
|
||||
FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_SYSREG_H */
|
||||
|
@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
|
||||
|
||||
int init_cache_level(unsigned int cpu)
|
||||
{
|
||||
unsigned int ctype, level, leaves, fw_level;
|
||||
unsigned int ctype, level, leaves;
|
||||
int fw_level;
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
|
||||
for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
|
||||
@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
|
||||
else
|
||||
fw_level = acpi_find_last_cache_level(cpu);
|
||||
|
||||
if (fw_level < 0)
|
||||
return fw_level;
|
||||
|
||||
if (level < fw_level) {
|
||||
/*
|
||||
* some external caches not specified in CLIDR_EL1
|
||||
|
@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
|
||||
#ifdef CONFIG_ARM64_ERRATUM_1286807
|
||||
{
|
||||
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
|
||||
},
|
||||
{
|
||||
/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
|
||||
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
|
||||
},
|
||||
@ -654,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_2457168
|
||||
{
|
||||
.desc = "ARM erratum 2457168",
|
||||
.capability = ARM64_WORKAROUND_2457168,
|
||||
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
|
||||
|
||||
/* Cortex-A510 r0p0-r1p1 */
|
||||
CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_2038923
|
||||
{
|
||||
.desc = "ARM erratum 2038923",
|
||||
|
@ -1870,6 +1870,9 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
|
||||
pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
|
||||
smp_processor_id());
|
||||
cpumask_set_cpu(smp_processor_id(), &amu_cpus);
|
||||
|
||||
/* 0 reference values signal broken/disabled counters */
|
||||
if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
|
||||
update_freq_counters_refs();
|
||||
}
|
||||
}
|
||||
|
@ -502,7 +502,7 @@ tsk .req x28 // current thread_info
|
||||
SYM_CODE_START(vectors)
|
||||
kernel_ventry 1, t, 64, sync // Synchronous EL1t
|
||||
kernel_ventry 1, t, 64, irq // IRQ EL1t
|
||||
kernel_ventry 1, t, 64, fiq // FIQ EL1h
|
||||
kernel_ventry 1, t, 64, fiq // FIQ EL1t
|
||||
kernel_ventry 1, t, 64, error // Error EL1t
|
||||
|
||||
kernel_ventry 1, h, 64, sync // Synchronous EL1h
|
||||
|
@ -715,10 +715,12 @@ size_t sve_state_size(struct task_struct const *task)
|
||||
* do_sve_acc() case, there is no ABI requirement to hide stale data
|
||||
* written previously be task.
|
||||
*/
|
||||
void sve_alloc(struct task_struct *task)
|
||||
void sve_alloc(struct task_struct *task, bool flush)
|
||||
{
|
||||
if (task->thread.sve_state) {
|
||||
memset(task->thread.sve_state, 0, sve_state_size(task));
|
||||
if (flush)
|
||||
memset(task->thread.sve_state, 0,
|
||||
sve_state_size(task));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1388,7 +1390,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs)
|
||||
return;
|
||||
}
|
||||
|
||||
sve_alloc(current);
|
||||
sve_alloc(current, true);
|
||||
if (!current->thread.sve_state) {
|
||||
force_sig(SIGKILL);
|
||||
return;
|
||||
@ -1439,7 +1441,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
|
||||
return;
|
||||
}
|
||||
|
||||
sve_alloc(current);
|
||||
sve_alloc(current, false);
|
||||
sme_alloc(current);
|
||||
if (!current->thread.sve_state || !current->thread.za_state) {
|
||||
force_sig(SIGKILL);
|
||||
@ -1460,17 +1462,6 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
|
||||
fpsimd_bind_task_to_cpu();
|
||||
}
|
||||
|
||||
/*
|
||||
* If SVE was not already active initialise the SVE registers,
|
||||
* any non-shared state between the streaming and regular SVE
|
||||
* registers is architecturally guaranteed to be zeroed when
|
||||
* we enter streaming mode. We do not need to initialize ZA
|
||||
* since ZA must be disabled at this point and enabling ZA is
|
||||
* architecturally defined to zero ZA.
|
||||
*/
|
||||
if (system_supports_sve() && !test_thread_flag(TIF_SVE))
|
||||
sve_init_regs();
|
||||
|
||||
put_cpu_fpsimd_context();
|
||||
}
|
||||
|
||||
|
@ -94,10 +94,8 @@ asmlinkage u64 kaslr_early_init(void *fdt)
|
||||
|
||||
seed = get_kaslr_seed(fdt);
|
||||
if (!seed) {
|
||||
#ifdef CONFIG_ARCH_RANDOM
|
||||
if (!__early_cpu_has_rndr() ||
|
||||
!__arm64_rndr((unsigned long *)&seed))
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -882,7 +882,7 @@ static int sve_set_common(struct task_struct *target,
|
||||
* state and ensure there's storage.
|
||||
*/
|
||||
if (target->thread.svcr != old_svcr)
|
||||
sve_alloc(target);
|
||||
sve_alloc(target, true);
|
||||
}
|
||||
|
||||
/* Registers: FPSIMD-only case */
|
||||
@ -912,7 +912,7 @@ static int sve_set_common(struct task_struct *target,
|
||||
goto out;
|
||||
}
|
||||
|
||||
sve_alloc(target);
|
||||
sve_alloc(target, true);
|
||||
if (!target->thread.sve_state) {
|
||||
ret = -ENOMEM;
|
||||
clear_tsk_thread_flag(target, TIF_SVE);
|
||||
@ -1082,7 +1082,7 @@ static int za_set(struct task_struct *target,
|
||||
|
||||
/* Ensure there is some SVE storage for streaming mode */
|
||||
if (!target->thread.sve_state) {
|
||||
sve_alloc(target);
|
||||
sve_alloc(target, false);
|
||||
if (!target->thread.sve_state) {
|
||||
clear_thread_flag(TIF_SME);
|
||||
ret = -ENOMEM;
|
||||
|
@ -91,7 +91,7 @@ static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
|
||||
* not taken into account. This limit is not a guarantee and is
|
||||
* NOT ABI.
|
||||
*/
|
||||
#define SIGFRAME_MAXSZ SZ_64K
|
||||
#define SIGFRAME_MAXSZ SZ_256K
|
||||
|
||||
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
|
||||
unsigned long *offset, size_t size, bool extend)
|
||||
@ -310,7 +310,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
|
||||
fpsimd_flush_task_state(current);
|
||||
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
|
||||
|
||||
sve_alloc(current);
|
||||
sve_alloc(current, true);
|
||||
if (!current->thread.sve_state) {
|
||||
clear_thread_flag(TIF_SVE);
|
||||
return -ENOMEM;
|
||||
@ -926,6 +926,16 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
|
||||
|
||||
/* Signal handlers are invoked with ZA and streaming mode disabled */
|
||||
if (system_supports_sme()) {
|
||||
/*
|
||||
* If we were in streaming mode the saved register
|
||||
* state was SVE but we will exit SM and use the
|
||||
* FPSIMD register state - flush the saved FPSIMD
|
||||
* register state in case it gets loaded.
|
||||
*/
|
||||
if (current->thread.svcr & SVCR_SM_MASK)
|
||||
memset(¤t->thread.uw.fpsimd_state, 0,
|
||||
sizeof(current->thread.uw.fpsimd_state));
|
||||
|
||||
current->thread.svcr &= ~(SVCR_ZA_MASK |
|
||||
SVCR_SM_MASK);
|
||||
sme_smstop();
|
||||
|
@ -296,12 +296,25 @@ core_initcall(init_amu_fie);
|
||||
|
||||
static void cpu_read_corecnt(void *val)
|
||||
{
|
||||
/*
|
||||
* A value of 0 can be returned if the current CPU does not support AMUs
|
||||
* or if the counter is disabled for this CPU. A return value of 0 at
|
||||
* counter read is properly handled as an error case by the users of the
|
||||
* counter.
|
||||
*/
|
||||
*(u64 *)val = read_corecnt();
|
||||
}
|
||||
|
||||
static void cpu_read_constcnt(void *val)
|
||||
{
|
||||
*(u64 *)val = read_constcnt();
|
||||
/*
|
||||
* Return 0 if the current CPU is affected by erratum 2457168. A value
|
||||
* of 0 is also returned if the current CPU does not support AMUs or if
|
||||
* the counter is disabled. A return value of 0 at counter read is
|
||||
* properly handled as an error case by the users of the counter.
|
||||
*/
|
||||
*(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
|
||||
0UL : read_constcnt();
|
||||
}
|
||||
|
||||
static inline
|
||||
@ -328,7 +341,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
|
||||
*/
|
||||
bool cpc_ffh_supported(void)
|
||||
{
|
||||
return freq_counters_valid(get_cpu_with_amu_feat());
|
||||
int cpu = get_cpu_with_amu_feat();
|
||||
|
||||
/*
|
||||
* FFH is considered supported if there is at least one present CPU that
|
||||
* supports AMUs. Using FFH to read core and reference counters for CPUs
|
||||
* that do not support AMUs, have counters disabled or that are affected
|
||||
* by errata, will result in a return value of 0.
|
||||
*
|
||||
* This is done to allow any enabled and valid counters to be read
|
||||
* through FFH, knowing that potentially returning 0 as counter value is
|
||||
* properly handled by the users of these counters.
|
||||
*/
|
||||
if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
|
||||
|
@ -642,24 +642,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
|
||||
vm_area_add_early(vma);
|
||||
}
|
||||
|
||||
static int __init parse_rodata(char *arg)
|
||||
{
|
||||
int ret = strtobool(arg, &rodata_enabled);
|
||||
if (!ret) {
|
||||
rodata_full = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* permit 'full' in addition to boolean options */
|
||||
if (strcmp(arg, "full"))
|
||||
return -EINVAL;
|
||||
|
||||
rodata_enabled = true;
|
||||
rodata_full = true;
|
||||
return 0;
|
||||
}
|
||||
early_param("rodata", parse_rodata);
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
static int __init map_entry_trampoline(void)
|
||||
{
|
||||
|
@ -67,6 +67,7 @@ WORKAROUND_1902691
|
||||
WORKAROUND_2038923
|
||||
WORKAROUND_2064142
|
||||
WORKAROUND_2077057
|
||||
WORKAROUND_2457168
|
||||
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
|
||||
WORKAROUND_TSB_FLUSH_FAILURE
|
||||
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
|
||||
|
@ -179,6 +179,21 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
int retval;
|
||||
|
||||
asm volatile(
|
||||
"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
|
||||
: "=&r" (retval)
|
||||
: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
|
||||
: "p0", "memory"
|
||||
);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
/*
|
||||
* ffz - find first zero in word.
|
||||
* @word: The word to search
|
||||
|
@ -331,11 +331,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
return (old & bit) != 0;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
|
||||
}
|
||||
#define arch_test_bit generic_test_bit
|
||||
#define arch_test_bit_acquire generic_test_bit_acquire
|
||||
|
||||
/**
|
||||
* ffz - find the first zero bit in a long word
|
||||
|
@ -111,6 +111,7 @@ config LOONGARCH
|
||||
select PCI_ECAM if ACPI
|
||||
select PCI_LOONGSON
|
||||
select PCI_MSI_ARCH_FALLBACKS
|
||||
select PCI_QUIRKS
|
||||
select PERF_USE_VMALLOC
|
||||
select RTC_LIB
|
||||
select SMP
|
||||
|
@ -109,4 +109,20 @@ extern unsigned long vm_map_base;
|
||||
*/
|
||||
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
|
||||
|
||||
/*
|
||||
* On LoongArch, I/O ports mappring is following:
|
||||
*
|
||||
* | .... |
|
||||
* |-----------------------|
|
||||
* | pci io ports(16K~32M) |
|
||||
* |-----------------------|
|
||||
* | isa io ports(0 ~16K) |
|
||||
* PCI_IOBASE ->|-----------------------|
|
||||
* | .... |
|
||||
*/
|
||||
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
|
||||
#define PCI_IOSIZE SZ_32M
|
||||
#define ISA_IOSIZE SZ_16K
|
||||
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
|
||||
|
||||
#endif /* _ASM_ADDRSPACE_H */
|
||||
|
@ -5,8 +5,9 @@
|
||||
#ifndef __ASM_CMPXCHG_H
|
||||
#define __ASM_CMPXCHG_H
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include <linux/bits.h>
|
||||
#include <linux/build_bug.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
#define __xchg_asm(amswap_db, m, val) \
|
||||
({ \
|
||||
@ -21,10 +22,53 @@
|
||||
__ret; \
|
||||
})
|
||||
|
||||
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
|
||||
unsigned int size)
|
||||
{
|
||||
unsigned int shift;
|
||||
u32 old32, mask, temp;
|
||||
volatile u32 *ptr32;
|
||||
|
||||
/* Mask value to the correct size. */
|
||||
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
|
||||
val &= mask;
|
||||
|
||||
/*
|
||||
* Calculate a shift & mask that correspond to the value we wish to
|
||||
* exchange within the naturally aligned 4 byte integer that includes
|
||||
* it.
|
||||
*/
|
||||
shift = (unsigned long)ptr & 0x3;
|
||||
shift *= BITS_PER_BYTE;
|
||||
mask <<= shift;
|
||||
|
||||
/*
|
||||
* Calculate a pointer to the naturally aligned 4 byte integer that
|
||||
* includes our byte of interest, and load its value.
|
||||
*/
|
||||
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
|
||||
|
||||
asm volatile (
|
||||
"1: ll.w %0, %3 \n"
|
||||
" andn %1, %0, %z4 \n"
|
||||
" or %1, %1, %z5 \n"
|
||||
" sc.w %1, %2 \n"
|
||||
" beqz %1, 1b \n"
|
||||
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
|
||||
: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
|
||||
: "memory");
|
||||
|
||||
return (old32 & mask) >> shift;
|
||||
}
|
||||
|
||||
static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
|
||||
int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
case 2:
|
||||
return __xchg_small(ptr, x, size);
|
||||
|
||||
case 4:
|
||||
return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
|
||||
|
||||
@ -67,10 +111,62 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
|
||||
__ret; \
|
||||
})
|
||||
|
||||
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
|
||||
unsigned int new, unsigned int size)
|
||||
{
|
||||
unsigned int shift;
|
||||
u32 old32, mask, temp;
|
||||
volatile u32 *ptr32;
|
||||
|
||||
/* Mask inputs to the correct size. */
|
||||
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
|
||||
old &= mask;
|
||||
new &= mask;
|
||||
|
||||
/*
|
||||
* Calculate a shift & mask that correspond to the value we wish to
|
||||
* compare & exchange within the naturally aligned 4 byte integer
|
||||
* that includes it.
|
||||
*/
|
||||
shift = (unsigned long)ptr & 0x3;
|
||||
shift *= BITS_PER_BYTE;
|
||||
old <<= shift;
|
||||
new <<= shift;
|
||||
mask <<= shift;
|
||||
|
||||
/*
|
||||
* Calculate a pointer to the naturally aligned 4 byte integer that
|
||||
* includes our byte of interest, and load its value.
|
||||
*/
|
||||
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
|
||||
|
||||
asm volatile (
|
||||
"1: ll.w %0, %3 \n"
|
||||
" and %1, %0, %z4 \n"
|
||||
" bne %1, %z5, 2f \n"
|
||||
" andn %1, %0, %z4 \n"
|
||||
" or %1, %1, %z6 \n"
|
||||
" sc.w %1, %2 \n"
|
||||
" beqz %1, 1b \n"
|
||||
" b 3f \n"
|
||||
"2: \n"
|
||||
__WEAK_LLSC_MB
|
||||
"3: \n"
|
||||
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
|
||||
: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
|
||||
: "memory");
|
||||
|
||||
return (old32 & mask) >> shift;
|
||||
}
|
||||
|
||||
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, unsigned int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
case 2:
|
||||
return __cmpxchg_small(ptr, old, new, size);
|
||||
|
||||
case 4:
|
||||
return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
|
||||
(u32)old, new);
|
||||
|
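The shift/mask arithmetic in __cmpxchg_small() above generalises to any platform that only provides a word-sized compare-and-swap. A minimal sketch of the same idea in portable C, assuming little-endian byte addressing (as on LoongArch) and using the GCC/Clang __atomic builtin in place of the ll.w/sc.w loop; the function name is illustrative only, not the kernel's:

#include <stdint.h>

static unsigned int cmpxchg_small_sketch(volatile void *ptr, unsigned int old,
					 unsigned int new, unsigned int size)
{
	/* Byte offset within the aligned 32-bit word -> bit shift. */
	unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
	uint32_t mask = ((size == 1) ? 0xffu : 0xffffu) << shift;
	volatile uint32_t *ptr32 = (volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
	uint32_t o = (old << shift) & mask;
	uint32_t n = (new << shift) & mask;
	uint32_t cur, want;

	do {
		cur = *ptr32;
		if ((cur & mask) != o)		/* mismatch: report current sub-word value */
			return (cur & mask) >> shift;
		want = (cur & ~mask) | n;	/* splice the new bytes into the word */
	} while (!__atomic_compare_exchange_n(ptr32, &cur, want, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return o >> shift;			/* success: previous sub-word value */
}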
@ -7,34 +7,15 @@
|
||||
|
||||
#define ARCH_HAS_IOREMAP_WC
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/bug.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable-bits.h>
|
||||
#include <asm/string.h>
|
||||
|
||||
/*
|
||||
* On LoongArch, I/O ports mappring is following:
|
||||
*
|
||||
* | .... |
|
||||
* |-----------------------|
|
||||
* | pci io ports(64K~32M) |
|
||||
* |-----------------------|
|
||||
* | isa io ports(0 ~16K) |
|
||||
* PCI_IOBASE ->|-----------------------|
|
||||
* | .... |
|
||||
*/
|
||||
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
|
||||
#define PCI_IOSIZE SZ_32M
|
||||
#define ISA_IOSIZE SZ_16K
|
||||
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
|
||||
|
||||
/*
|
||||
* Change "struct page" to physical address.
|
||||
*/
|
||||
|
@ -81,7 +81,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
|
||||
#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE
|
||||
#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
|
||||
|
||||
extern int find_pch_pic(u32 gsi);
|
||||
struct acpi_madt_lio_pic;
|
||||
struct acpi_madt_eio_pic;
|
||||
struct acpi_madt_ht_pic;
|
||||
|
@ -95,7 +95,7 @@ static inline int pfn_valid(unsigned long pfn)
|
||||
|
||||
#endif
|
||||
|
||||
#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
|
||||
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
|
||||
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
|
||||
|
||||
extern int __virt_addr_valid(volatile void *kaddr);
|
||||
|
@ -123,6 +123,10 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
|
||||
int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
case 2:
|
||||
return __xchg_small((volatile void *)ptr, val, size);
|
||||
|
||||
case 4:
|
||||
return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
|
||||
|
||||
@ -204,9 +208,13 @@ do { \
|
||||
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
|
||||
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
|
||||
|
||||
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
|
||||
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
|
||||
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
|
||||
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
|
||||
|
||||
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
|
||||
|
@ -59,7 +59,6 @@
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
struct mm_struct;
|
||||
struct vm_area_struct;
|
||||
@ -145,7 +144,7 @@ static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
|
||||
*p4d = p4dval;
|
||||
}
|
||||
|
||||
#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
|
||||
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
|
||||
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
|
||||
|
||||
#endif
|
||||
@ -188,7 +187,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
|
||||
|
||||
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
|
||||
|
||||
#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
|
||||
#define pud_phys(pud) PHYSADDR(pud_val(pud))
|
||||
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
|
||||
|
||||
#endif
|
||||
@ -221,7 +220,7 @@ static inline void pmd_clear(pmd_t *pmdp)
|
||||
|
||||
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
|
||||
|
||||
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
|
||||
#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
|
||||
|
||||
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
|
||||
|
@ -1,10 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
#ifndef _ASM_REBOOT_H
|
||||
#define _ASM_REBOOT_H
|
||||
|
||||
extern void (*pm_restart)(void);
|
||||
|
||||
#endif /* _ASM_REBOOT_H */
|
@ -15,10 +15,16 @@
|
||||
#include <acpi/reboot.h>
|
||||
#include <asm/idle.h>
|
||||
#include <asm/loongarch.h>
|
||||
#include <asm/reboot.h>
|
||||
|
||||
static void default_halt(void)
|
||||
void (*pm_power_off)(void);
|
||||
EXPORT_SYMBOL(pm_power_off);
|
||||
|
||||
void machine_halt(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
local_irq_disable();
|
||||
clear_csr_ecfg(ECFG0_IM);
|
||||
|
||||
@ -30,18 +36,29 @@ static void default_halt(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void default_poweroff(void)
|
||||
void machine_power_off(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
do_kernel_power_off();
|
||||
#ifdef CONFIG_EFI
|
||||
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
|
||||
#endif
|
||||
|
||||
while (true) {
|
||||
__arch_cpu_idle();
|
||||
}
|
||||
}
|
||||
|
||||
static void default_restart(void)
|
||||
void machine_restart(char *command)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
do_kernel_restart(command);
|
||||
#ifdef CONFIG_EFI
|
||||
if (efi_capsule_pending(NULL))
|
||||
efi_reboot(REBOOT_WARM, NULL);
|
||||
@ -55,47 +72,3 @@ static void default_restart(void)
|
||||
__arch_cpu_idle();
|
||||
}
|
||||
}
|
||||
|
||||
void (*pm_restart)(void);
|
||||
EXPORT_SYMBOL(pm_restart);
|
||||
|
||||
void (*pm_power_off)(void);
|
||||
EXPORT_SYMBOL(pm_power_off);
|
||||
|
||||
void machine_halt(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
default_halt();
|
||||
}
|
||||
|
||||
void machine_power_off(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
pm_power_off();
|
||||
}
|
||||
|
||||
void machine_restart(char *command)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
do_kernel_restart(command);
|
||||
pm_restart();
|
||||
}
|
||||
|
||||
static int __init loongarch_reboot_setup(void)
|
||||
{
|
||||
pm_restart = default_restart;
|
||||
pm_power_off = default_poweroff;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
arch_initcall(loongarch_reboot_setup);
|
||||
|
@ -216,6 +216,10 @@ good_area:
|
||||
return;
|
||||
}
|
||||
|
||||
/* The fault is fully completed (including releasing mmap lock) */
|
||||
if (fault & VM_FAULT_COMPLETED)
|
||||
return;
|
||||
|
||||
if (unlikely(fault & VM_FAULT_RETRY)) {
|
||||
flags |= FAULT_FLAG_TRIED;
|
||||
|
||||
|
@ -2,16 +2,9 @@
|
||||
/*
|
||||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/elf-randomize.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/sched/mm.h>
|
||||
|
||||
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
|
||||
EXPORT_SYMBOL(shm_align_mask);
|
||||
@ -120,6 +113,6 @@ int __virt_addr_valid(volatile void *kaddr)
|
||||
if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
|
||||
return 0;
|
||||
|
||||
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
|
||||
return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__virt_addr_valid);
|
||||
|
@ -24,6 +24,8 @@ static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
|
||||
return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE);
|
||||
}
|
||||
|
||||
extern
|
||||
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
|
||||
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
|
||||
{
|
||||
int cpu_id;
|
||||
|
@ -6,20 +6,23 @@
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
|
||||
int __vdso_clock_gettime(clockid_t clock,
|
||||
struct __kernel_timespec *ts)
|
||||
extern
|
||||
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
|
||||
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
|
||||
{
|
||||
return __cvdso_clock_gettime(clock, ts);
|
||||
}
|
||||
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
|
||||
struct timezone *tz)
|
||||
extern
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
|
||||
{
|
||||
return __cvdso_gettimeofday(tv, tz);
|
||||
}
|
||||
|
||||
int __vdso_clock_getres(clockid_t clock_id,
|
||||
struct __kernel_timespec *res)
|
||||
extern
|
||||
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
|
||||
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
|
||||
{
|
||||
return __cvdso_clock_getres(clock_id, res);
|
||||
}
|
||||
|
@ -157,11 +157,8 @@ arch___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
change_bit(nr, addr);
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
|
||||
}
|
||||
#define arch_test_bit generic_test_bit
|
||||
#define arch_test_bit_acquire generic_test_bit_acquire
|
||||
|
||||
static inline int bset_reg_test_and_set_bit(int nr,
|
||||
volatile unsigned long *vaddr)
|
||||
|
@ -50,7 +50,8 @@
|
||||
stw r13, PT_R13(sp)
|
||||
stw r14, PT_R14(sp)
|
||||
stw r15, PT_R15(sp)
|
||||
stw r2, PT_ORIG_R2(sp)
|
||||
movi r24, -1
|
||||
stw r24, PT_ORIG_R2(sp)
|
||||
stw r7, PT_ORIG_R7(sp)
|
||||
|
||||
stw ra, PT_RA(sp)
|
||||
|
@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *);
|
||||
((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\
|
||||
- 1)
|
||||
|
||||
#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1)
|
||||
|
||||
int do_syscall_trace_enter(void);
|
||||
void do_syscall_trace_exit(void);
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
@ -185,6 +185,7 @@ ENTRY(handle_system_call)
|
||||
ldw r5, PT_R5(sp)
|
||||
|
||||
local_restart:
|
||||
stw r2, PT_ORIG_R2(sp)
|
||||
/* Check that the requested system call is within limits */
|
||||
movui r1, __NR_syscalls
|
||||
bgeu r2, r1, ret_invsyscall
|
||||
@ -192,7 +193,6 @@ local_restart:
|
||||
movhi r11, %hiadj(sys_call_table)
|
||||
add r1, r1, r11
|
||||
ldw r1, %lo(sys_call_table)(r1)
|
||||
beq r1, r0, ret_invsyscall
|
||||
|
||||
/* Check if we are being traced */
|
||||
GET_THREAD_INFO r11
|
||||
@ -213,6 +213,9 @@ local_restart:
|
||||
translate_rc_and_ret:
|
||||
movi r1, 0
|
||||
bge r2, zero, 3f
|
||||
ldw r1, PT_ORIG_R2(sp)
|
||||
addi r1, r1, 1
|
||||
beq r1, zero, 3f
|
||||
sub r2, zero, r2
|
||||
movi r1, 1
|
||||
3:
|
||||
@ -255,9 +258,9 @@ traced_system_call:
|
||||
ldw r6, PT_R6(sp)
|
||||
ldw r7, PT_R7(sp)
|
||||
|
||||
/* Fetch the syscall function, we don't need to check the boundaries
|
||||
* since this is already done.
|
||||
*/
|
||||
/* Fetch the syscall function. */
|
||||
movui r1, __NR_syscalls
|
||||
bgeu r2, r1, traced_invsyscall
|
||||
slli r1, r2, 2
|
||||
movhi r11,%hiadj(sys_call_table)
|
||||
add r1, r1, r11
|
||||
@ -276,6 +279,9 @@ traced_system_call:
|
||||
translate_rc_and_ret2:
|
||||
movi r1, 0
|
||||
bge r2, zero, 4f
|
||||
ldw r1, PT_ORIG_R2(sp)
|
||||
addi r1, r1, 1
|
||||
beq r1, zero, 4f
|
||||
sub r2, zero, r2
|
||||
movi r1, 1
|
||||
4:
|
||||
@ -287,6 +293,11 @@ end_translate_rc_and_ret2:
|
||||
RESTORE_SWITCH_STACK
|
||||
br ret_from_exception
|
||||
|
||||
/* If the syscall number was invalid return ENOSYS */
|
||||
traced_invsyscall:
|
||||
movi r2, -ENOSYS
|
||||
br translate_rc_and_ret2
|
||||
|
||||
Luser_return:
|
||||
GET_THREAD_INFO r11 /* get thread_info pointer */
|
||||
ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
|
||||
@ -336,9 +347,6 @@ external_interrupt:
|
||||
/* skip if no interrupt is pending */
|
||||
beq r12, r0, ret_from_interrupt
|
||||
|
||||
movi r24, -1
|
||||
stw r24, PT_ORIG_R2(sp)
|
||||
|
||||
/*
|
||||
* Process an external hardware interrupt.
|
||||
*/
|
||||
|
@ -242,7 +242,7 @@ static int do_signal(struct pt_regs *regs)
|
||||
/*
|
||||
* If we were from a system call, check for system call restarting...
|
||||
*/
|
||||
if (regs->orig_r2 >= 0) {
|
||||
if (regs->orig_r2 >= 0 && regs->r1) {
|
||||
continue_addr = regs->ea;
|
||||
restart_addr = continue_addr - 4;
|
||||
retval = regs->r2;
|
||||
@ -264,6 +264,7 @@ static int do_signal(struct pt_regs *regs)
|
||||
regs->ea = restart_addr;
|
||||
break;
|
||||
}
|
||||
regs->orig_r2 = -1;
|
||||
}
|
||||
|
||||
if (get_signal(&ksig)) {
|
||||
|
@ -13,5 +13,6 @@
|
||||
#define __SYSCALL(nr, call) [nr] = (call),
|
||||
|
||||
void *sys_call_table[__NR_syscalls] = {
|
||||
[0 ... __NR_syscalls-1] = sys_ni_syscall,
|
||||
#include <asm/unistd.h>
|
||||
};
|
||||
|
@ -146,10 +146,10 @@ menu "Processor type and features"
|
||||
|
||||
choice
|
||||
prompt "Processor type"
|
||||
default PA7000
|
||||
default PA7000 if "$(ARCH)" = "parisc"
|
||||
|
||||
config PA7000
|
||||
bool "PA7000/PA7100"
|
||||
bool "PA7000/PA7100" if "$(ARCH)" = "parisc"
|
||||
help
|
||||
This is the processor type of your CPU. This information is
|
||||
used for optimizing purposes. In order to compile a kernel
|
||||
@ -160,21 +160,21 @@ config PA7000
|
||||
which is required on some machines.
|
||||
|
||||
config PA7100LC
|
||||
bool "PA7100LC"
|
||||
bool "PA7100LC" if "$(ARCH)" = "parisc"
|
||||
help
|
||||
Select this option for the PCX-L processor, as used in the
|
||||
712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
|
||||
D200, D210, D300, D310 and E-class
|
||||
|
||||
config PA7200
|
||||
bool "PA7200"
|
||||
bool "PA7200" if "$(ARCH)" = "parisc"
|
||||
help
|
||||
Select this option for the PCX-T' processor, as used in the
|
||||
C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
|
||||
K100, K200, K210, K220, K400, K410 and K420
|
||||
|
||||
config PA7300LC
|
||||
bool "PA7300LC"
|
||||
bool "PA7300LC" if "$(ARCH)" = "parisc"
|
||||
help
|
||||
Select this option for the PCX-L2 processor, as used in the
|
||||
744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
|
||||
@ -224,17 +224,8 @@ config MLONGCALLS
|
||||
Enabling this option will probably slow down your kernel.
|
||||
|
||||
config 64BIT
|
||||
bool "64-bit kernel"
|
||||
def_bool "$(ARCH)" = "parisc64"
|
||||
depends on PA8X00
|
||||
help
|
||||
Enable this if you want to support 64bit kernel on PA-RISC platform.
|
||||
|
||||
At the moment, only people willing to use more than 2GB of RAM,
|
||||
or having a 64bit-only capable PA-RISC machine should say Y here.
|
||||
|
||||
Since there is no 64bit userland on PA-RISC, there is no point to
|
||||
enable this option otherwise. The 64bit kernel is significantly bigger
|
||||
and slower than the 32bit one.
|
||||
|
||||
choice
|
||||
prompt "Kernel page size"
|
||||
|
@ -12,14 +12,6 @@
|
||||
#include <asm/barrier.h>
|
||||
#include <linux/atomic.h>
|
||||
|
||||
/* compiler build environment sanity checks: */
|
||||
#if !defined(CONFIG_64BIT) && defined(__LP64__)
|
||||
#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
|
||||
#endif
|
||||
#if defined(CONFIG_64BIT) && !defined(__LP64__)
|
||||
#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
|
||||
#endif
|
||||
|
||||
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
|
||||
* on use of volatile and __*_bit() (set/clear/change):
|
||||
* *_bit() want use of volatile.
|
||||
|
@ -22,7 +22,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
.level PA_ASM_LEVEL
|
||||
.level 1.1
|
||||
|
||||
__INITDATA
|
||||
ENTRY(boot_args)
|
||||
@ -70,6 +70,47 @@ $bss_loop:
|
||||
stw,ma %arg2,4(%r1)
|
||||
stw,ma %arg3,4(%r1)
|
||||
|
||||
#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
|
||||
/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
|
||||
* and halt kernel if we detect a PA1.x CPU. */
|
||||
ldi 32,%r10
|
||||
mtctl %r10,%cr11
|
||||
.level 2.0
|
||||
mfctl,w %cr11,%r10
|
||||
.level 1.1
|
||||
comib,<>,n 0,%r10,$cpu_ok
|
||||
|
||||
load32 PA(msg1),%arg0
|
||||
ldi msg1_end-msg1,%arg1
|
||||
$iodc_panic:
|
||||
copy %arg0, %r10
|
||||
copy %arg1, %r11
|
||||
load32 PA(init_stack),%sp
|
||||
#define MEM_CONS 0x3A0
|
||||
ldw MEM_CONS+32(%r0),%arg0 // HPA
|
||||
ldi ENTRY_IO_COUT,%arg1
|
||||
ldw MEM_CONS+36(%r0),%arg2 // SPA
|
||||
ldw MEM_CONS+8(%r0),%arg3 // layers
|
||||
load32 PA(__bss_start),%r1
|
||||
stw %r1,-52(%sp) // arg4
|
||||
stw %r0,-56(%sp) // arg5
|
||||
stw %r10,-60(%sp) // arg6 = ptr to text
|
||||
stw %r11,-64(%sp) // arg7 = len
|
||||
stw %r0,-68(%sp) // arg8
|
||||
load32 PA(.iodc_panic_ret), %rp
|
||||
ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
|
||||
bv,n (%r1)
|
||||
.iodc_panic_ret:
|
||||
b . /* wait endless with ... */
|
||||
or %r10,%r10,%r10 /* qemu idle sleep */
|
||||
msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
|
||||
msg1_end:
|
||||
|
||||
$cpu_ok:
|
||||
#endif
|
||||
|
||||
.level PA_ASM_LEVEL
|
||||
|
||||
/* Initialize startup VM. Just map first 16/32 MB of memory */
|
||||
load32 PA(swapper_pg_dir),%r4
|
||||
mtctl %r4,%cr24 /* Initialize kernel root pointer */
|
||||
|
@ -93,7 +93,7 @@
|
||||
#define R1(i) (((i)>>21)&0x1f)
|
||||
#define R2(i) (((i)>>16)&0x1f)
|
||||
#define R3(i) ((i)&0x1f)
|
||||
#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
|
||||
#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
|
||||
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
|
||||
#define IM5_2(i) IM((i)>>16,5)
|
||||
#define IM5_3(i) IM((i),5)
|
||||
|
@ -68,10 +68,6 @@ void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
|
||||
pci_dma_ops = dma_ops;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function should run under locking protection, specifically
|
||||
* hose_spinlock.
|
||||
*/
|
||||
static int get_phb_number(struct device_node *dn)
|
||||
{
|
||||
int ret, phb_id = -1;
|
||||
@ -108,15 +104,20 @@ static int get_phb_number(struct device_node *dn)
|
||||
if (!ret)
|
||||
phb_id = (int)(prop & (MAX_PHBS - 1));
|
||||
|
||||
spin_lock(&hose_spinlock);
|
||||
|
||||
/* We need to be sure to not use the same PHB number twice. */
|
||||
if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
|
||||
return phb_id;
|
||||
goto out_unlock;
|
||||
|
||||
/* If everything fails then fallback to dynamic PHB numbering. */
|
||||
phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
|
||||
BUG_ON(phb_id >= MAX_PHBS);
|
||||
set_bit(phb_id, phb_bitmap);
|
||||
|
||||
out_unlock:
|
||||
spin_unlock(&hose_spinlock);
|
||||
|
||||
return phb_id;
|
||||
}
|
||||
|
||||
@ -127,10 +128,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
|
||||
phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
|
||||
if (phb == NULL)
|
||||
return NULL;
|
||||
spin_lock(&hose_spinlock);
|
||||
|
||||
phb->global_number = get_phb_number(dev);
|
||||
|
||||
spin_lock(&hose_spinlock);
|
||||
list_add_tail(&phb->list_node, &hose_list);
|
||||
spin_unlock(&hose_spinlock);
|
||||
|
||||
phb->dn = dev;
|
||||
phb->is_dynamic = slab_is_available();
|
||||
#ifdef CONFIG_PPC64
|
||||
|
@ -84,12 +84,10 @@
|
||||
|
||||
phy1: ethernet-phy@9 {
|
||||
reg = <9>;
|
||||
ti,fifo-depth = <0x1>;
|
||||
};
|
||||
|
||||
phy0: ethernet-phy@8 {
|
||||
reg = <8>;
|
||||
ti,fifo-depth = <0x1>;
|
||||
};
|
||||
};
|
||||
|
||||
@ -102,7 +100,6 @@
|
||||
disable-wp;
|
||||
cap-sd-highspeed;
|
||||
cap-mmc-highspeed;
|
||||
card-detect-delay = <200>;
|
||||
mmc-ddr-1_8v;
|
||||
mmc-hs200-1_8v;
|
||||
sd-uhs-sdr12;
|
||||
|
@ -54,12 +54,10 @@
|
||||
|
||||
phy1: ethernet-phy@5 {
|
||||
reg = <5>;
|
||||
ti,fifo-depth = <0x01>;
|
||||
};
|
||||
|
||||
phy0: ethernet-phy@4 {
|
||||
reg = <4>;
|
||||
ti,fifo-depth = <0x01>;
|
||||
};
|
||||
};
|
||||
|
||||
@ -72,7 +70,6 @@
|
||||
disable-wp;
|
||||
cap-sd-highspeed;
|
||||
cap-mmc-highspeed;
|
||||
card-detect-delay = <200>;
|
||||
mmc-ddr-1_8v;
|
||||
mmc-hs200-1_8v;
|
||||
sd-uhs-sdr12;
|
||||
|
@ -193,7 +193,7 @@
|
||||
cache-size = <2097152>;
|
||||
cache-unified;
|
||||
interrupt-parent = <&plic>;
|
||||
interrupts = <1>, <2>, <3>;
|
||||
interrupts = <1>, <3>, <4>, <2>;
|
||||
};
|
||||
|
||||
clint: clint@2000000 {
|
||||
@ -485,9 +485,8 @@
|
||||
ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
|
||||
msi-parent = <&pcie>;
|
||||
msi-controller;
|
||||
microchip,axi-m-atr0 = <0x10 0x0>;
|
||||
status = "disabled";
|
||||
pcie_intc: legacy-interrupt-controller {
|
||||
pcie_intc: interrupt-controller {
|
||||
#address-cells = <0>;
|
||||
#interrupt-cells = <1>;
|
||||
interrupt-controller;
|
||||
|
12
arch/riscv/include/asm/signal.h
Normal file
12
arch/riscv/include/asm/signal.h
Normal file
@ -0,0 +1,12 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#ifndef __ASM_SIGNAL_H
|
||||
#define __ASM_SIGNAL_H
|
||||
|
||||
#include <uapi/asm/signal.h>
|
||||
#include <uapi/asm/ptrace.h>
|
||||
|
||||
asmlinkage __visible
|
||||
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
|
||||
|
||||
#endif
|
@@ -42,6 +42,8 @@

#ifndef __ASSEMBLY__

extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];

#include <asm/processor.h>
#include <asm/csr.h>
@@ -28,7 +28,7 @@ unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;

__ro_after_init DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
EXPORT_SYMBOL(riscv_isa_ext_keys);

/**
@@ -15,6 +15,7 @@

#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/signal.h>
#include <asm/signal32.h>
#include <asm/switch_to.h>
#include <asm/csr.h>
@@ -20,9 +20,10 @@

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/thread_info.h>

int show_unhandled_signals = 1;
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
int rc;

if (diag204_probe()) {
pr_err("The hardware system does not support hypfs\n");
pr_info("The hardware system does not support hypfs\n");
return -ENODATA;
}

@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
hypfs_dbfs_exit();
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
device_initcall(hypfs_init)
@@ -176,14 +176,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
return old & mask;
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
const volatile unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);

return *p & mask;
}
#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

static inline bool arch_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *ptr)
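Editor's note: the two #define lines above route this architecture to the generic bit-test helpers. As a rough illustration of what an acquire-flavoured bit test adds over a plain one, here is a sketch in portable C11 atomics; it is not the kernel's generic_test_bit_acquire() implementation, just the idea under assumed names:

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Plain bit test: read the word and mask out one bit, no ordering implied. */
bool test_bit_sketch(unsigned long nr, const unsigned long *addr)
{
	return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
}

/* Acquire-flavoured variant: the word is read with an acquire load, so
 * later reads in program order cannot be hoisted above the bit test. */
bool test_bit_acquire_sketch(unsigned long nr, const _Atomic unsigned long *addr)
{
	unsigned long w = atomic_load_explicit(&addr[nr / BITS_PER_LONG],
					       memory_order_acquire);
	return 1UL & (w >> (nr % BITS_PER_LONG));
}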
@@ -1038,16 +1038,11 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#define __KVM_HAVE_ARCH_VM_FREE
void kvm_arch_free_vm(struct kvm *kvm);

#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm);
void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev);
#else
static inline int kvm_s390_pci_register_kvm(struct zpci_dev *dev,
struct kvm *kvm)
{
return -EPERM;
}
static inline void kvm_s390_pci_unregister_kvm(struct zpci_dev *dev) {}
#endif
struct zpci_kvm_hook {
int (*kvm_register)(void *opaque, struct kvm *kvm);
void (*kvm_unregister)(void *opaque);
};

extern struct zpci_kvm_hook zpci_kvm_hook;

#endif
@@ -91,6 +91,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)

memcpy(dst, src, arch_task_struct_size);
dst->thread.fpu.regs = dst->thread.fpu.fprs;

/*
 * Don't transfer over the runtime instrumentation or the guarded
 * storage control block pointers. These fields are cleared here instead
 * of in copy_thread() to avoid premature freeing of associated memory
 * on fork() failure. Wait to clear the RI flag because ->stack still
 * refers to the source thread.
 */
dst->thread.ri_cb = NULL;
dst->thread.gs_cb = NULL;
dst->thread.gs_bc_cb = NULL;

return 0;
}

@@ -150,13 +162,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
frame->childregs.flags = 0;
if (new_stackp)
frame->childregs.gprs[15] = new_stackp;

/* Don't copy runtime instrumentation info */
p->thread.ri_cb = NULL;
/*
 * Clear the runtime instrumentation flag after the above childregs
 * copy. The CB pointer was already cleared in arch_dup_task_struct().
 */
frame->childregs.psw.mask &= ~PSW_MASK_RI;
/* Don't copy guarded storage control block */
p->thread.gs_cb = NULL;
p->thread.gs_bc_cb = NULL;

/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
@@ -431,8 +431,9 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
 * available, enable them and let userspace indicate whether or not they will
 * be used (specify SHM bit to disable).
 */
int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm)
static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm)
{
struct zpci_dev *zdev = opaque;
int rc;

if (!zdev)
@@ -510,10 +511,10 @@ err:
kvm_put_kvm(kvm);
return rc;
}
EXPORT_SYMBOL_GPL(kvm_s390_pci_register_kvm);

void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev)
static void kvm_s390_pci_unregister_kvm(void *opaque)
{
struct zpci_dev *zdev = opaque;
struct kvm *kvm;

if (!zdev)
@@ -566,7 +567,6 @@ out:

kvm_put_kvm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_s390_pci_unregister_kvm);

void kvm_s390_pci_init_list(struct kvm *kvm)
{
@@ -678,6 +678,8 @@ int kvm_s390_pci_init(void)

spin_lock_init(&aift->gait_lock);
mutex_init(&aift->aift_lock);
zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;

return 0;
}
@@ -685,6 +687,8 @@ int kvm_s390_pci_init(void)
void kvm_s390_pci_exit(void)
{
mutex_destroy(&aift->aift_lock);
zpci_kvm_hook.kvm_register = NULL;
zpci_kvm_hook.kvm_unregister = NULL;

kfree(aift);
}
@@ -379,7 +379,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (access == VM_WRITE || is_write)
if (is_write)
access = VM_WRITE;
if (access == VM_WRITE)
flags |= FAULT_FLAG_WRITE;
mmap_read_lock(mm);
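Editor's note: a small stand-alone sketch of what the do_exception() change above alters (illustrative constants, not the kernel's values; the surrounding permission check is not shown here). The old logic only set the write flag; the new logic first promotes the access type to VM_WRITE, so later checks against the VMA see a write access.

#include <stdio.h>

#define VM_READ          0x1     /* illustrative values only */
#define VM_WRITE         0x2
#define FAULT_FLAG_WRITE 0x100

/* Old shape: flags get the write bit, but 'access' keeps its old value. */
static void old_logic(int is_write, int *access, int *flags)
{
	if (*access == VM_WRITE || is_write)
		*flags |= FAULT_FLAG_WRITE;
}

/* New shape (as in the hunk above): a write fault promotes 'access' first. */
static void new_logic(int is_write, int *access, int *flags)
{
	if (is_write)
		*access = VM_WRITE;
	if (*access == VM_WRITE)
		*flags |= FAULT_FLAG_WRITE;
}

int main(void)
{
	int a1 = VM_READ, f1 = 0, a2 = VM_READ, f2 = 0;

	old_logic(1, &a1, &f1);
	new_logic(1, &a2, &f2);
	printf("old: access=%#x flags=%#x\n", a1, f1);  /* access stays VM_READ  */
	printf("new: access=%#x flags=%#x\n", a2, f2);  /* access becomes VM_WRITE */
	return 0;
}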
@@ -5,5 +5,5 @@

obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
pci_bus.o
pci_bus.o pci_kvm_hook.o
obj-$(CONFIG_PCI_IOV) += pci_iov.o
arch/s390/pci/pci_kvm_hook.c (new file, 11 lines added)
@@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO ZPCI devices support
 *
 * Copyright (C) IBM Corp. 2022. All rights reserved.
 * Author(s): Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/kvm_host.h>

struct zpci_kvm_hook zpci_kvm_hook;
EXPORT_SYMBOL_GPL(zpci_kvm_hook);
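Editor's note: the zpci_kvm_hook published here (filled in by kvm_s390_pci_init() and cleared by kvm_s390_pci_exit() in the hunks above) lets the zPCI side reach KVM without a hard module dependency, replacing the earlier exported kvm_s390_pci_register_kvm()/kvm_s390_pci_unregister_kvm() symbols. A hedged, stand-alone C sketch of the optional-hook pattern itself; every name here is illustrative, not taken from the patch:

#include <stdio.h>

/* A provider fills in the function pointers at init time; consumers test
 * for NULL and fail soft when no provider is loaded. */
struct hook {
	int  (*reg)(void *opaque, int token);
	void (*unreg)(void *opaque);
};

static struct hook example_hook;            /* like zpci_kvm_hook: starts empty */

static int provider_reg(void *opaque, int token)
{
	printf("registered %p with token %d\n", opaque, token);
	return 0;
}

static int consumer_register(void *dev, int token)
{
	if (!example_hook.reg)
		return -1;                  /* provider not loaded */
	return example_hook.reg(dev, token);
}

int main(void)
{
	int dummy;

	printf("before init: %d\n", consumer_register(&dummy, 42));
	example_hook.reg = provider_reg;    /* what the provider's init does */
	printf("after init: %d\n", consumer_register(&dummy, 42));
	return 0;
}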
@@ -135,16 +135,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
return (old & mask) != 0;
}

/**
 * arch_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

#include <asm-generic/bitops/non-instrumented-non-atomic.h>
@@ -1011,7 +1011,7 @@ error_kzalloc:

static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], u32 sizes[], const bool *ctx,
const char * const names[], const bool *ctx,
struct irq_affinity *desc)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
@@ -65,20 +65,6 @@ extern void setup_clear_cpu_cap(unsigned int bit);

#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)

#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)

/*
 * Workaround for the sake of BPF compilation which utilizes kernel
 * headers, but clang does not support ASM GOTO and fails the build.
 */
#ifndef __BPF_TRACING__
#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
#endif

#define static_cpu_has(bit) boot_cpu_has(bit)

#else

/*
 * Static testing of CPU features. Used the same as boot_cpu_has(). It
 * statically patches the target code for additional performance. Use

@@ -137,7 +123,6 @@ t_no:
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
)
#endif

#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
@@ -286,10 +286,6 @@ vdso_install:

archprepare: checkbin
checkbin:
ifndef CONFIG_CC_HAS_ASM_GOTO
@echo Compiler lacks asm-goto support.
@exit 1
endif
ifdef CONFIG_RETPOLINE
ifeq ($(RETPOLINE_CFLAGS),)
@echo "You are building kernel with non-retpoline compiler." >&2
@@ -132,7 +132,17 @@ void snp_set_page_private(unsigned long paddr);
void snp_set_page_shared(unsigned long paddr);
void sev_prep_identity_maps(unsigned long top_level_pgt);
#else
static inline void sev_enable(struct boot_params *bp) { }
static inline void sev_enable(struct boot_params *bp)
{
/*
 * bp->cc_blob_address should only be set by boot/compressed kernel.
 * Initialize it to 0 unconditionally (thus here in this stub too) to
 * ensure that uninitialized values from buggy bootloaders aren't
 * propagated.
 */
if (bp)
bp->cc_blob_address = 0;
}
static inline void sev_es_shutdown_ghcb(void) { }
static inline bool sev_es_check_ghcb_fault(unsigned long address)
{
@@ -276,6 +276,14 @@ void sev_enable(struct boot_params *bp)
struct msr m;
bool snp;

/*
 * bp->cc_blob_address should only be set by boot/compressed kernel.
 * Initialize it to 0 to ensure that uninitialized values from
 * buggy bootloaders aren't propagated.
 */
if (bp)
bp->cc_blob_address = 0;

/*
 * Setup/preliminary detection of SNP. This will be sanity-checked
 * against CPUID/MSR values later.
@@ -14,7 +14,6 @@ CONFIG_CPU_FREQ=y

# x86 xen specific config options
CONFIG_XEN_PVH=y
CONFIG_XEN_MAX_DOMAIN_MEMORY=500
CONFIG_XEN_SAVE_RESTORE=y
# CONFIG_XEN_DEBUG_FS is not set
CONFIG_XEN_MCE_LOG=y
@@ -311,7 +311,7 @@ SYM_CODE_START(entry_INT80_compat)
 * Interrupts are off on entry.
 */
ASM_CLAC /* Do this early to minimize exposure */
SWAPGS
ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV

/*
 * User tracing code (ptrace or signal handlers) might assume that
@@ -6292,10 +6292,8 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_aliases = NULL;
x86_pmu.pebs_prec_dist = true;
x86_pmu.pebs_block = true;
x86_pmu.pebs_capable = ~0ULL;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;

@@ -6338,10 +6336,8 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_aliases = NULL;
x86_pmu.pebs_prec_dist = true;
x86_pmu.pebs_block = true;
x86_pmu.pebs_capable = ~0ULL;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
x86_pmu.lbr_pt_coexist = true;
@@ -291,6 +291,7 @@ static u64 load_latency_data(struct perf_event *event, u64 status)
static u64 store_latency_data(struct perf_event *event, u64 status)
{
union intel_x86_pebs_dse dse;
union perf_mem_data_src src;
u64 val;

dse.val = status;
@@ -304,7 +305,14 @@ static u64 store_latency_data(struct perf_event *event, u64 status)

val |= P(BLK, NA);

return val;
/*
 * the pebs_data_source table is only for loads
 * so override the mem_op to say STORE instead
 */
src.val = val;
src.mem_op = P(OP,STORE);

return src.val;
}

struct pebs_record_core {
@@ -822,7 +830,7 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {

struct event_constraint intel_grt_pebs_event_constraints[] = {
/* Allow all events as PEBS with no flags */
INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0xf),
INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
EVENT_CONSTRAINT_END
};
@@ -2262,6 +2270,7 @@ void __init intel_ds_init(void)
PERF_SAMPLE_BRANCH_STACK |
PERF_SAMPLE_TIME;
x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.pebs_capable = ~0ULL;
pebs_qual = "-baseline";
x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
} else {

@@ -1097,6 +1097,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)

if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
reg->config = mask;

/*
 * The Arch LBR HW can retrieve the common branch types
 * from the LBR_INFO. It doesn't require the high overhead
 * SW disassemble.
 * Enable the branch type by default for the Arch LBR.
 */
reg->reg |= X86_BR_TYPE_SAVE;
return 0;
}

@@ -841,6 +841,22 @@ int snb_pci2phy_map_init(int devid)
return 0;
}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;

/*
 * SNB IMC counters are 32-bit and are laid out back to back
 * in MMIO space. Therefore we must use a 32-bit accessor function
 * using readq() from uncore_mmio_read_counter() causes problems
 * because it is reading 64-bit at a time. This is okay for the
 * uncore_perf_event_update() function because it drops the upper
 * 32-bits but not okay for plain uncore_read_counter() as invoked
 * in uncore_pmu_event_start().
 */
return (u64)readl(box->io_addr + hwc->event_base);
}

static struct pmu snb_uncore_imc_pmu = {
.task_ctx_nr = perf_invalid_context,
.event_init = snb_uncore_imc_event_init,
@@ -860,7 +876,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
.disable_event = snb_uncore_imc_disable_event,
.enable_event = snb_uncore_imc_enable_event,
.hw_config = snb_uncore_imc_hw_config,
.read_counter = uncore_mmio_read_counter,
.read_counter = snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {

@@ -207,6 +207,20 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
{
bool oldbit;

asm volatile("testb %2,%1"
CC_SET(nz)
: CC_OUT(nz) (oldbit)
: "m" (((unsigned char *)addr)[nr >> 3]),
"i" (1 << (nr & 7))
:"memory");

return oldbit;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
bool oldbit;
@@ -226,6 +240,13 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
variable_test_bit(nr, addr);
}

static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
variable_test_bit(nr, addr);
}

/**
 * __ffs - find first set bit in word
 * @word: The word to search

@@ -155,20 +155,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);

#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)

#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)

/*
 * Workaround for the sake of BPF compilation which utilizes kernel
 * headers, but clang does not support ASM GOTO and fails the build.
 */
#ifndef __BPF_TRACING__
#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
#endif

#define static_cpu_has(bit) boot_cpu_has(bit)

#else

/*
 * Static testing of CPU features. Used the same as boot_cpu_has(). It
 * statically patches the target code for additional performance. Use
@@ -208,7 +194,6 @@ t_no:
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
)
#endif

#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))

@@ -457,7 +457,8 @@
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */

#endif /* _ASM_X86_CPUFEATURES_H */

@@ -64,4 +64,6 @@
#define EX_TYPE_UCOPY_LEN4 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(4))
#define EX_TYPE_UCOPY_LEN8 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(8))

#define EX_TYPE_ZEROPAD 20 /* longword load with zeropad on fault */

#endif

@@ -27,6 +27,7 @@
 * _X - regular server parts
 * _D - micro server parts
 * _N,_P - other mobile parts
 * _S - other client parts
 *
 * Historical OPTDIFFs:
 *
@@ -112,6 +113,7 @@

#define INTEL_FAM6_RAPTORLAKE 0xB7
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_FAM6_RAPTORLAKE_S 0xBF

/* "Small Core" Processors (Atom) */

@@ -35,34 +35,57 @@
#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 772f; \
int3; \
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr) \
mov $(nr/2), reg; \
771: \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 772f; \
773: /* speculation trap */ \
UNWIND_HINT_EMPTY; \
pause; \
lfence; \
jmp 773b; \
772: \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 774f; \
775: /* speculation trap */ \
UNWIND_HINT_EMPTY; \
pause; \
lfence; \
jmp 775b; \
774: \
add $(BITS_PER_LONG/8) * 2, sp; \
__FILL_RETURN_SLOT \
__FILL_RETURN_SLOT \
add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
dec reg; \
jnz 771b; \
/* barrier for jnz misprediction */ \
lfence;
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr) \
.rept nr; \
__FILL_RETURN_SLOT; \
.endr; \
add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN \
__FILL_RETURN_SLOT \
add $(BITS_PER_LONG/8), %_ASM_SP; \
lfence;

#ifdef __ASSEMBLY__

@@ -132,28 +155,15 @@
#endif
.endm

.macro ISSUE_UNBALANCED_RET_GUARD
ANNOTATE_INTRA_FUNCTION_CALL
call .Lunbalanced_ret_guard_\@
int3
.Lunbalanced_ret_guard_\@:
add $(BITS_PER_LONG/8), %_ASM_SP
lfence
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
.ifb \ftr2
ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
.else
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
.endif
__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lunbalanced_\@:
ISSUE_UNBALANCED_RET_GUARD
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
__stringify(__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm
Some files were not shown because too many files have changed in this diff.