regulator: mt6358: Remove bogus regulators and
Merge series from Chen-Yu Tsai <wenst@chromium.org>:

Hi,

This is v3 of the remainder of the MT6358 regulator driver cleanup and
improvement series. v1 can be found here [1]; v2 is here [2].

Changes since v2:
- Merged patches dropped
- Fixed up pickable linear ranges' selector values
- Collected tags
- Patch adding missing regulator definitions squashed into the patch
  using the definitions; recommended by Krzysztof on my MT6366 series.
- Remaining dts patch split out to be sent separately

Changes since v1:
- Merged patches dropped
- Added patch to move VCN33 regulator status sync after ID check
- Added patch to fix VCN33 sync fail error message
- Added patch to add missing register definitions

Various discrepancies were found while preparing to upstream MT8186
device trees, which utilize the MT6366 PMIC, which is also covered by
this driver.

Patches 1~3 should go through the regulator tree, and patch 4 through
the soc/mediatek tree.

** Note: patch 2 needs an ack from Lee for the mfd header change.

This v3 series can be seen as two parts. v1 had three parts, but one
part was fully merged, and then v2 gained another cleanup. v3 drops the
"fixing bogus regulators" part: the driver changes are fully merged and
the device tree change will be sent separately.

Part 1 - Robust chip ID checking (patch 1)

Angelo suggested making the driver fail to probe if an unexpected chip
ID was found. Patch 1 implements this.

Part 2 - Output voltage fine tuning support (patches 2, 3)

Many of the LDOs on these PMICs support an extra level of output
voltage fine tuning. Most default to no offset, but a couple have a
non-zero offset by default. Previously this was unaccounted for in the
driver and device tree constraints. On the outputs with a non-zero
offset, this ends up becoming a discrepancy between the device tree and
the actual hardware.

These two patches add support for this second level of tuning, modeled
as a bunch of linear ranges. While it's unlikely we need this level of
control, it's nice to be able to read back the accurate hardware
settings.

Please have a look.

Thanks
ChenYu

[1] https://lore.kernel.org/linux-arm-kernel/20230609083009.2822259-1-wenst@chromium.org/
[2] https://lore.kernel.org/linux-mediatek/20230721082903.2038975-1-wenst@chromium.org/

Chen-Yu Tsai (3):
  regulator: mt6358: Fail probe on unknown chip ID
  regulator: mt6358: Add output voltage fine tuning to fixed regulators
  regulator: mt6358: Add output voltage fine tuning to variable LDOs

 drivers/regulator/mt6358-regulator.c | 304 ++++++++++++---------------
 include/linux/mfd/mt6358/registers.h |   6 +
 2 files changed, 144 insertions(+), 166 deletions(-)

--
2.42.0.283.g2d96d420d3-goog
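For readers unfamiliar with the mechanism the series leans on: the regulator
framework models this kind of two-level voltage selection with "pickable"
linear ranges, where one register field selects a range and another selects a
step within that range. The sketch below is a minimal illustration of that
pattern, not the MT6358 driver's actual tables: every register offset, mask,
selector value and range in it is hypothetical (see
drivers/regulator/mt6358-regulator.c for the real thing).

#include <linux/linear_range.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Pickable-range helpers provided by the regulator core. */
static const struct regulator_ops example_pickable_ldo_ops = {
	.list_voltage		= regulator_list_voltage_pickable_linear_range,
	.map_voltage		= regulator_map_voltage_pickable_linear_range,
	.get_voltage_sel	= regulator_get_voltage_sel_pickable_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_pickable_regmap,
};

/*
 * Two hypothetical ranges: LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV).
 * Each entry is paired with a range-selector value below.
 */
static const struct linear_range example_ranges[] = {
	LINEAR_RANGE(500000, 0, 127, 6250),	/* 0.50 V up, 6.25 mV steps */
	LINEAR_RANGE(1300000, 0, 127, 12500),	/* 1.30 V up, 12.5 mV steps */
};

/* Range-selector register values, one per range above (made up). */
static const unsigned int example_range_sel[] = { 0x0, 0x1 };

static const struct regulator_desc example_ldo_desc = {
	.name		= "example-ldo",
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.ops		= &example_pickable_ldo_ops,
	.n_voltages	= ARRAY_SIZE(example_range_sel) * 128,
	.linear_ranges	= example_ranges,
	.n_linear_ranges = ARRAY_SIZE(example_ranges),
	/* Field name as of v6.5+; older kernels used linear_range_selectors. */
	.linear_range_selectors_bitfield = example_range_sel,
	.vsel_reg	= 0x1c8,	/* hypothetical voltage-select register */
	.vsel_mask	= 0x7f,
	.vsel_range_reg	= 0x1cc,	/* hypothetical range-select register */
	.vsel_range_mask = 0x3,
};

With a table like this the core can set and read back the exact selector pair,
which is what makes "read back the accurate hardware settings" above work.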
This commit is contained in:
commit 4a710a0b0c
@@ -175,6 +175,8 @@ infrastructure:
     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
     +------------------------------+---------+---------+
     | SME                          | [27-24] |    y    |
     +------------------------------+---------+---------+
     | MTE                          | [11-8]  |    y    |
     +------------------------------+---------+---------+
     | SSBS                         | [7-4]   |    y    |
     +------------------------------+---------+---------+

@@ -288,8 +290,18 @@ infrastructure:
     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
     +------------------------------+---------+---------+
     | CSSC                         | [55-52] |    y    |
     +------------------------------+---------+---------+
     | RPRFM                        | [51-48] |    y    |
     +------------------------------+---------+---------+
     | BC                           | [23-20] |    y    |
     +------------------------------+---------+---------+
     | MOPS                         | [19-16] |    y    |
     +------------------------------+---------+---------+
     | APA3                         | [15-12] |    y    |
     +------------------------------+---------+---------+
     | GPA3                         | [11-8]  |    y    |
     +------------------------------+---------+---------+
     | RPRES                        | [7-4]   |    y    |
     +------------------------------+---------+---------+
     | WFXT                         | [3-0]   |    y    |
     +------------------------------+---------+---------+
@@ -305,6 +305,9 @@ HWCAP2_SMEF16F16
HWCAP2_MOPS
    Functionality implied by ID_AA64ISAR2_EL1.MOPS == 0b0001.

HWCAP2_HBC
    Functionality implied by ID_AA64ISAR2_EL1.BC == 0b0001.

4. Unused AT_HWCAP bits
-----------------------
@@ -381,9 +381,9 @@ Documentation of LoongArch ISA:

Documentation of LoongArch ELF psABI:

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (in Chinese)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (in Chinese)

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (in English)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (in English)

Linux kernel repository of Loongson and LoongArch:
@@ -37,7 +37,6 @@ For more information please refer to the documentation site or wiki

  https://btrfs.readthedocs.io

-  https://btrfs.wiki.kernel.org

that maintains information about administration tasks, frequently asked
questions, use cases, mount options, comprehensible changelogs, features,
@@ -7,9 +7,9 @@ AX.25

To use the amateur radio protocols within Linux you will need to get a
suitable copy of the AX.25 Utilities. More detailed information about
AX.25, NET/ROM and ROSE, associated programs and utilities can be
-found on http://www.linux-ax25.org.
+found on https://linux-ax25.in-berlin.de.

-There is an active mailing list for discussing Linux amateur radio matters
+There is a mailing list for discussing Linux amateur radio matters
called linux-hams@vger.kernel.org. To subscribe to it, send a message to
majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
of the message, the subject field is ignored. You don't need to be
@@ -251,6 +251,7 @@ an involved disclosed party. The current ambassadors list:
  IBM Z		Christian Borntraeger <borntraeger@de.ibm.com>
  Intel		Tony Luck <tony.luck@intel.com>
  Qualcomm	Trilok Soni <tsoni@codeaurora.org>
+  RISC-V	Palmer Dabbelt <palmer@dabbelt.com>
  Samsung	Javier González <javier.gonz@samsung.com>

  Microsoft	James Morris <jamorris@linux.microsoft.com>
@@ -74,8 +74,8 @@ topology based on those information. When the device is older and
doesn't respond to the new UMP inquiries, the driver falls back and
builds the topology based on Group Terminal Block (GTB) information
from the USB descriptor. Some device might be screwed up by the
-unexpected UMP command; in such a case, pass `midi2_probe=0` option to
-snd-usb-audio driver for skipping the UMP v1.1 inquiries.
+unexpected UMP command; in such a case, pass `midi2_ump_probe=0`
+option to snd-usb-audio driver for skipping the UMP v1.1 inquiries.

When the MIDI 2.0 device is probed, the kernel creates a rawmidi
device for each UMP Endpoint of the device. Its device name is
@@ -344,9 +344,9 @@ LoongArch指令集架构的文档:

LoongArch的ELF psABI文档:

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (中文版)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (中文版)

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (英文版)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (英文版)

Loongson与LoongArch的Linux内核源码仓库:
38	MAINTAINERS
@@ -1662,7 +1662,7 @@ F: arch/arm*/include/asm/perf_event.h
F:	arch/arm*/kernel/hw_breakpoint.c
F:	arch/arm*/kernel/perf_*
F:	drivers/perf/
-F:	include/linux/perf/arm_pmu.h
+F:	include/linux/perf/arm_pmu*.h

ARM PORT
M:	Russell King <linux@armlinux.org.uk>

@@ -1855,7 +1855,7 @@ F: Documentation/devicetree/bindings/phy/amlogic*
F:	arch/arm/boot/dts/amlogic/
F:	arch/arm/mach-meson/
F:	arch/arm64/boot/dts/amlogic/
-F:	drivers/genpd/amlogic/
+F:	drivers/pmdomain/amlogic/
F:	drivers/mmc/host/meson*
F:	drivers/phy/amlogic/
F:	drivers/pinctrl/meson/

@@ -1918,7 +1918,7 @@ F: drivers/bluetooth/hci_bcm4377.c
F:	drivers/clk/clk-apple-nco.c
F:	drivers/cpufreq/apple-soc-cpufreq.c
F:	drivers/dma/apple-admac.c
-F:	drivers/genpd/apple/
+F:	drivers/pmdomain/apple/
F:	drivers/i2c/busses/i2c-pasemi-core.c
F:	drivers/i2c/busses/i2c-pasemi-platform.c
F:	drivers/iommu/apple-dart.c

@@ -2435,7 +2435,7 @@ F: arch/arm/mach-ux500/
F:	drivers/clk/clk-nomadik.c
F:	drivers/clocksource/clksrc-dbx500-prcmu.c
F:	drivers/dma/ste_dma40*
-F:	drivers/genpd/st/ste-ux500-pm-domain.c
+F:	drivers/pmdomain/st/ste-ux500-pm-domain.c
F:	drivers/hwspinlock/u8500_hsem.c
F:	drivers/i2c/busses/i2c-nomadik.c
F:	drivers/iio/adc/ab8500-gpadc.c

@@ -2598,7 +2598,7 @@ F: arch/arm/include/debug/renesas-scif.S
F:	arch/arm/mach-shmobile/
F:	arch/arm64/boot/dts/renesas/
F:	arch/riscv/boot/dts/renesas/
-F:	drivers/genpd/renesas/
+F:	drivers/pmdomain/renesas/
F:	drivers/soc/renesas/
F:	include/linux/soc/renesas/
K:	\brenesas,

@@ -3344,7 +3344,7 @@ AX.25 NETWORK LAYER
M:	Ralf Baechle <ralf@linux-mips.org>
L:	linux-hams@vger.kernel.org
S:	Maintained
-W:	http://www.linux-ax25.org/
+W:	https://linux-ax25.in-berlin.de
F:	include/net/ax25.h
F:	include/uapi/linux/ax25.h
F:	net/ax25/

@@ -4026,7 +4026,7 @@ F: arch/mips/kernel/*bmips*
F:	drivers/irqchip/irq-bcm63*
F:	drivers/irqchip/irq-bcm7*
F:	drivers/irqchip/irq-brcmstb*
-F:	drivers/genpd/bcm/bcm63xx-power.c
+F:	drivers/pmdomain/bcm/bcm63xx-power.c
F:	include/linux/bcm963xx_nvram.h
F:	include/linux/bcm963xx_tag.h

@@ -4248,7 +4248,7 @@ R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L:	linux-pm@vger.kernel.org
S:	Maintained
T:	git https://github.com/broadcom/stblinux.git
-F:	drivers/genpd/bcm/bcm-pmb.c
+F:	drivers/pmdomain/bcm/bcm-pmb.c
F:	include/dt-bindings/soc/bcm-pmb.h

BROADCOM SPECIFIC AMBA DRIVER (BCMA)

@@ -4378,7 +4378,6 @@ M: David Sterba <dsterba@suse.com>
L:	linux-btrfs@vger.kernel.org
S:	Maintained
W:	https://btrfs.readthedocs.io
-W:	https://btrfs.wiki.kernel.org/
Q:	https://patchwork.kernel.org/project/linux-btrfs/list/
C:	irc://irc.libera.chat/btrfs
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git

@@ -6646,7 +6645,6 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F:	drivers/gpu/drm/panel/panel-novatek-nt36672a.c

DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
-M:	Ben Skeggs <bskeggs@redhat.com>
M:	Karol Herbst <kherbst@redhat.com>
M:	Lyude Paul <lyude@redhat.com>
L:	dri-devel@lists.freedesktop.org

@@ -8729,7 +8727,7 @@ M: Ulf Hansson <ulf.hansson@linaro.org>
L:	linux-pm@vger.kernel.org
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
-F:	drivers/genpd/
+F:	drivers/pmdomain/

GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M:	Eugen Hristev <eugen.hristev@microchip.com>

@@ -8875,7 +8873,7 @@ F: drivers/gpio/gpio-mockup.c
F:	tools/testing/selftests/gpio/

GPIO REGMAP
-R:	Michael Walle <michael@walle.cc>
+M:	Michael Walle <michael@walle.cc>
S:	Maintained
F:	drivers/gpio/gpio-regmap.c
F:	include/linux/gpio/regmap.h

@@ -13617,6 +13615,7 @@ F: drivers/net/ethernet/mellanox/mlxfw/

MELLANOX HARDWARE PLATFORM SUPPORT
M:	Hans de Goede <hdegoede@redhat.com>
+M:	Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M:	Mark Gross <markgross@kernel.org>
M:	Vadim Pasternak <vadimp@nvidia.com>
L:	platform-driver-x86@vger.kernel.org

@@ -14211,6 +14210,7 @@ F: drivers/platform/surface/surface_gpe.c

MICROSOFT SURFACE HARDWARE PLATFORM SUPPORT
M:	Hans de Goede <hdegoede@redhat.com>
+M:	Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M:	Mark Gross <markgross@kernel.org>
M:	Maximilian Luz <luzmaximilian@gmail.com>
L:	platform-driver-x86@vger.kernel.org

@@ -14757,7 +14757,7 @@ NETROM NETWORK LAYER
M:	Ralf Baechle <ralf@linux-mips.org>
L:	linux-hams@vger.kernel.org
S:	Maintained
-W:	http://www.linux-ax25.org/
+W:	https://linux-ax25.in-berlin.de
F:	include/net/netrom.h
F:	include/uapi/linux/netrom.h
F:	net/netrom/

@@ -17680,7 +17680,7 @@ L: linux-pm@vger.kernel.org
L:	linux-arm-msm@vger.kernel.org
S:	Maintained
F:	Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
-F:	drivers/genpd/qcom/cpr.c
+F:	drivers/pmdomain/qcom/cpr.c

QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M:	Ilia Lin <ilia.lin@kernel.org>

@@ -18608,7 +18608,7 @@ ROSE NETWORK LAYER
M:	Ralf Baechle <ralf@linux-mips.org>
L:	linux-hams@vger.kernel.org
S:	Maintained
-W:	http://www.linux-ax25.org/
+W:	https://linux-ax25.in-berlin.de
F:	include/net/rose.h
F:	include/uapi/linux/rose.h
F:	net/rose/

@@ -20514,7 +20514,7 @@ STARFIVE JH71XX PMU CONTROLLER DRIVER
M:	Walker Chen <walker.chen@starfivetech.com>
S:	Supported
F:	Documentation/devicetree/bindings/power/starfive*
-F:	drivers/genpd/starfive/jh71xx-pmu.c
+F:	drivers/pmdomain/starfive/jh71xx-pmu.c
F:	include/dt-bindings/power/starfive,jh7110-pmu.h

STARFIVE SOC DRIVERS

@@ -21339,7 +21339,7 @@ F: drivers/irqchip/irq-ti-sci-inta.c
F:	drivers/irqchip/irq-ti-sci-intr.c
F:	drivers/reset/reset-ti-sci.c
F:	drivers/soc/ti/ti_sci_inta_msi.c
-F:	drivers/genpd/ti/ti_sci_pm_domains.c
+F:	drivers/pmdomain/ti/ti_sci_pm_domains.c
F:	include/dt-bindings/soc/ti,sci_pm_domain.h
F:	include/linux/soc/ti/ti_sci_inta_msi.h
F:	include/linux/soc/ti/ti_sci_protocol.h

@@ -21581,7 +21581,7 @@ L: linux-kernel@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
-F:	drivers/genpd/ti/omap_prm.c
+F:	drivers/pmdomain/ti/omap_prm.c
F:	drivers/soc/ti/*

TI LM49xxx FAMILY ASoC CODEC DRIVERS

@@ -23424,9 +23424,11 @@ F: drivers/platform/x86/x86-android-tablets/

X86 PLATFORM DRIVERS
M:	Hans de Goede <hdegoede@redhat.com>
+M:	Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M:	Mark Gross <markgross@kernel.org>
L:	platform-driver-x86@vger.kernel.org
S:	Maintained
Q:	https://patchwork.kernel.org/project/platform-driver-x86/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
F:	drivers/platform/olpc/
F:	drivers/platform/x86/
2	Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -207,7 +207,7 @@ static void xen_power_off(void)

static irqreturn_t xen_arm_callback(int irq, void *arg)
{
-	xen_hvm_evtchn_do_upcall();
+	xen_evtchn_do_upcall();
	return IRQ_HANDLED;
}
@@ -663,7 +663,7 @@ static inline bool supports_clearbhb(int scope)
	isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
-						    ID_AA64ISAR2_EL1_BC_SHIFT);
+						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
}

const struct cpumask *system_32bit_el0_cpumask(void);
@@ -118,7 +118,7 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);

u64 __guest_enter(struct kvm_vcpu *vcpu);

-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);

#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
@@ -222,7 +222,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
@@ -12,6 +12,6 @@
#define FFA_MAX_FUNC_NUM 0x7F

int hyp_ffa_init(void *pages);
-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);

#endif /* __KVM_HYP_FFA_H */
@@ -634,9 +634,8 @@ out_handled:
	return true;
}

-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
-	DECLARE_REG(u64, func_id, host_ctxt, 0);
	struct arm_smccc_res res;

	/*
@@ -57,6 +57,7 @@ __do_hyp_init:
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

+	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f
@@ -368,6 +368,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
	if (static_branch_unlikely(&kvm_protected_mode_initialized))
		hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;

+	id &= ~ARM_SMCCC_CALL_HINTS;
	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
@@ -392,11 +393,14 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
+	DECLARE_REG(u64, func_id, host_ctxt, 0);
	bool handled;

-	handled = kvm_host_psci_handler(host_ctxt);
+	func_id &= ~ARM_SMCCC_CALL_HINTS;
+
+	handled = kvm_host_psci_handler(host_ctxt, func_id);
	if (!handled)
-		handled = kvm_host_ffa_handler(host_ctxt);
+		handled = kvm_host_ffa_handler(host_ctxt, func_id);
	if (!handled)
		default_host_smc_handler(host_ctxt);
@@ -273,9 +273,8 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
	}
}

-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
-	DECLARE_REG(u64, func_id, host_ctxt, 0);
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
@@ -652,6 +652,9 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)

	mutex_unlock(&kvm_hyp_pgd_mutex);

+	if (!ret)
+		*haddr = base;
+
	return ret;
}
@@ -1347,7 +1347,11 @@ UnsignedEnum 51:48 RPRFM
	0b0000	NI
	0b0001	IMP
EndEnum
-Res0	47:28
+Res0	47:32
+UnsignedEnum	31:28	CLRBHB
+	0b0000	NI
+	0b0001	IMP
+EndEnum
UnsignedEnum	27:24	PAC_frac
	0b0000	NI
	0b0001	IMP
@@ -907,3 +907,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
 * TBD when IA64 starts to support suspend...
 */
int acpi_suspend_lowlevel(void) { return 0; }
+
+void acpi_proc_quirk_mwait_check(void)
+{
+}
@@ -19,7 +19,7 @@
 */
#ifndef __ASSEMBLY__
#ifndef PHYS_OFFSET
-#define PHYS_OFFSET	_AC(0, UL)
+#define PHYS_OFFSET	_UL(0)
#endif
extern unsigned long vm_map_base;
#endif /* __ASSEMBLY__ */
@@ -43,7 +43,7 @@ extern unsigned long vm_map_base;
 * Memory above this physical address will be considered highmem.
 */
#ifndef HIGHMEM_START
-#define HIGHMEM_START	(_AC(1, UL) << _AC(DMW_PABITS, UL))
+#define HIGHMEM_START	(_UL(1) << _UL(DMW_PABITS))
#endif

#define TO_PHYS(x)		(		((x) & TO_PHYS_MASK))
@@ -65,16 +65,16 @@ extern unsigned long vm_map_base;
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
#define _CONST64_(x)	x
#else
#define _ATYPE_		__PTRDIFF_TYPE__
#define _ATYPE32_	int
#define _ATYPE64_	__s64
#ifdef CONFIG_64BIT
#define _CONST64_(x)	x ## UL
#else
#define _CONST64_(x)	x ## ULL
#endif

#ifdef CONFIG_64BIT
#define _CONST64_(x)	_UL(x)
#else
#define _CONST64_(x)	_ULL(x)
#endif

/*
45	arch/loongarch/include/asm/exception.h (new file)
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H

#include <asm/ptrace.h>
#include <linux/kprobes.h>

void show_registers(struct pt_regs *regs);

asmlinkage void cache_parity_error(void);
asmlinkage void noinstr do_ade(struct pt_regs *regs);
asmlinkage void noinstr do_ale(struct pt_regs *regs);
asmlinkage void noinstr do_bce(struct pt_regs *regs);
asmlinkage void noinstr do_bp(struct pt_regs *regs);
asmlinkage void noinstr do_ri(struct pt_regs *regs);
asmlinkage void noinstr do_fpu(struct pt_regs *regs);
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr);
asmlinkage void noinstr do_lsx(struct pt_regs *regs);
asmlinkage void noinstr do_lasx(struct pt_regs *regs);
asmlinkage void noinstr do_lbt(struct pt_regs *regs);
asmlinkage void noinstr do_watch(struct pt_regs *regs);
asmlinkage void noinstr do_syscall(struct pt_regs *regs);
asmlinkage void noinstr do_reserved(struct pt_regs *regs);
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp);
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long write, unsigned long address);

asmlinkage void handle_ade(void);
asmlinkage void handle_ale(void);
asmlinkage void handle_bce(void);
asmlinkage void handle_sys(void);
asmlinkage void handle_bp(void);
asmlinkage void handle_ri(void);
asmlinkage void handle_fpu(void);
asmlinkage void handle_fpe(void);
asmlinkage void handle_lsx(void);
asmlinkage void handle_lasx(void);
asmlinkage void handle_lbt(void);
asmlinkage void handle_watch(void);
asmlinkage void handle_reserved(void);
asmlinkage void handle_vint(void);
asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs);

#endif /* __ASM_EXCEPTION_H */
@@ -10,8 +10,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>

-#define __HAVE_ARCH_SHADOW_MAP
-
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
@@ -62,61 +60,22 @@
extern bool kasan_early_stage;
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];

+#define kasan_mem_to_shadow kasan_mem_to_shadow
+void *kasan_mem_to_shadow(const void *addr);
+
+#define kasan_shadow_to_mem kasan_shadow_to_mem
+const void *kasan_shadow_to_mem(const void *shadow_addr);
+
#define kasan_arch_is_ready kasan_arch_is_ready
static __always_inline bool kasan_arch_is_ready(void)
{
	return !kasan_early_stage;
}

-static inline void *kasan_mem_to_shadow(const void *addr)
+#define addr_has_metadata addr_has_metadata
+static __always_inline bool addr_has_metadata(const void *addr)
{
-	if (!kasan_arch_is_ready()) {
-		return (void *)(kasan_early_shadow_page);
-	} else {
-		unsigned long maddr = (unsigned long)addr;
-		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
-		unsigned long offset = 0;
-
-		maddr &= XRANGE_SHADOW_MASK;
-		switch (xrange) {
-		case XKPRANGE_CC_SEG:
-			offset = XKPRANGE_CC_SHADOW_OFFSET;
-			break;
-		case XKPRANGE_UC_SEG:
-			offset = XKPRANGE_UC_SHADOW_OFFSET;
-			break;
-		case XKVRANGE_VC_SEG:
-			offset = XKVRANGE_VC_SHADOW_OFFSET;
-			break;
-		default:
-			WARN_ON(1);
-			return NULL;
-		}
-
-		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
-	}
-}
-
-static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
-{
-	unsigned long addr = (unsigned long)shadow_addr;
-
-	if (unlikely(addr > KASAN_SHADOW_END) ||
-	    unlikely(addr < KASAN_SHADOW_START)) {
-		WARN_ON(1);
-		return NULL;
-	}
-
-	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
-		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
-	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
-		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
-	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
-		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
-	else {
-		WARN_ON(1);
-		return NULL;
-	}
+	return (kasan_mem_to_shadow((void *)addr) != NULL);
}

void kasan_init(void);
@@ -70,6 +70,7 @@ struct secondary_data {
extern struct secondary_data cpuboot_data;

extern asmlinkage void smpboot_entry(void);
+extern asmlinkage void start_secondary(void);

extern void calculate_cpu_foreign_map(void);
@@ -19,6 +19,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o

obj-$(CONFIG_ARCH_STRICT_ALIGN)	+= unaligned.o

+CFLAGS_module.o		+= $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall.o	+= $(call cc-option,-Wno-override-init,)
+CFLAGS_perf_event.o	+= $(call cc-option,-Wno-override-init,)
+
ifdef CONFIG_FUNCTION_TRACER
  ifndef CONFIG_DYNAMIC_FTRACE
    obj-y += mcount.o ftrace.o
@@ -281,7 +281,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

-void __init acpi_numa_arch_fixup(void) {}
#endif

void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
@@ -50,7 +50,6 @@ void __init memblock_init(void)
	}

	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
-	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);

	/* Reserve the first 2MB */
	memblock_reserve(PHYS_OFFSET, 0x200000);

@@ -58,4 +57,7 @@ void __init memblock_init(void)
	/* Reserve the kernel text/data/bss */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&_end) - __pa_symbol(&_text));
+
+	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
}
@@ -6,6 +6,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/ftrace.h>

Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
@@ -37,6 +37,7 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/io.h>
@@ -72,7 +72,6 @@ copy_word:
	LONG_ADDI	s5, s5, -1
	beqz		s5, process_entry
	b		copy_word
-	b		process_entry

done:
	ibar		0
@@ -13,6 +13,7 @@
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>

@@ -891,8 +892,8 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon
	return new_sp;
}

-void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
-			  struct extctx_layout *extctx)
+static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+				 struct extctx_layout *extctx)
{
	unsigned long sp;

@@ -922,7 +923,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 * Atomically swap in the new signal mask, and wait for a signal.
 */

-asmlinkage long sys_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
{
	int sig;
	sigset_t set;
@@ -13,6 +13,7 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>

@@ -556,10 +557,12 @@ void smp_send_stop(void)
	smp_call_function(stop_this_cpu, NULL, 0);
}

+#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
+#endif

static void flush_tlb_all_ipi(void *info)
{
@@ -13,6 +13,7 @@
#include <linux/unistd.h>

#include <asm/asm.h>
+#include <asm/exception.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>
@@ -29,7 +29,7 @@ static void constant_event_handler(struct clock_event_device *dev)
{
}

-irqreturn_t constant_timer_interrupt(int irq, void *data)
+static irqreturn_t constant_timer_interrupt(int irq, void *data)
{
	int cpu = smp_processor_id();
	struct clock_event_device *cd;
@@ -1,4 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>

@@ -7,6 +8,8 @@
#include <linux/percpu.h>
#include <asm/bootinfo.h>

+#include <acpi/processor.h>
+
static DEFINE_PER_CPU(struct cpu, cpu_devices);

#ifdef CONFIG_HOTPLUG_CPU
@@ -25,7 +25,6 @@
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
-#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

@@ -35,6 +34,7 @@
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
+#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>

@@ -53,21 +53,6 @@

#include "access-helper.h"

-extern asmlinkage void handle_ade(void);
-extern asmlinkage void handle_ale(void);
-extern asmlinkage void handle_bce(void);
-extern asmlinkage void handle_sys(void);
-extern asmlinkage void handle_bp(void);
-extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_fpu(void);
-extern asmlinkage void handle_fpe(void);
-extern asmlinkage void handle_lbt(void);
-extern asmlinkage void handle_lsx(void);
-extern asmlinkage void handle_lasx(void);
-extern asmlinkage void handle_reserved(void);
-extern asmlinkage void handle_watch(void);
-extern asmlinkage void handle_vint(void);
-
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{

@@ -439,8 +424,8 @@ static inline void setup_vint_size(unsigned int size)
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
-void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
-		    struct task_struct *tsk)
+static void force_fcsr_sig(unsigned long fcsr,
+			   void __user *fault_addr, struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

@@ -458,7 +443,7 @@ void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
	force_sig_fault(SIGFPE, si_code, fault_addr);
}

-int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

@@ -824,7 +809,7 @@ out:
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
-	unsigned int opcode = 0;
+	unsigned int __maybe_unused opcode;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);
@@ -53,33 +53,6 @@ SECTIONS
	. = ALIGN(PECOFF_SEGMENT_ALIGN);
	_etext = .;

-	/*
-	 * struct alt_inst entries. From the header (alternative.h):
-	 * "Alternative instructions for different CPU types or capabilities"
-	 * Think locking instructions on spinlocks.
-	 */
-	. = ALIGN(4);
-	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
-		__alt_instructions = .;
-		*(.altinstructions)
-		__alt_instructions_end = .;
-	}
-
-#ifdef CONFIG_RELOCATABLE
-	. = ALIGN(8);
-	.la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
-		__la_abs_begin = .;
-		*(.la_abs)
-		__la_abs_end = .;
-	}
-#endif
-
-	.got : ALIGN(16) { *(.got) }
-	.plt : ALIGN(16) { *(.plt) }
-	.got.plt : ALIGN(16) { *(.got.plt) }
-
-	.data.rel : { *(.data.rel*) }
-
	. = ALIGN(PECOFF_SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;

@@ -94,6 +67,18 @@ SECTIONS

	__initdata_begin = .;

+	/*
+	 * struct alt_inst entries. From the header (alternative.h):
+	 * "Alternative instructions for different CPU types or capabilities"
+	 * Think locking instructions on spinlocks.
+	 */
+	. = ALIGN(4);
+	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+		__alt_instructions = .;
+		*(.altinstructions)
+		__alt_instructions_end = .;
+	}
+
	INIT_DATA_SECTION(16)
	.exit.data : {
		EXIT_DATA

@@ -113,6 +98,11 @@ SECTIONS

	_sdata = .;
	RO_DATA(4096)
+
+	.got : ALIGN(16) { *(.got) }
+	.plt : ALIGN(16) { *(.plt) }
+	.got.plt : ALIGN(16) { *(.got.plt) }

	RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)

	.rela.dyn : ALIGN(8) {

@@ -121,6 +111,17 @@ SECTIONS
		__rela_dyn_end = .;
	}

+	.data.rel : { *(.data.rel*) }
+
+#ifdef CONFIG_RELOCATABLE
+	. = ALIGN(8);
+	.la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
+		__la_abs_begin = .;
+		*(.la_abs)
+		__la_abs_end = .;
+	}
+#endif
+
	.sdata : {
		*(.sdata)
	}
@@ -20,12 +20,12 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
-#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>

#include <asm/branch.h>
+#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
@@ -50,18 +50,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
	return (pte_t *) pmd;
}

-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
int pmd_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
@@ -4,6 +4,7 @@
 */

#include <asm/io.h>
+#include <asm-generic/early_ioremap.h>

void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
{
@@ -35,6 +35,57 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

bool kasan_early_stage = true;

+void *kasan_mem_to_shadow(const void *addr)
+{
+	if (!kasan_arch_is_ready()) {
+		return (void *)(kasan_early_shadow_page);
+	} else {
+		unsigned long maddr = (unsigned long)addr;
+		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+		unsigned long offset = 0;
+
+		maddr &= XRANGE_SHADOW_MASK;
+		switch (xrange) {
+		case XKPRANGE_CC_SEG:
+			offset = XKPRANGE_CC_SHADOW_OFFSET;
+			break;
+		case XKPRANGE_UC_SEG:
+			offset = XKPRANGE_UC_SHADOW_OFFSET;
+			break;
+		case XKVRANGE_VC_SEG:
+			offset = XKVRANGE_VC_SHADOW_OFFSET;
+			break;
+		default:
+			WARN_ON(1);
+			return NULL;
+		}
+
+		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+	}
+}
+
+const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+	unsigned long addr = (unsigned long)shadow_addr;
+
+	if (unlikely(addr > KASAN_SHADOW_END) ||
+	    unlikely(addr < KASAN_SHADOW_START)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+	else {
+		WARN_ON(1);
+		return NULL;
+	}
+}
+
/*
 * Alloc memory for shadow memory page table.
 */
@@ -261,7 +261,7 @@ unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];

-void setup_tlb_handler(int cpu)
+static void setup_tlb_handler(int cpu)
{
	setup_ptwalker();
	local_flush_tlb_all();
@@ -37,6 +37,7 @@ extern int split_tlb;
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
+extern struct pdc_btlb_info btlb_info;
void parisc_setup_cache_timing(void);

#define pdtlb(sr, addr)	asm volatile("pdtlb 0(%%sr%0,%1)" \
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_PARISC_MCKINLEY_H
-#define ASM_PARISC_MCKINLEY_H
-
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_mckinley_root;
-
-#endif /*ASM_PARISC_MCKINLEY_H*/
@@ -44,10 +44,11 @@ int pdc_model_capabilities(unsigned long *capabilities);
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
-#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+		    unsigned long entry_info, unsigned long slot);
+int pdc_btlb_purge_all(void);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
-#endif /* !CONFIG_PA20 */
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret);
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret);
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
@@ -310,6 +310,7 @@ extern void do_syscall_trace_exit(struct pt_regs *);
struct seq_file;
extern void early_trap_init(void);
extern void collect_boot_cpu_data(void);
+extern void btlb_init_per_cpu(void);
extern int show_cpuinfo (struct seq_file *m, void *v);

/* driver code in driver/parisc */
@@ -29,7 +29,7 @@
struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
-	u64		*pdir_base;	/* physical base address */
+	__le64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base - shared w/lba_pci */
	unsigned long	imask;		/* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT

@@ -86,6 +86,9 @@ struct sba_device {
	struct ioc		ioc[MAX_IOC];
};

+/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
+extern struct sba_device *sba_list;
+
#define ASTRO_RUNWAY_PORT	0x582
#define IKE_MERCED_PORT		0x803
#define REO_MERCED_PORT		0x804

@@ -110,7 +113,7 @@ static inline int IS_PLUTO(struct parisc_device *d) {

#define SBA_PDIR_VALID_BIT	0x8000000000000000ULL

-#define SBA_AGPGART_COOKIE	0x0000badbadc0ffeeULL
+#define SBA_AGPGART_COOKIE	(__force __le64) 0x0000badbadc0ffeeULL

#define SBA_FUNC_ID	0x0000	/* function id */
#define SBA_FCLASS	0x0008	/* function class, bist, header, rev... */
@@ -2,6 +2,21 @@
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H

+/*
+ * PA-RISC uses virtually indexed & physically tagged (VIPT) caches
+ * which has strict requirements when two pages to the same physical
+ * address are accessed through different mappings. Read the section
+ * "Address Aliasing" in the arch docs for more detail:
+ * PA-RISC 1.1 (page 3-6):
+ * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
+ * PA-RISC 2.0 (page F-5):
+ * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
+ *
+ * For Linux we allow kernel and userspace to map pages on page size
+ * granularity (SHMLBA) but have to ensure that, if two pages are
+ * mapped to the same physical address, the virtual and physical
+ * addresses modulo SHM_COLOUR are identical.
+ */
#define SHMLBA	   PAGE_SIZE	/* attach addr a multiple of this */
#define SHM_COLOUR 0x00400000	/* shared mappings colouring */
@@ -275,6 +275,8 @@ int main(void)
	 * and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
	DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#elif !defined(CONFIG_64BIT)
+	DEFINE(HUGEPAGE_SIZE, 4*1024*1024);
#else
	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
@@ -58,7 +58,7 @@ int pa_serialize_tlb_flushes __ro_after_init;

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __ro_after_init;
+struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);

@@ -264,12 +264,6 @@ parisc_cache_init(void)
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

-#ifndef CONFIG_PA20
-	if (pdc_btlb_info(&btlb_info) < 0) {
-		memset(&btlb_info, 0, sizeof btlb_info);
-	}
-#endif
-
	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
		PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
@@ -925,9 +925,9 @@ static __init void qemu_header(void)
	pr_info("#define PARISC_MODEL \"%s\"\n\n",
		boot_cpu_data.pdc.sys_model_name);

-#define p ((unsigned long *)&boot_cpu_data.pdc.model)
	pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
		"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
+#define p ((unsigned long *)&boot_cpu_data.pdc.model)
		p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
@@ -687,7 +687,6 @@ int pdc_spaceid_bits(unsigned long *space_bits)
	return retval;
}

-#ifndef CONFIG_PA20
/**
 * pdc_btlb_info - Return block TLB information.
 * @btlb: The return buffer.

@@ -696,18 +695,51 @@ int pdc_spaceid_bits(unsigned long *space_bits)
 */
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
-        int retval;
+	int retval;
	unsigned long flags;

-	spin_lock_irqsave(&pdc_lock, flags);
-	retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
-	memcpy(btlb, pdc_result, sizeof(*btlb));
-	spin_unlock_irqrestore(&pdc_lock, flags);
+	if (IS_ENABLED(CONFIG_PA20))
+		return PDC_BAD_PROC;

-	if(retval < 0) {
-		btlb->max_size = 0;
-	}
-	return retval;
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+	memcpy(btlb, pdc_result, sizeof(*btlb));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	if(retval < 0) {
+		btlb->max_size = 0;
+	}
+	return retval;
}

+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+		    unsigned long entry_info, unsigned long slot)
+{
+	int retval;
+	unsigned long flags;
+
+	if (IS_ENABLED(CONFIG_PA20))
+		return PDC_BAD_PROC;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
+			      (unsigned long) vpage, physpage, len, entry_info, slot);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+	return retval;
+}
+
+int pdc_btlb_purge_all(void)
+{
+	int retval;
+	unsigned long flags;
+
+	if (IS_ENABLED(CONFIG_PA20))
+		return PDC_BAD_PROC;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+	return retval;
+}
+
/**

@@ -728,6 +760,9 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,
	int retval;
	unsigned long flags;

+	if (IS_ENABLED(CONFIG_PA20))
+		return PDC_BAD_PROC;
+
	spin_lock_irqsave(&pdc_lock, flags);
	memcpy(pdc_result2, mod_path, sizeof(*mod_path));
	retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),

@@ -737,7 +772,6 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,

	return retval;
}
-#endif /* !CONFIG_PA20 */

/**
 * pdc_lan_station_id - Get the LAN address.
@@ -180,10 +180,10 @@ $pgt_fill_loop:
	std		%dp,0x18(%r10)
#endif

-#ifdef CONFIG_64BIT
-	/* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
+#ifdef CONFIG_64BIT
+	/* Get PDCE_PROC for monarch CPU. */
	ldw		MEM_PDC_LO(%r0),%r3
	ldw		MEM_PDC_HI(%r0),%r10
	depd		%r10, 31, 32, %r3	/* move to upper word */

@@ -269,7 +269,17 @@ stext_pdc_ret:
	tovirt_r1	%r6
	mtctl		%r6,%cr30		/* restore task thread info */
#endif

+#ifndef CONFIG_64BIT
+	/* clear all BTLBs */
+	ldi		PDC_BLOCK_TLB,%arg0
+	load32		PA(stext_pdc_btlb_ret), %rp
+	ldw		MEM_PDC_LO(%r0),%r3
+	bv		(%r3)
+	ldi		PDC_BTLB_PURGE_ALL,%arg1
+stext_pdc_btlb_ret:
+#endif
+
	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
@@ -365,7 +365,7 @@ union irq_stack_union {
	volatile unsigned int lock[1];
};

-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.slock = { 1,1,1,1 },
	};
#endif
@@ -368,6 +368,8 @@ int init_per_cpu(int cpunum)
	/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
	init_percpu_prof(cpunum);

+	btlb_init_per_cpu();
+
	return ret;
}
@@ -154,6 +154,7 @@ SECTIONS
	}

	/* End of data section */
+	. = ALIGN(PAGE_SIZE);
	_edata = .;

	/* BSS */
@@ -32,6 +32,7 @@
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+#include <asm/asm-offsets.h>

extern int data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

@@ -720,6 +721,77 @@ void __init paging_init(void)
	parisc_bootmem_free();
}

+static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+			unsigned long entry_info)
+{
+	const int slot_max = btlb_info.fixed_range_info.num_comb;
+	int min_num_pages = btlb_info.min_size;
+	unsigned long size;
+
+	/* map at minimum 4 pages */
+	if (min_num_pages < 4)
+		min_num_pages = 4;
+
+	size = HUGEPAGE_SIZE;
+	while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+		/* starting address must have same alignment as size! */
+		/* if correctly aligned and fits in double size, increase */
+		if (((start & (2 * size - 1)) == 0) &&
+		    (end - start) >= (2 * size)) {
+			size <<= 1;
+			continue;
+		}
+		/* if current size alignment is too big, try smaller size */
+		if ((start & (size - 1)) != 0) {
+			size >>= 1;
+			continue;
+		}
+		if ((end - start) >= size) {
+			if ((size >> PAGE_SHIFT) >= min_num_pages)
+				pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+					size >> PAGE_SHIFT, entry_info, *slot);
+			(*slot)++;
+			start += size;
+			continue;
+		}
+		size /= 2;
+		continue;
+	}
+}
+
+void btlb_init_per_cpu(void)
+{
+	unsigned long s, t, e;
+	int slot;
+
+	/* BTLBs are not available on 64-bit CPUs */
+	if (IS_ENABLED(CONFIG_PA20))
+		return;
+	else if (pdc_btlb_info(&btlb_info) < 0) {
+		memset(&btlb_info, 0, sizeof btlb_info);
+	}
+
+	/* insert BLTLBs for code and data segments */
+	s = (uintptr_t) dereference_function_descriptor(&_stext);
+	e = (uintptr_t) dereference_function_descriptor(&_etext);
+	t = (uintptr_t) dereference_function_descriptor(&_sdata);
+	BUG_ON(t != e);
+
+	/* code segments */
+	slot = 0;
+	alloc_btlb(s, e, &slot, 0x13800000);
+
+	/* sanity check */
+	t = (uintptr_t) dereference_function_descriptor(&_edata);
+	e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+	BUG_ON(t != e);
+
+	/* data segments */
+	s = (uintptr_t) dereference_function_descriptor(&_sdata);
+	e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+	alloc_btlb(s, e, &slot, 0x11800000);
+}
+
#ifdef CONFIG_PA20

/*
@@ -255,7 +255,7 @@ config PPC
	select HAVE_KPROBES
	select HAVE_KPROBES_ON_FTRACE
	select HAVE_KRETPROBES
-	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT
+	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT && (!ARCH_USING_PATCHABLE_FUNCTION_ENTRY || (!CC_IS_GCC || GCC_VERSION >= 110100))
	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
	select HAVE_MOD_ARCH_SPECIFIC
	select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
@@ -230,13 +230,15 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
	struct arch_hw_breakpoint *info;
	int i;

+	preempt_disable();
+
	for (i = 0; i < nr_wp_slots(); i++) {
		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);

		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
			goto reset;
	}
-	return;
+	goto out;

reset:
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);

@@ -245,6 +247,9 @@ reset:
		__set_breakpoint(i, info);
		info->perf_single_step = false;
	}
+
+out:
+	preempt_enable();
}

static bool is_larx_stcx_instr(int type)

@@ -363,6 +368,11 @@ static void handle_p10dd1_spurious_exception(struct perf_event **bp,
	}
}

+/*
+ * Handle a DABR or DAWR exception.
+ *
+ * Called in atomic context.
+ */
int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;

@@ -490,6 +500,8 @@ NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
+ *
+ * Called in atomic context.
 */
static int single_step_dabr_instruction(struct die_args *args)
{

@@ -541,6 +553,8 @@ NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
+ *
+ * Called in atomic context.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
@@ -131,8 +131,13 @@ void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
			 int *type, int *size, unsigned long *ea)
{
	struct instruction_op op;
+	int err;

-	if (__get_user_instr(*instr, (void __user *)regs->nip))
+	pagefault_disable();
+	err = __get_user_instr(*instr, (void __user *)regs->nip);
+	pagefault_enable();
+
+	if (err)
		return;

	analyse_instr(&op, regs, *instr);
@@ -1512,23 +1512,11 @@ static void do_program_check(struct pt_regs *regs)
			return;
		}

-		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && user_mode(regs)) {
-			ppc_inst_t insn;
-
-			if (get_user_instr(insn, (void __user *)regs->nip)) {
-				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-				return;
-			}
-
-			if (ppc_inst_primary_opcode(insn) == 31 &&
-			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
-				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
-				return;
-			}
-		}
+		/* User mode considers other cases after enabling IRQs */
+		if (!user_mode(regs)) {
+			_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+			return;
+		}

-		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-		return;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {

@@ -1561,16 +1549,44 @@ static void do_program_check(struct pt_regs *regs)

	/*
	 * If we took the program check in the kernel skip down to sending a
-	 * SIGILL. The subsequent cases all relate to emulating instructions
-	 * which we should only do for userspace. We also do not want to enable
-	 * interrupts for kernel faults because that might lead to further
-	 * faults, and loose the context of the original exception.
+	 * SIGILL. The subsequent cases all relate to user space, such as
+	 * emulating instructions which we should only do for user space. We
+	 * also do not want to enable interrupts for kernel faults because that
+	 * might lead to further faults, and loose the context of the original
+	 * exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	interrupt_cond_local_irq_enable(regs);

+	/*
+	 * (reason & REASON_TRAP) is mostly handled before enabling IRQs,
+	 * except get_user_instr() can sleep so we cannot reliably inspect the
+	 * current instruction in that context. Now that we know we are
+	 * handling a user space trap and can sleep, we can check if the trap
+	 * was a hashchk failure.
+	 */
+	if (reason & REASON_TRAP) {
+		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
+			ppc_inst_t insn;
+
+			if (get_user_instr(insn, (void __user *)regs->nip)) {
+				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+				return;
+			}
+
+			if (ppc_inst_primary_opcode(insn) == 31 &&
+			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
+				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
+				return;
+			}
+		}
+
+		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+		return;
+	}
+
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
@@ -1418,7 +1418,7 @@ static int h_24x7_event_init(struct perf_event *event)
 	}

 	domain = event_get_domain(event);
-	if (domain >= HV_PERF_DOMAIN_MAX) {
+	if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
 		pr_devel("invalid domain %d\n", domain);
 		return -EINVAL;
 	}
@@ -2,6 +2,7 @@
 menuconfig PPC_82xx
 	bool "82xx-based boards (PQ II)"
 	depends on PPC_BOOK3S_32
+	select FSL_SOC

 if PPC_82xx

@@ -9,7 +10,6 @@ config EP8248E
 	bool "Embedded Planet EP8248E (a.k.a. CWH-PPC-8248N-VE)"
 	select CPM2
 	select PPC_INDIRECT_PCI if PCI
-	select FSL_SOC
 	select PHYLIB if NETDEVICES
 	select MDIO_BITBANG if PHYLIB
 	help
@@ -22,7 +22,6 @@ config MGCOGE
 	bool "Keymile MGCOGE"
 	select CPM2
 	select PPC_INDIRECT_PCI if PCI
-	select FSL_SOC
 	help
 	  This enables support for the Keymile MGCOGE board.
@@ -105,7 +105,7 @@ asm volatile(ALTERNATIVE( \
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001   01001     rs1       000      00000    0001011
  * dcache.cva rs1 (clean, virtual address)
- *   0000001   00100     rs1       000      00000    0001011
+ *   0000001   00101     rs1       000      00000    0001011
  *
  * dcache.cipa rs1 (clean then invalidate, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
@@ -118,7 +118,7 @@ asm volatile(ALTERNATIVE( \
  *   0000000   11001    00000      000      00000    0001011
  */
 #define THEAD_inval_A0	".long 0x0265000b"
-#define THEAD_clean_A0	".long 0x0245000b"
+#define THEAD_clean_A0	".long 0x0255000b"
 #define THEAD_flush_A0	".long 0x0275000b"
 #define THEAD_SYNC_S	".long 0x0190000b"
@@ -98,7 +98,13 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
 	kbuf.image = image;
 	kbuf.buf_min = lowest_paddr;
 	kbuf.buf_max = ULONG_MAX;
-	kbuf.buf_align = PAGE_SIZE;
+
+	/*
+	 * Current riscv boot protocol requires 2MB alignment for
+	 * RV64 and 4MB alignment for RV32
+	 *
+	 */
+	kbuf.buf_align = PMD_SIZE;
 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
 	kbuf.top_down = false;
@@ -460,8 +460,11 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
 	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
 		return -ENOENT;

-	*reg_val = 0;
 	host_isa_ext = kvm_isa_ext_arr[reg_num];
+	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+		return -ENOENT;
+
+	*reg_val = 0;
 	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
 		*reg_val = 1; /* Mark the given extension as available */

@@ -842,7 +845,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

 		isa_ext = kvm_isa_ext_arr[i];
-		if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+		if (!__riscv_isa_extension_available(NULL, isa_ext))
 			continue;

 		if (uindices) {
@@ -40,23 +40,25 @@ CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
 CONFIG_PROFILING=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
 CONFIG_LIVEPATCH=y
 CONFIG_MARCH_ZEC12=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
+CONFIG_CERT_STORE=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_S390_HYPFS_FS=y
 CONFIG_KVM=m
 CONFIG_S390_UNWIND_SELFTEST=m
 CONFIG_S390_KPROBES_SANITY_TEST=m
@@ -434,6 +436,7 @@ CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_BITMAP_FILE is not set
 CONFIG_MD_LINEAR=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
@@ -577,6 +580,7 @@ CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
 # CONFIG_DRM_DEBUG_MODESET_LOCK is not set
 CONFIG_FB=y
+# CONFIG_FB_DEVICE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID_SUPPORT is not set
@@ -647,6 +651,7 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_TMPFS_INODE64=y
+CONFIG_TMPFS_QUOTA=y
 CONFIG_HUGETLBFS=y
 CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
@@ -703,6 +708,7 @@ CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
 CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 CONFIG_INIT_STACK_NONE=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_PCRYPT=m
@@ -825,9 +831,9 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
+CONFIG_DEBUG_IRQFLAGS=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_REF_SCALE_TEST=m
@@ -38,23 +38,25 @@ CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
 CONFIG_PROFILING=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
 CONFIG_LIVEPATCH=y
 CONFIG_MARCH_ZEC12=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
+CONFIG_CERT_STORE=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_S390_HYPFS_FS=y
 CONFIG_KVM=m
 CONFIG_S390_UNWIND_SELFTEST=m
 CONFIG_S390_KPROBES_SANITY_TEST=m
@@ -424,6 +426,7 @@ CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_BITMAP_FILE is not set
 CONFIG_MD_LINEAR=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
@@ -566,6 +569,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
 CONFIG_FB=y
+# CONFIG_FB_DEVICE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID_SUPPORT is not set
@@ -632,6 +636,7 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_TMPFS_INODE64=y
+CONFIG_TMPFS_QUOTA=y
 CONFIG_HUGETLBFS=y
 CONFIG_CONFIGFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -687,6 +692,7 @@ CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
 CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 CONFIG_INIT_STACK_NONE=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -781,7 +787,6 @@ CONFIG_PTDUMP_DEBUGFS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_TEST_LOCKUP=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_REF_SCALE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_CRASH_DUMP=y
 CONFIG_MARCH_ZEC12=y
 CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
@@ -15,9 +16,8 @@ CONFIG_NR_CPUS=2
 CONFIG_HZ_100=y
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
-CONFIG_CRASH_DUMP=y
 # CONFIG_PFAULT is not set
-# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_S390_HYPFS is not set
 # CONFIG_VIRTUALIZATION is not set
 # CONFIG_S390_GUEST is not set
 # CONFIG_SECCOMP is not set
@@ -432,15 +432,16 @@ static char *get_key_description(struct vcssb *vcssb, const struct vce *vce)
 	char *desc;

 	cs_token = vcssb->cs_token;
-	/* Description string contains "%64s:%04u:%08u\0". */
+	/* Description string contains "%64s:%05u:%010u\0". */
 	name_len = sizeof(vce->vce_hdr.vc_name);
-	len = name_len + 1 + 4 + 1 + 8 + 1;
+	len = name_len + 1 + 5 + 1 + 10 + 1;
 	desc = kmalloc(len, GFP_KERNEL);
 	if (!desc)
 		return NULL;

 	memcpy(desc, vce->vce_hdr.vc_name, name_len);
-	sprintf(desc + name_len, ":%04u:%08u", vce->vce_hdr.vc_index, cs_token);
+	snprintf(desc + name_len, len - name_len, ":%05u:%010u",
+		 vce->vce_hdr.vc_index, cs_token);

 	return desc;
 }
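[Editor's note — not part of the diff] The new sizing above follows from the format string: a 64-byte name, ':', at most 5 digits for a 16-bit index (65535), ':', at most 10 digits for a 32-bit token (4294967295), plus the trailing NUL; the old "%04u:%08u" widths could be exceeded and sprintf() would then overflow the buffer. A standalone, runnable sketch of the same arithmetic (names and values here are illustrative, assuming a u16 index and u32 token as the digit widths suggest):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		char name[64] = "example-cert";	/* stands in for vc_name */
		uint16_t index = 65535;		/* worst-case index */
		uint32_t token = 4294967295u;	/* worst-case token */
		/* 64 (name) + 1 (':') + 5 (u16) + 1 (':') + 10 (u32) + 1 ('\0') */
		char desc[64 + 1 + 5 + 1 + 10 + 1];

		int n = snprintf(desc, sizeof(desc), "%-64s:%05u:%010u",
				 name, index, token);
		printf("%s (len %d, buf %zu)\n", desc, n, sizeof(desc));
		return 0;
	}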
@@ -72,8 +72,8 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
 #define __ioremap_29bit(offset, size, prot)	NULL
 #endif /* CONFIG_29BIT */

-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
-			   unsigned long prot)
+void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
+				 unsigned long prot)
 {
 	void __iomem *mapped;
 	pgprot_t pgprot = __pgprot(prot);
@@ -1945,6 +1945,7 @@ config EFI
 	select UCS2_STRING
 	select EFI_RUNTIME_WRAPPERS
 	select ARCH_USE_MEMREMAP_PROT
+	select EFI_RUNTIME_MAP if KEXEC_CORE
 	help
 	  This enables the kernel to use EFI runtime services that are
 	  available (such as the EFI variable services).
@@ -2020,7 +2021,6 @@ config EFI_MAX_FAKE_MEM
 config EFI_RUNTIME_MAP
 	bool "Export EFI runtime maps to sysfs" if EXPERT
 	depends on EFI
-	default KEXEC_CORE
 	help
 	  Export EFI runtime memory regions to /sys/firmware/efi/runtime-map.
 	  That memory map is required by the 2nd kernel to set up EFI virtual
@@ -59,6 +59,14 @@ static void *alloc_pgt_page(void *context)
 		return NULL;
 	}

+	/* Consumed more tables than expected? */
+	if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
+		debug_putstr("pgt_buf running low in " __FILE__ "\n");
+		debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
+		debug_putaddr(pages->pgt_buf_offset);
+		debug_putaddr(pages->pgt_buf_size);
+	}
+
 	entry = pages->pgt_buf + pages->pgt_buf_offset;
 	pages->pgt_buf_offset += PAGE_SIZE;
@@ -294,7 +294,7 @@ static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)

 	inc_irq_stat(irq_hv_callback_count);

-	xen_hvm_evtchn_do_upcall();
+	xen_evtchn_do_upcall();

 	set_irq_regs(old_regs);
 }
@@ -40,23 +40,40 @@
 #ifdef CONFIG_X86_64
 # define BOOT_STACK_SIZE	0x4000

-# define BOOT_INIT_PGT_SIZE	(6*4096)
-# ifdef CONFIG_RANDOMIZE_BASE
 /*
- * Assuming all cross the 512GB boundary:
- * 1 page for level4
- * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
- * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
- * Total is 19 pages.
+ * Used by decompressor's startup_32() to allocate page tables for identity
+ * mapping of the 4G of RAM in 4-level paging mode:
+ * - 1 level4 table;
+ * - 1 level3 table;
+ * - 4 level2 table that maps everything with 2M pages;
+ *
+ * The additional level5 table needed for 5-level paging is allocated from
+ * trampoline_32bit memory.
  */
-#  ifdef CONFIG_X86_VERBOSE_BOOTUP
-#   define BOOT_PGT_SIZE	(19*4096)
-#  else /* !CONFIG_X86_VERBOSE_BOOTUP */
-#   define BOOT_PGT_SIZE	(17*4096)
-#  endif
-# else /* !CONFIG_RANDOMIZE_BASE */
-#  define BOOT_PGT_SIZE	BOOT_INIT_PGT_SIZE
-# endif
+# define BOOT_INIT_PGT_SIZE	(6*4096)
+
+/*
+ * Total number of page tables kernel_add_identity_map() can allocate,
+ * including page tables consumed by startup_32().
+ *
+ * Worst-case scenario:
+ * - 5-level paging needs 1 level5 table;
+ * - KASLR needs to map kernel, boot_params, cmdline and randomized kernel,
+ *   assuming all of them cross 256T boundary:
+ *   + 4*2 level4 table;
+ *   + 4*2 level3 table;
+ *   + 4*2 level2 table;
+ * - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM):
+ *   + 1 level4 table;
+ *   + 1 level3 table;
+ *   + 1 level2 table;
+ * Total: 28 tables
+ *
+ * Add 4 spare table in case decompressor touches anything beyond what is
+ * accounted above. Warn if it happens.
+ */
+# define BOOT_PGT_SIZE_WARN	(28*4096)
+# define BOOT_PGT_SIZE		(32*4096)

 #else /* !CONFIG_X86_64 */
 # define BOOT_STACK_SIZE	0x1000
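[Editor's note — not part of the diff] The worst-case count in the new comment checks out: four KASLR-mapped regions, each potentially needing its own table at three levels (4*2*3 = 24), plus one level5 table and three tables for the verbose-boot video RAM mapping gives 28; BOOT_PGT_SIZE budgets 32 pages, i.e. 4 spares past the warning threshold. A runnable restatement of that arithmetic:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		int level5  = 1;		/* one level5 table for 5-level paging */
		int kaslr   = 4 * 2 * 3;	/* 4 regions x 2 tables x 3 levels */
		int verbose = 1 + 1 + 1;	/* video RAM: level4 + level3 + level2 */
		int warn    = level5 + kaslr + verbose;

		assert(warn == 28);		/* matches BOOT_PGT_SIZE_WARN (28*4096) */
		printf("warn at %d tables, budget %d (4 spare)\n", warn, warn + 4);
		return 0;
	}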
@@ -91,19 +91,6 @@ static inline void efi_fpu_end(void)

 #ifdef CONFIG_X86_32
 #define EFI_X86_KERNEL_ALLOC_LIMIT		(SZ_512M - 1)
-
-#define arch_efi_call_virt_setup()					\
-({									\
-	efi_fpu_begin();						\
-	firmware_restrict_branch_speculation_start();			\
-})
-
-#define arch_efi_call_virt_teardown()					\
-({									\
-	firmware_restrict_branch_speculation_end();			\
-	efi_fpu_end();							\
-})
-
 #else /* !CONFIG_X86_32 */
 #define EFI_X86_KERNEL_ALLOC_LIMIT		EFI_ALLOC_LIMIT

@@ -116,14 +103,6 @@ extern bool efi_disable_ibt_for_runtime;
 	__efi_call(__VA_ARGS__);					\
 })

-#define arch_efi_call_virt_setup()					\
-({									\
-	efi_sync_low_kernel_mappings();					\
-	efi_fpu_begin();						\
-	firmware_restrict_branch_speculation_start();			\
-	efi_enter_mm();							\
-})
-
 #undef arch_efi_call_virt
 #define arch_efi_call_virt(p, f, args...) ({				\
 	u64 ret, ibt = ibt_save(efi_disable_ibt_for_runtime);		\
@@ -132,13 +111,6 @@ extern bool efi_disable_ibt_for_runtime;
 	ret;								\
 })

-#define arch_efi_call_virt_teardown()					\
-({									\
-	efi_leave_mm();							\
-	firmware_restrict_branch_speculation_end();			\
-	efi_fpu_end();							\
-})
-
 #ifdef CONFIG_KASAN
 /*
  * CONFIG_KASAN may redefine memset to __memset. __memset function is present
@@ -168,8 +140,8 @@ extern void efi_delete_dummy_variable(void);
 extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
 extern void efi_free_boot_services(void);

-void efi_enter_mm(void);
-void efi_leave_mm(void);
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);

 /* kexec external ABI */
 struct efi_setup_data {
@@ -1419,7 +1419,6 @@ struct kvm_arch {
 	 * the thread holds the MMU lock in write mode.
 	 */
 	spinlock_t tdp_mmu_pages_lock;
-	struct workqueue_struct *tdp_mmu_zap_wq;
 #endif /* CONFIG_X86_64 */

 	/*
@@ -1835,7 +1834,7 @@ void kvm_mmu_vendor_module_exit(void);

 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);

 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
@@ -8,6 +8,14 @@
 #undef notrace
 #define notrace __attribute__((no_instrument_function))

+#ifdef CONFIG_64BIT
+/*
+ * The generic version tends to create spurious ENDBR instructions under
+ * certain conditions.
+ */
+#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })
+#endif
+
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 #endif /* CONFIG_X86_32 */
@@ -97,6 +105,13 @@
 	CFI_POST_PADDING					\
 	SYM_FUNC_END(__cfi_##name)

+/* UML needs to be able to override memcpy() and friends for KASAN. */
+#ifdef CONFIG_UML
+# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS_WEAK
+#else
+# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS
+#endif
+
 /* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
 #define SYM_TYPED_FUNC_START(name)				\
 	SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)	\
@@ -186,8 +186,7 @@ do {						\
 #else
 #define deactivate_mm(tsk, mm)			\
 do {						\
-	if (!tsk->vfork_done)			\
-		shstk_free(tsk);		\
+	shstk_free(tsk);			\
 	load_gs_index(0);			\
 	loadsegment(fs, 0);			\
 } while (0)
@@ -9,13 +9,6 @@ struct paravirt_patch_site {
 	u8 type;		/* type of this instruction */
 	u8 len;			/* length of original instruction */
 };
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE,
-	PARAVIRT_LAZY_MMU,
-	PARAVIRT_LAZY_CPU,
-};
 #endif

 #ifdef CONFIG_PARAVIRT
@@ -549,14 +542,6 @@ int paravirt_disable_iospace(void);
 	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
 		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))

-enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
 void paravirt_start_context_switch(struct task_struct *prev);
 void paravirt_end_context_switch(struct task_struct *next);

-void paravirt_enter_lazy_mmu(void);
-void paravirt_leave_lazy_mmu(void);
-void paravirt_flush_lazy_mmu(void);
-
 void _paravirt_nop(void);
 void paravirt_BUG(void);
 unsigned long paravirt_ret0(void);
@@ -683,13 +683,11 @@ extern u16 get_llc_id(unsigned int cpu);
 #ifdef CONFIG_CPU_SUP_AMD
 extern u32 amd_get_nodes_per_socket(void);
 extern u32 amd_get_highest_perf(void);
-extern bool cpu_has_ibpb_brtype_microcode(void);
 extern void amd_clear_divider(void);
 extern void amd_check_microcode(void);
 #else
 static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
 static inline u32 amd_get_highest_perf(void)		{ return 0; }
-static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
 static inline void amd_clear_divider(void)		{ }
 static inline void amd_check_microcode(void)		{ }
 #endif
@@ -36,6 +36,7 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;

+#include <asm/bug.h>
 #include <asm/processor.h>

 #define XEN_SIGNATURE "XenVMMXenVMM"
@@ -63,4 +64,40 @@ void __init xen_pvh_init(struct boot_params *boot_params);
 void __init mem_map_via_hcall(struct boot_params *boot_params_p);
 #endif

+/* Lazy mode for batching updates / context switch */
+enum xen_lazy_mode {
+	XEN_LAZY_NONE,
+	XEN_LAZY_MMU,
+	XEN_LAZY_CPU,
+};
+
+DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode);
+DECLARE_PER_CPU(unsigned int, xen_lazy_nesting);
+
+static inline void enter_lazy(enum xen_lazy_mode mode)
+{
+	enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode);
+
+	if (mode == old_mode) {
+		this_cpu_inc(xen_lazy_nesting);
+		return;
+	}
+
+	BUG_ON(old_mode != XEN_LAZY_NONE);
+
+	this_cpu_write(xen_lazy_mode, mode);
+}
+
+static inline void leave_lazy(enum xen_lazy_mode mode)
+{
+	BUG_ON(this_cpu_read(xen_lazy_mode) != mode);
+
+	if (this_cpu_read(xen_lazy_nesting) == 0)
+		this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
+	else
+		this_cpu_dec(xen_lazy_nesting);
+}
+
+enum xen_lazy_mode xen_get_lazy_mode(void);
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
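[Editor's note — not part of the diff] The enter_lazy()/leave_lazy() helpers added above implement a per-CPU nesting counter: re-entering the mode that is already active bumps the counter instead of hitting BUG_ON(), and leave_lazy() only drops back to XEN_LAZY_NONE once the counter is exhausted. A standalone, runnable model of the same state machine, with the per-CPU accessors replaced by plain globals purely for illustration:

	#include <assert.h>
	#include <stdio.h>

	enum lazy_mode { LAZY_NONE, LAZY_MMU, LAZY_CPU };

	static enum lazy_mode mode = LAZY_NONE;	/* models this-CPU xen_lazy_mode */
	static unsigned int nesting;		/* models xen_lazy_nesting */

	static void enter_lazy(enum lazy_mode m)
	{
		if (m == mode) {		/* nested entry of the same mode */
			nesting++;
			return;
		}
		assert(mode == LAZY_NONE);	/* mixing modes would be a bug */
		mode = m;
	}

	static void leave_lazy(enum lazy_mode m)
	{
		assert(mode == m);
		if (nesting == 0)
			mode = LAZY_NONE;
		else
			nesting--;
	}

	int main(void)
	{
		enter_lazy(LAZY_MMU);
		enter_lazy(LAZY_MMU);		/* nests instead of failing */
		leave_lazy(LAZY_MMU);
		assert(mode == LAZY_MMU);	/* still lazy after one leave */
		leave_lazy(LAZY_MMU);
		assert(mode == LAZY_NONE);
		printf("ok\n");
		return 0;
	}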
@@ -720,13 +720,8 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
 	s32 *s;

-	/*
-	 * Do not patch out the default return thunks if those needed are the
-	 * ones generated by the compiler.
-	 */
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
-	    (x86_return_thunk == __x86_return_thunk))
-		return;
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		static_call_force_reinit();

 	for (s = start; s < end; s++) {
 		void *dest = NULL, *addr = (void *)s + *s;
@@ -1533,7 +1533,7 @@ static void __init build_socket_tables(void)
 {
 	struct uv_gam_range_entry *gre = uv_gre_table;
 	int nums, numn, nump;
-	int cpu, i, lnid;
+	int i, lnid, apicid;
 	int minsock = _min_socket;
 	int maxsock = _max_socket;
 	int minpnode = _min_pnode;
@@ -1584,15 +1584,14 @@ static void __init build_socket_tables(void)

 	/* Set socket -> node values: */
 	lnid = NUMA_NO_NODE;
-	for_each_possible_cpu(cpu) {
-		int nid = cpu_to_node(cpu);
-		int apicid, sockid;
+	for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) {
+		int nid = __apicid_to_node[apicid];
+		int sockid;

-		if (lnid == nid)
+		if ((nid == NUMA_NO_NODE) || (lnid == nid))
 			continue;
 		lnid = nid;

-		apicid = per_cpu(x86_cpu_to_apicid, cpu);
 		sockid = apicid >> uv_cpuid.socketid_shift;

 		if (_socket_to_node[sockid - minsock] == SOCK_EMPTY)
@@ -272,7 +272,6 @@ void __init callthunks_patch_builtin_calls(void)
 	pr_info("Setting up call depth tracking\n");
 	mutex_lock(&text_mutex);
 	callthunks_setup(&cs, &builtin_coretext);
-	static_call_force_reinit();
 	thunks_initialized = true;
 	mutex_unlock(&text_mutex);
 }
@@ -766,6 +766,15 @@ static void early_init_amd(struct cpuinfo_x86 *c)

 	if (cpu_has(c, X86_FEATURE_TOPOEXT))
 		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+
+	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
+		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
+			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+			setup_force_cpu_cap(X86_FEATURE_SBPB);
+		}
+	}
 }

 static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -1301,25 +1310,6 @@ void amd_check_microcode(void)
 	on_each_cpu(zenbleed_check_cpu, NULL, 1);
 }

-bool cpu_has_ibpb_brtype_microcode(void)
-{
-	switch (boot_cpu_data.x86) {
-	/* Zen1/2 IBPB flushes branch type predictions too. */
-	case 0x17:
-		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
-	case 0x19:
-		/* Poke the MSR bit on Zen3/4 to check its presence. */
-		if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
-			setup_force_cpu_cap(X86_FEATURE_SBPB);
-			return true;
-		} else {
-			return false;
-		}
-	default:
-		return false;
-	}
-}
-
 /*
  * Issue a DIV 0/1 insn to clear any division data from previous DIV
  * operations.
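[Editor's note — not part of the diff] The new early_init_amd() block above folds the removed cpu_has_ibpb_brtype_microcode() helper into boot-time feature setup: on bare metal without the CPUID bit, family 0x17 (Zen1/2) infers branch-type-flushing IBPB from AMD_IBPB, while family 0x19 and later probe the SBPB bit with a safe MSR write. A simplified, standalone model of that decision; the MSR probe is replaced by a boolean parameter, so this is an illustration of the control flow only:

	#include <stdbool.h>
	#include <stdio.h>

	/* Returns true if the synthetic IBPB_BRTYPE flag should be forced. */
	static bool force_ibpb_brtype(unsigned int family, bool has_amd_ibpb,
				      bool sbpb_msr_write_ok)
	{
		if (family == 0x17)
			return has_amd_ibpb;	/* Zen1/2: IBPB flushes branch types */
		if (family >= 0x19)
			return sbpb_msr_write_ok;	/* Zen3/4+: probe PRED_CMD.SBPB */
		return false;
	}

	int main(void)
	{
		printf("fam 0x17 + IBPB: %d\n", force_ibpb_brtype(0x17, true, false));
		printf("fam 0x19 + SBPB: %d\n", force_ibpb_brtype(0x19, false, true));
		return 0;
	}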
@@ -2404,26 +2404,15 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);

 static void __init srso_select_mitigation(void)
 {
-	bool has_microcode;
+	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
 		goto pred_cmd;

-	/*
-	 * The first check is for the kernel running as a guest in order
-	 * for guests to verify whether IBPB is a viable mitigation.
-	 */
-	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
 	if (!has_microcode) {
 		pr_warn("IBPB-extending microcode not applied!\n");
 		pr_warn(SRSO_NOTICE);
 	} else {
-		/*
-		 * Enable the synthetic (even if in a real CPUID leaf)
-		 * flags for guests.
-		 */
-		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
-
 		/*
 		 * Zen1/2 with SMT off aren't vulnerable after the right
 		 * IBPB microcode has been applied.
@@ -2444,7 +2433,7 @@ static void __init srso_select_mitigation(void)

 	switch (srso_cmd) {
 	case SRSO_CMD_OFF:
-		return;
+		goto pred_cmd;

 	case SRSO_CMD_MICROCODE:
 		if (has_microcode) {
@@ -2717,7 +2706,7 @@ static ssize_t srso_show_state(char *buf)

 	return sysfs_emit(buf, "%s%s\n",
 			  srso_strings[srso_mitigation],
-			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+			  boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
 }

 static ssize_t gds_show_state(char *buf)
@@ -143,66 +143,7 @@ int paravirt_disable_iospace(void)
 	return request_resource(&ioport_resource, &reserve_ioports);
 }

-static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
-
-static inline void enter_lazy(enum paravirt_lazy_mode mode)
-{
-	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-
-	this_cpu_write(paravirt_lazy_mode, mode);
-}
-
-static void leave_lazy(enum paravirt_lazy_mode mode)
-{
-	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
-
-	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
-}
-
-void paravirt_enter_lazy_mmu(void)
-{
-	enter_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_leave_lazy_mmu(void)
-{
-	leave_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_flush_lazy_mmu(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-
-	preempt_enable();
-}
-
-#ifdef CONFIG_PARAVIRT_XXL
-void paravirt_start_context_switch(struct task_struct *prev)
-{
-	BUG_ON(preemptible());
-
-	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
-	}
-	enter_lazy(PARAVIRT_LAZY_CPU);
-}
-
-void paravirt_end_context_switch(struct task_struct *next)
-{
-	BUG_ON(preemptible());
-
-	leave_lazy(PARAVIRT_LAZY_CPU);
-
-	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
-		arch_enter_lazy_mmu_mode();
-}
-
 static noinstr void pv_native_write_cr2(unsigned long val)
 {
 	native_write_cr2(val);
@@ -229,14 +170,6 @@ static noinstr void pv_native_safe_halt(void)
 }
 #endif

-enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
-	if (in_interrupt())
-		return PARAVIRT_LAZY_NONE;
-
-	return this_cpu_read(paravirt_lazy_mode);
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 #ifdef CONFIG_PARAVIRT_XXL
@@ -257,13 +257,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
 		io_bitmap_share(p);

-	/*
-	 * If copy_thread() if failing, don't leak the shadow stack possibly
-	 * allocated in shstk_alloc_thread_stack() above.
-	 */
-	if (ret)
-		shstk_free(p);
-
 	return ret;
 }
@@ -358,15 +358,11 @@ static void __init add_early_ima_buffer(u64 phys_addr)
 #if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
 int __init ima_free_kexec_buffer(void)
 {
-	int rc;
-
 	if (!ima_kexec_buffer_size)
 		return -ENOENT;

-	rc = memblock_phys_free(ima_kexec_buffer_phys,
-				ima_kexec_buffer_size);
-	if (rc)
-		return rc;
+	memblock_free_late(ima_kexec_buffer_phys,
+			   ima_kexec_buffer_size);

 	ima_kexec_buffer_phys = 0;
 	ima_kexec_buffer_size = 0;
@@ -205,10 +205,21 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long cl
 		return 0;

 	/*
-	 * For CLONE_VM, except vfork, the child needs a separate shadow
+	 * For CLONE_VFORK the child will share the parents shadow stack.
+	 * Make sure to clear the internal tracking of the thread shadow
+	 * stack so the freeing logic run for child knows to leave it alone.
+	 */
+	if (clone_flags & CLONE_VFORK) {
+		shstk->base = 0;
+		shstk->size = 0;
+		return 0;
+	}
+
+	/*
+	 * For !CLONE_VM the child will use a copy of the parents shadow
 	 * stack.
 	 */
-	if ((clone_flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM)
+	if (!(clone_flags & CLONE_VM))
 		return 0;

 	size = adjust_shstk_size(stack_size);
@@ -408,7 +419,25 @@ void shstk_free(struct task_struct *tsk)
 	if (!tsk->mm || tsk->mm != current->mm)
 		return;

+	/*
+	 * If shstk->base is NULL, then this task is not managing its
+	 * own shadow stack (CLONE_VFORK). So skip freeing it.
+	 */
+	if (!shstk->base)
+		return;
+
+	/*
+	 * shstk->base is NULL for CLONE_VFORK child tasks, and so is
+	 * normal. But size = 0 on a shstk->base is not normal and
+	 * indicated an attempt to free the thread shadow stack twice.
+	 * Warn about it.
+	 */
+	if (WARN_ON(!shstk->size))
+		return;
+
 	unmap_shadow_stack(shstk->base, shstk->size);
+
+	shstk->size = 0;
 }

 static int wrss_control(bool enable)
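[Editor's note — not part of the diff] With the new early return for CLONE_VFORK in place, the old combined predicate (clone_flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM simplifies to !(clone_flags & CLONE_VM), because vfork children can no longer reach that test. A standalone, runnable check of the equivalence for the remaining cases:

	#define _GNU_SOURCE
	#include <assert.h>
	#include <sched.h>	/* CLONE_VM, CLONE_VFORK */
	#include <stdio.h>

	int main(void)
	{
		unsigned long cases[] = {
			0, CLONE_VM, CLONE_VFORK, CLONE_VM | CLONE_VFORK
		};

		for (int i = 0; i < 4; i++) {
			unsigned long f = cases[i];

			if (f & CLONE_VFORK)	/* handled by the new early return */
				continue;

			int old_test = (f & (CLONE_VFORK | CLONE_VM)) != CLONE_VM;
			int new_test = !(f & CLONE_VM);
			assert(old_test == new_test);
		}
		printf("equivalent for all non-vfork cases\n");
		return 0;
	}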
@@ -579,7 +579,6 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 }

-
 #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_CLUSTER) || defined(CONFIG_SCHED_MC)
 static inline int x86_sched_itmt_flags(void)
 {
 	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
@@ -603,7 +602,14 @@ static int x86_cluster_flags(void)
 	return cpu_cluster_flags() | x86_sched_itmt_flags();
 }
 #endif
 #endif

+static int x86_die_flags(void)
+{
+	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+		return x86_sched_itmt_flags();
+
+	return 0;
+}
+
 /*
  * Set if a package/die has multiple NUMA nodes inside.
@@ -640,7 +646,7 @@ static void __init build_sched_topology(void)
 	 */
 	if (!x86_has_numa_in_package) {
 		x86_topology[i++] = (struct sched_domain_topology_level){
-			cpu_cpu_mask, SD_INIT_NAME(DIE)
+			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE)
 		};
 	}
@@ -6167,20 +6167,15 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }

-int kvm_mmu_init_vm(struct kvm *kvm)
+void kvm_mmu_init_vm(struct kvm *kvm)
 {
-	int r;
-
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);

-	if (tdp_mmu_enabled) {
-		r = kvm_mmu_init_tdp_mmu(kvm);
-		if (r < 0)
-			return r;
-	}
+	if (tdp_mmu_enabled)
+		kvm_mmu_init_tdp_mmu(kvm);

 	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
 	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
@@ -6189,8 +6184,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)

 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
-
-	return 0;
 }

 static void mmu_free_vm_memory_caches(struct kvm *kvm)
@@ -6246,7 +6239,6 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	bool flush;
-	int i;

 	if (WARN_ON_ONCE(gfn_end <= gfn_start))
 		return;
@@ -6257,11 +6249,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)

 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);

-	if (tdp_mmu_enabled) {
-		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
-						      gfn_end, true, flush);
-	}
+	if (tdp_mmu_enabled)
+		flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);

 	if (flush)
 		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
@@ -58,7 +58,12 @@ struct kvm_mmu_page {

 	bool tdp_mmu_page;
 	bool unsync;
-	u8 mmu_valid_gen;
+	union {
+		u8 mmu_valid_gen;
+
+		/* Only accessed under slots_lock. */
+		bool tdp_mmu_scheduled_root_to_zap;
+	};

 	/*
 	 * The shadow page can't be replaced by an equivalent huge page
@@ -100,13 +105,7 @@ struct kvm_mmu_page {
 		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
 		tdp_ptep_t ptep;
 	};
-	union {
-		DECLARE_BITMAP(unsync_child_bitmap, 512);
-		struct {
-			struct work_struct tdp_mmu_async_work;
-			void *tdp_mmu_async_data;
-		};
-	};
+	DECLARE_BITMAP(unsync_child_bitmap, 512);

 	/*
 	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
@@ -12,18 +12,10 @@
 #include <trace/events/kvm.h>

 /* Initializes the TDP MMU for the VM, if enabled. */
-int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
-	struct workqueue_struct *wq;
-
-	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
-	if (!wq)
-		return -ENOMEM;
-
 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
-	kvm->arch.tdp_mmu_zap_wq = wq;
-	return 1;
 }

 /* Arbitrarily returns true so that this may be used in if statements. */
@@ -46,20 +38,15 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	 * ultimately frees all roots.
 	 */
 	kvm_tdp_mmu_invalidate_all_roots(kvm);
-
-	/*
-	 * Destroying a workqueue also first flushes the workqueue, i.e. no
-	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
-	 */
-	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+	kvm_tdp_mmu_zap_invalidated_roots(kvm);

 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

 	/*
 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
-	 * can run before the VM is torn down. Work items on tdp_mmu_zap_wq
-	 * can call kvm_tdp_mmu_put_root and create new callbacks.
+	 * can run before the VM is torn down. Putting the last reference to
+	 * zapped roots will create new callbacks.
 	 */
 	rcu_barrier();
 }
@@ -86,46 +73,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 	tdp_mmu_free_sp(sp);
 }

-static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
-			     bool shared);
-
-static void tdp_mmu_zap_root_work(struct work_struct *work)
-{
-	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
-						 tdp_mmu_async_work);
-	struct kvm *kvm = root->tdp_mmu_async_data;
-
-	read_lock(&kvm->mmu_lock);
-
-	/*
-	 * A TLB flush is not necessary as KVM performs a local TLB flush when
-	 * allocating a new root (see kvm_mmu_load()), and when migrating vCPU
-	 * to a different pCPU. Note, the local TLB flush on reuse also
-	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
-	 * intermediate paging structures, that may be zapped, as such entries
-	 * are associated with the ASID on both VMX and SVM.
-	 */
-	tdp_mmu_zap_root(kvm, root, true);
-
-	/*
-	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
-	 * avoiding an infinite loop. By design, the root is reachable while
-	 * it's being asynchronously zapped, thus a different task can put its
-	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
-	 * asynchronously zapped root is unavoidable.
-	 */
-	kvm_tdp_mmu_put_root(kvm, root, true);
-
-	read_unlock(&kvm->mmu_lock);
-}
-
-static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
-{
-	root->tdp_mmu_async_data = kvm;
-	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
-	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
-}
-
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 			  bool shared)
 {
@@ -211,8 +158,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);		\
+	     _root;								\
+	     _root = tdp_mmu_next_root(_kvm, _root, _shared, false))		\
+		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
+		} else

 /*
  * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
@@ -292,7 +243,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 	 * by a memslot update or by the destruction of the VM. Initialize the
 	 * refcount to two; one reference for the vCPU, and one reference for
 	 * the TDP MMU itself, which is held until the root is invalidated and
-	 * is ultimately put by tdp_mmu_zap_root_work().
+	 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
 	 */
 	refcount_set(&root->tdp_mmu_root_count, 2);

@@ -877,13 +828,12 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
 * more SPTEs were zapped since the MMU lock was last acquired.
 */
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
-			   bool can_yield, bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
 {
 	struct kvm_mmu_page *root;

-	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
-		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
+	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);

 	return flush;
 }
@@ -891,7 +841,6 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *root;
-	int i;

 	/*
 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
@@ -905,10 +854,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 	 * is being destroyed or the userspace VMM has exited. In both cases,
 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
 	 */
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
-			tdp_mmu_zap_root(kvm, root, false);
-	}
+	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+		tdp_mmu_zap_root(kvm, root, false);
 }

 /*
@@ -917,18 +864,47 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 */
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 {
-	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+	struct kvm_mmu_page *root;
+
+	read_lock(&kvm->mmu_lock);
+
+	for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
+		if (!root->tdp_mmu_scheduled_root_to_zap)
+			continue;
+
+		root->tdp_mmu_scheduled_root_to_zap = false;
+		KVM_BUG_ON(!root->role.invalid, kvm);
+
+		/*
+		 * A TLB flush is not necessary as KVM performs a local TLB
+		 * flush when allocating a new root (see kvm_mmu_load()), and
+		 * when migrating a vCPU to a different pCPU. Note, the local
+		 * TLB flush on reuse also invalidates paging-structure-cache
+		 * entries, i.e. TLB entries for intermediate paging structures,
+		 * that may be zapped, as such entries are associated with the
+		 * ASID on both VMX and SVM.
+		 */
+		tdp_mmu_zap_root(kvm, root, true);
+
+		/*
+		 * The referenced needs to be put *after* zapping the root, as
+		 * the root must be reachable by mmu_notifiers while it's being
+		 * zapped
+		 */
+		kvm_tdp_mmu_put_root(kvm, root, true);
+	}
+
+	read_unlock(&kvm->mmu_lock);
 }

 /*
  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
  * is about to be zapped, e.g. in response to a memslots update. The actual
- * zapping is performed asynchronously. Using a separate workqueue makes it
- * easy to ensure that the destruction is performed before the "fast zap"
- * completes, without keeping a separate list of invalidated roots; the list is
- * effectively the list of work items in the workqueue.
+ * zapping is done separately so that it happens with mmu_lock with read,
+ * whereas invalidating roots must be done with mmu_lock held for write (unless
+ * the VM is being destroyed).
 *
- * Note, the asynchronous worker is gifted the TDP MMU's reference.
+ * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
 * See kvm_tdp_mmu_get_vcpu_root_hpa().
 */
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
@@ -953,19 +929,20 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
 	/*
 	 * As above, mmu_lock isn't held when destroying the VM! There can't
 	 * be other references to @kvm, i.e. nothing else can invalidate roots
-	 * or be consuming roots, but walking the list of roots does need to be
-	 * guarded against roots being deleted by the asynchronous zap worker.
+	 * or get/put references to roots.
 	 */
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
+	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+		/*
+		 * Note, invalid roots can outlive a memslot update! Invalid
+		 * roots must be *zapped* before the memslot update completes,
+		 * but a different task can acquire a reference and keep the
+		 * root alive after its been zapped.
+		 */
 		if (!root->role.invalid) {
+			root->tdp_mmu_scheduled_root_to_zap = true;
 			root->role.invalid = true;
-			tdp_mmu_schedule_zap_root(kvm, root);
 		}
 	}
-
-	rcu_read_unlock();
 }

 /*
@@ -1146,8 +1123,13 @@ retry:
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 				 bool flush)
 {
-	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
-				     range->end, range->may_block, flush);
+	struct kvm_mmu_page *root;
+
+	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
+		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
+					  range->may_block, flush);
+
+	return flush;
 }

 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
@@ -7,7 +7,7 @@

 #include "spte.h"

-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
@@ -20,8 +20,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 			  bool shared);

-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
-			   gfn_t end, bool can_yield, bool flush);
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
@@ -2962,6 +2962,32 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 				    count, in);
 }

+static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+
+	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
+		bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
+				 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+
+		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
+	}
+}
+
+void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct kvm_cpuid_entry2 *best;
+
+	/* For sev guests, the memory encryption bit is not reserved in CR3. */
+	best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
+	if (best)
+		vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
+
+	if (sev_es_guest(svm->vcpu.kvm))
+		sev_es_vcpu_after_set_cpuid(svm);
+}
+
 static void sev_es_init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -3024,14 +3050,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
-
-	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) &&
-	    (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP) ||
-	     guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDPID))) {
-		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, 1, 1);
-		if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP))
-			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
-	}
 }

 void sev_init_vmcb(struct vcpu_svm *svm)
@@ -683,6 +683,21 @@ static int svm_hardware_enable(void)

 	amd_pmu_enable_virt();

+	/*
+	 * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
+	 * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
+	 * Since Linux does not change the value of TSC_AUX once set, prime the
+	 * TSC_AUX field now to avoid a RDMSR on every vCPU run.
+	 */
+	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
+		struct sev_es_save_area *hostsa;
+		u32 msr_hi;
+
+		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
+
+		rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);
+	}
+
 	return 0;
 }

@@ -1532,7 +1547,14 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	if (tsc_scaling)
 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);

-	if (likely(tsc_aux_uret_slot >= 0))
+	/*
+	 * TSC_AUX is always virtualized for SEV-ES guests when the feature is
+	 * available. The user return MSR support is not required in this case
+	 * because TSC_AUX is restored on #VMEXIT from the host save area
+	 * (which has been initialized in svm_hardware_enable()).
+	 */
+	if (likely(tsc_aux_uret_slot >= 0) &&
+	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
 		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);

 	svm->guest_state_loaded = true;
@@ -3086,6 +3108,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
 		break;
 	case MSR_TSC_AUX:
+		/*
+		 * TSC_AUX is always virtualized for SEV-ES guests when the
+		 * feature is available. The user return MSR support is not
+		 * required in this case because TSC_AUX is restored on #VMEXIT
+		 * from the host save area (which has been initialized in
+		 * svm_hardware_enable()).
+		 */
+		if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
+			break;
+
 		/*
 		 * TSC_AUX is usually changed only during boot and never read
 		 * directly. Intercept TSC_AUX instead of exposing it to the
@@ -4284,7 +4316,6 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct kvm_cpuid_entry2 *best;

 	/*
 	 * SVM doesn't provide a way to disable just XSAVES in the guest, KVM
@@ -4328,12 +4359,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
 			     !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));

-	/* For sev guests, the memory encryption bit is not reserved in CR3. */
-	if (sev_guest(vcpu->kvm)) {
-		best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
-		if (best)
-			vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
-	}
+	if (sev_guest(vcpu->kvm))
+		sev_vcpu_after_set_cpuid(svm);

 	init_vmcb_after_set_cpuid(vcpu);
 }
Some files were not shown because too many files have changed in this diff.