Merge branch 'for-next/misc' into for-next/core
* for-next/misc:
  arm64: mm: Drop 'const' from conditional arm64_dma_phys_limit definition
  arm64: clean up tools Makefile
  arm64: drop unused includes of <linux/personality.h>
  arm64: Do not defer reserve_crashkernel() for platforms with no DMA memory zones
  arm64: prevent instrumentation of bp hardening callbacks
  arm64: cpufeature: Remove cpu_has_fwb() check
  arm64: atomics: remove redundant static branch
  arm64: entry: Save some nops when CONFIG_ARM64_PSEUDO_NMI is not set
commit b3ea0eafa9

Changed paths: arch/arm64/include/asm, arch/arm64/kernel, arch/arm64/mm, arch/arm64/tools
@@ -17,12 +17,10 @@
 #include <asm/cpucaps.h>
 
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-extern struct static_key_false arm64_const_caps_ready;
 
-static inline bool system_uses_lse_atomics(void)
+static __always_inline bool system_uses_lse_atomics(void)
 {
-        return (static_branch_likely(&arm64_const_caps_ready)) &&
-                static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+        return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
 }
 
 #define __lse_ll_sc_body(op, ...)
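The simplification above works because the ARM64_HAS_LSE_ATOMICS static key is only enabled once the CPU capabilities have been finalised, so the extra arm64_const_caps_ready check added nothing. For context, here is a sketch of how system_uses_lse_atomics() is consumed by the atomics wrappers; it follows the usual shape of __lse_ll_sc_body() in asm/lse.h but is quoted from memory, so treat it as a sketch rather than the exact macro:

    /* Sketch of the LSE vs. LL/SC dispatch that gates on the check above. */
    #define __lse_ll_sc_body(op, ...)                               \
    ({                                                              \
            system_uses_lse_atomics() ?                             \
                    __lse_##op(__VA_ARGS__) :                       \
                    __ll_sc_##op(__VA_ARGS__);                      \
    })

With the redundant branch gone, a single static_branch_likely() decides whether the LSE or the LL/SC implementation of each atomic operation is used.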
@@ -67,7 +67,8 @@ struct bp_hardening_data {
 
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
-static inline void arm64_apply_bp_hardening(void)
+/* Called during entry so must be __always_inline */
+static __always_inline void arm64_apply_bp_hardening(void)
 {
         struct bp_hardening_data *d;
 
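The hunk only shows the start of arm64_apply_bp_hardening(). For readers without the tree at hand, its job is to look up the per-CPU branch-predictor-hardening callback and invoke it on exception entry, roughly as in this simplified sketch (not the verbatim kernel code):

    static __always_inline void arm64_apply_bp_hardening(void)
    {
            struct bp_hardening_data *d;

            /* Nothing to do unless a Spectre-v2 mitigation was installed. */
            d = this_cpu_ptr(&bp_hardening_data);
            if (d->fn)
                    d->fn();
    }

Because this runs before the entry code has done its bookkeeping, both the inline helper and the callbacks it dispatches to (see the proton-pack.c hunk further down) have to stay out of reach of instrumentation, hence __always_inline here and noinstr there.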
@@ -1775,14 +1775,6 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
         write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
 }
 
-static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
-{
-        u64 val = read_sysreg_s(SYS_CLIDR_EL1);
-
-        /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
-        WARN_ON(CLIDR_LOUU(val) || CLIDR_LOUIS(val));
-}
-
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ -2144,7 +2136,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .field_pos = ID_AA64MMFR2_FWB_SHIFT,
                 .min_field_value = 1,
                 .matches = has_cpuid_feature,
-                .cpu_enable = cpu_has_fwb,
         },
         {
                 .desc = "ARMv8.4 Translation Table Level",
@@ -300,6 +300,7 @@ alternative_else_nop_endif
         str     w21, [sp, #S_SYSCALLNO]
         .endif
 
+#ifdef CONFIG_ARM64_PSEUDO_NMI
         /* Save pmr */
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
         mrs_s   x20, SYS_ICC_PMR_EL1
@@ -307,6 +308,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
         mov     x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
         msr_s   SYS_ICC_PMR_EL1, x20
 alternative_else_nop_endif
+#endif
 
         /* Re-enable tag checking (TCO set on exception entry) */
 #ifdef CONFIG_ARM64_MTE
@@ -330,6 +332,7 @@ alternative_else_nop_endif
         disable_daif
         .endif
 
+#ifdef CONFIG_ARM64_PSEUDO_NMI
         /* Restore pmr */
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
         ldr     x20, [sp, #S_PMR_SAVE]
@@ -339,6 +342,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
         dsb     sy              // Ensure priority change is seen by redistributor
 .L__skip_pmr_sync\@:
 alternative_else_nop_endif
+#endif
 
         ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
 
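These four hunks wrap the PMR save/restore alternatives in #ifdef CONFIG_ARM64_PSEUDO_NMI, so kernels built without pseudo-NMI support no longer carry the nop-padded alternative slots on every exception entry and return. A rough C analogue of the pattern, with hypothetical helper and field names, looks like this:

    /* Rough C analogue of the entry.S change; names are illustrative only. */
    static __always_inline void save_and_mask_pmr(struct pt_regs *regs)
    {
            if (!IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI))
                    return;         /* the whole block is compiled out */

            if (system_uses_irq_prio_masking()) {   /* patched alternative */
                    regs->pmr_save = read_sysreg_s(SYS_ICC_PMR_EL1);
                    write_sysreg_s(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET,
                                   SYS_ICC_PMR_EL1);
            }
    }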
@@ -193,17 +193,20 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
         __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
 }
 
-static void call_smc_arch_workaround_1(void)
+/* Called during entry so must be noinstr */
+static noinstr void call_smc_arch_workaround_1(void)
 {
         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
-static void call_hvc_arch_workaround_1(void)
+/* Called during entry so must be noinstr */
+static noinstr void call_hvc_arch_workaround_1(void)
 {
         arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
-static void qcom_link_stack_sanitisation(void)
+/* Called during entry so must be noinstr */
+static noinstr void qcom_link_stack_sanitisation(void)
 {
         u64 tmp;
 
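The three functions above are what ends up in bp_hardening_data.fn, so marking them noinstr keeps tracing, kprobes and coverage instrumentation out of the pre-accounting entry path. A minimal sketch of the registration side, with the types simplified relative to the real spectre.h declarations:

    /* Simplified sketch of how the noinstr callbacks above get installed. */
    typedef void (*bp_hardening_cb_t)(void);

    struct bp_hardening_data {
            int                     slot;   /* hyp vector slot in use */
            bp_hardening_cb_t       fn;     /* e.g. call_smc_arch_workaround_1 */
    };

    DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

    static void install_bp_hardening_cb(bp_hardening_cb_t fn)
    {
            __this_cpu_write(bp_hardening_data.fn, fn);
    }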
@@ -11,7 +11,6 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
-#include <linux/personality.h>
 #include <linux/freezer.h>
 #include <linux/stddef.h>
 #include <linux/uaccess.h>
@@ -9,7 +9,6 @@
 
 #include <linux/compat.h>
 #include <linux/cpufeature.h>
-#include <linux/personality.h>
 #include <linux/sched.h>
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
@@ -9,7 +9,6 @@
 #include <linux/bug.h>
 #include <linux/context_tracking.h>
 #include <linux/signal.h>
-#include <linux/personality.h>
 #include <linux/kallsyms.h>
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
@@ -61,8 +61,34 @@ EXPORT_SYMBOL(memstart_addr);
  * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
  * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
  * otherwise it is empty.
+ *
+ * Memory reservation for the crash kernel is either done early or deferred,
+ * depending on the DMA memory zone configs (ZONE_DMA):
+ *
+ * In the absence of ZONE_DMA configs, arm64_dma_phys_limit is initialized
+ * here instead of in max_zone_phys(). This allows early reservation of
+ * crash kernel memory, which depends on arm64_dma_phys_limit.
+ * Reserving crash kernel memory early allows linear creation of block
+ * mappings (greater than page-granularity) for all the memory bank ranges.
+ * In this scheme a comparatively quicker boot is observed.
+ *
+ * If ZONE_DMA configs are defined, crash kernel memory reservation is
+ * delayed until the DMA zone memory range sizes are initialized in
+ * zone_sizes_init(). The deferral is necessary to steer clear of the DMA
+ * zone range and avoid overlapping allocations. The crash kernel boundaries
+ * are therefore not known when all the bank memory ranges are mapped, which
+ * makes it impossible to exclude the crash kernel range from block mappings,
+ * so page-granularity mappings are created for the entire memory range.
+ * Hence a slightly slower boot is observed.
+ *
+ * Note: Page-granularity mappings are necessary for the crash kernel memory
+ * range to allow shrinking it via the /sys/kernel/kexec_crash_size interface.
  */
-phys_addr_t arm64_dma_phys_limit __ro_after_init;
+#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
+phys_addr_t __ro_after_init arm64_dma_phys_limit;
+#else
+phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
+#endif
 
 #ifdef CONFIG_KEXEC_CORE
 /*
@@ -153,8 +179,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
         if (!arm64_dma_phys_limit)
                 arm64_dma_phys_limit = dma32_phys_limit;
 #endif
-        if (!arm64_dma_phys_limit)
-                arm64_dma_phys_limit = PHYS_MASK + 1;
         max_zone_pfns[ZONE_NORMAL] = max;
 
         free_area_init(max_zone_pfns);
@@ -315,6 +339,9 @@ void __init arm64_memblock_init(void)
 
         early_init_fdt_scan_reserved_mem();
 
+        if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
+                reserve_crashkernel();
+
         high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 }
 
@@ -361,7 +388,8 @@ void __init bootmem_init(void)
          * request_standard_resources() depends on crashkernel's memory being
          * reserved, so do it here.
          */
-        reserve_crashkernel();
+        if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
+                reserve_crashkernel();
 
         memblock_dump_all();
 }
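Taken together, the init.c hunks implement the policy described in the new comment block: reserve the crash kernel early when there are no DMA zones, otherwise defer until the DMA limits are known. A condensed, hedged pseudocode view (the function name is made up for illustration, not kernel code):

    /* Not kernel code: a condensed view of the reservation policy above. */
    static void __init crashkernel_reservation_policy(void)
    {
            if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
                    /*
                     * No DMA zones: arm64_dma_phys_limit already covers all
                     * memory (PHYS_MASK + 1), so reserve from
                     * arm64_memblock_init() and keep block mappings for the
                     * linear map -- the faster-boot path.
                     */
                    reserve_crashkernel();
            } else {
                    /*
                     * DMA zones present: wait until zone_sizes_init() has set
                     * arm64_dma_phys_limit, then reserve from bootmem_init();
                     * the crash kernel range is mapped at page granularity.
                     */
            }
    }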
@@ -517,7 +517,7 @@ static void __init map_mem(pgd_t *pgdp)
          */
         BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-        if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE))
+        if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
                 flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
         /*
@@ -528,6 +528,17 @@ static void __init map_mem(pgd_t *pgdp)
          */
         memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
 
+#ifdef CONFIG_KEXEC_CORE
+        if (crash_mem_map) {
+                if (IS_ENABLED(CONFIG_ZONE_DMA) ||
+                    IS_ENABLED(CONFIG_ZONE_DMA32))
+                        flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+                else if (crashk_res.end)
+                        memblock_mark_nomap(crashk_res.start,
+                                            resource_size(&crashk_res));
+        }
+#endif
+
         /* map all the memory banks */
         for_each_mem_range(i, &start, &end) {
                 if (start >= end)
@@ -554,6 +565,25 @@ static void __init map_mem(pgd_t *pgdp)
         __map_memblock(pgdp, kernel_start, kernel_end,
                        PAGE_KERNEL, NO_CONT_MAPPINGS);
         memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
+
+        /*
+         * Use page-level mappings here so that we can shrink the region
+         * in page granularity and put back unused memory to buddy system
+         * through /sys/kernel/kexec_crash_size interface.
+         */
+#ifdef CONFIG_KEXEC_CORE
+        if (crash_mem_map &&
+            !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
+                if (crashk_res.end) {
+                        __map_memblock(pgdp, crashk_res.start,
+                                       crashk_res.end + 1,
+                                       PAGE_KERNEL,
+                                       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+                        memblock_clear_nomap(crashk_res.start,
+                                             resource_size(&crashk_res));
+                }
+        }
+#endif
 }
 
 void mark_rodata_ro(void)
@@ -5,18 +5,14 @@ kapi := $(gen)/asm
 
 kapi-hdrs-y := $(kapi)/cpucaps.h
 
-targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y))
+targets += $(addprefix ../../../, $(kapi-hdrs-y))
 
 PHONY += kapi
 
-kapi: $(kapi-hdrs-y) $(gen-y)
-
-# Create output directory if not already present
-_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+kapi: $(kapi-hdrs-y)
 
 quiet_cmd_gen_cpucaps = GEN     $@
-      cmd_gen_cpucaps = mkdir -p $(dir $@) && \
-      $(AWK) -f $(filter-out $(PHONY),$^) > $@
+      cmd_gen_cpucaps = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
 
 $(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
         $(call if_changed,gen_cpucaps)