Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"There's a reasonable amount here and the juicy details are all below.
It's worth noting that the MTE/KASAN changes strayed outside of our
usual directories due to core mm changes and associated changes to
other architectures; Andrew asked for us to carry these [1] rather
than take them via the -mm tree.
Summary:
- Optimise SVE switching for CPUs with 128-bit implementations.
- Fix output format from SVE selftest.
- Add support for versions 1.2 and 1.3 of the SMC calling
convention.
- Allow Pointer Authentication to be configured independently for
kernel and userspace.
- PMU driver cleanups for managing IRQ affinity and exposing event
attributes via sysfs.
- KASAN optimisations for both hardware tagging (MTE) and out-of-line
software tagging implementations.
- Relax frame record alignment requirements to facilitate 8-byte
alignment with KASAN and Clang.
- Cleanup of page-table definitions and removal of unused memory
types.
- Reduction of ARCH_DMA_MINALIGN back to 64 bytes.
- Refactoring of our instruction decoding routines and addition of
some missing encodings.
- Entry code moved into C and hardened against harmful compiler
instrumentation.
- Update booting requirements for the FEAT_HCX feature, added to v8.7
of the architecture.
- Fix resume from idle when pNMI is being used.
- Additional CPU sanity checks for MTE and preparatory changes for
systems where not all of the CPUs support 32-bit EL0.
- Update our kernel string routines to the latest Cortex Strings
implementation.
- Big cleanup of our cache maintenance routines, which were
confusingly named and inconsistent in their implementations.
- Tweak linker flags so that GDB can understand vmlinux when using
RELR relocations.
- Boot path cleanups to enable early initialisation of per-cpu
operations needed by KCSAN.
- Non-critical fixes and miscellaneous cleanup"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (150 commits)
arm64: tlb: fix the TTL value of tlb_get_level
arm64: Restrict undef hook for cpufeature registers
arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS
arm64: insn: avoid circular include dependency
arm64: smp: Bump debugging information print down to KERN_DEBUG
drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()
perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number
arm64: suspend: Use cpuidle context helpers in cpu_suspend()
PSCI: Use cpuidle context helpers in psci_cpu_suspend_enter()
arm64: Convert cpu_do_idle() to using cpuidle context helpers
arm64: Add cpuidle context save/restore helpers
arm64: head: fix code comments in set_cpu_boot_mode_flag
arm64: mm: drop unused __pa(__idmap_text_start)
arm64: mm: fix the count comments in compute_indices
arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
arm64: mm: Pass original fault address to handle_mm_fault()
arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK]
arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT
arm64/mm: Drop SWAPPER_INIT_MAP_SIZE
arm64: Conditionally configure PTR_AUTH key of the kernel.
...
arch/arm64/kernel/smp.c

@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
          * page tables.
          */
         secondary_data.task = idle;
-        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
         update_cpu_boot_status(CPU_MMU_OFF);
-        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
         /* Now bring the CPU into our world */
         ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 
         pr_crit("CPU%u: failed to come online\n", cpu);
         secondary_data.task = NULL;
-        secondary_data.stack = NULL;
-        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
         status = READ_ONCE(secondary_data.status);
         if (status == CPU_MMU_OFF)
                 status = READ_ONCE(__early_cpu_boot_status);
@@ -202,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
         u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
         struct mm_struct *mm = &init_mm;
         const struct cpu_operations *ops;
-        unsigned int cpu;
-
-        cpu = task_cpu(current);
-        set_my_cpu_offset(per_cpu_offset(cpu));
+        unsigned int cpu = smp_processor_id();
 
         /*
          * All kernel threads share the same mm context; grab a
@@ -351,7 +344,7 @@ void __cpu_die(unsigned int cpu)
                 pr_crit("CPU%u: cpu didn't die\n", cpu);
                 return;
         }
-        pr_notice("CPU%u: shutdown\n", cpu);
+        pr_debug("CPU%u: shutdown\n", cpu);
 
         /*
          * Now that the dying CPU is beyond the point of no return w.r.t.
@@ -451,6 +444,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
+        /*
+         * The runtime per-cpu areas have been allocated by
+         * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+         * freed shortly, so we must move over to the runtime per-cpu area.
+         */
         set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
         cpuinfo_store_boot_cpu();