arm64: initialize per-cpu offsets earlier
The current initialization of the per-cpu offset register is difficult
to follow, and it is not always early enough for upcoming
instrumentation with KCSAN, where the instrumentation callbacks use the
per-cpu offset.

To make it possible to support KCSAN, and to simplify reasoning about
early bringup code, let's initialize the per-cpu offset earlier, before
we run any C code that may consume it. To do so, this patch adds a new
init_this_cpu_offset() helper that's called before the usual
primary/secondary start functions. For consistency, this is also used
to re-initialize the per-cpu offset after the runtime per-cpu areas
have been allocated (which can change CPU0's offset).

So that init_this_cpu_offset() isn't itself subject to any
instrumentation that might consume the per-cpu offset, it is marked
with noinstr.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201005164303.21389-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
commit 353e228eb3
parent 4dafc08d0b
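To see why the offset has to be programmed before any instrumented C
code runs, here is a minimal sketch of how arm64 per-cpu accesses
consume it (illustrative names, not the kernel's exact accessors, which
use alternatives to select TPIDR_EL1 or TPIDR_EL2): every per-cpu
access adds the value of a system register to the variable's base
address, so a KCSAN callback touching per-cpu data before that register
is set would dereference a junk pointer.

	/* Illustrative sketch; the real accessors live in asm/percpu.h. */
	static inline unsigned long my_cpu_offset_sketch(void)
	{
		unsigned long off;

		/* arm64 caches the per-cpu offset in TPIDR_EL1 (TPIDR_EL2 on VHE). */
		asm("mrs %0, tpidr_el1" : "=r" (off));
		return off;
	}

	/* Every this_cpu access boils down to base address + per-cpu offset. */
	#define this_cpu_ptr_sketch(ptr) \
		((typeof(ptr))((unsigned long)(ptr) + my_cpu_offset_sketch()))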
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -68,4 +68,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 				 struct cpuinfo_arm64 *boot);
 
+void init_this_cpu_offset(void);
+
 #endif /* __ASM_CPU_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -452,6 +452,8 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
+	bl	init_this_cpu_offset
+
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
 #endif
@@ -758,6 +760,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	ptrauth_keys_init_cpu	x2, x3, x4, x5
 #endif
 
+	bl	init_this_cpu_offset
 	b	secondary_start_kernel
 SYM_FUNC_END(__secondary_switched)
 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	set_cpu_logical_map(0, mpidr);
 
-	/*
-	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
-	 * using percpu variable early, for example, lockdep will
-	 * access percpu variable inside lock_release
-	 */
-	set_my_cpu_offset(0);
 	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
 		(unsigned long)mpidr, read_cpuid_id());
 }
@@ -282,6 +276,12 @@ u64 cpu_logical_map(int cpu)
 }
 EXPORT_SYMBOL_GPL(cpu_logical_map);
 
+void noinstr init_this_cpu_offset(void)
+{
+	unsigned int cpu = task_cpu(current);
+	set_my_cpu_offset(per_cpu_offset(cpu));
+}
+
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
 	init_mm.start_code = (unsigned long) _text;
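The new helper is safe to run before the per-cpu offset is valid
because neither of its inputs depends on per-cpu state; both come from
ordinary memory. A simplified sketch of the generic definitions it
leans on (abbreviated from the generic kernel headers, not verbatim):

	/* The scheduler records each task's CPU in the task itself... */
	static inline unsigned int task_cpu(const struct task_struct *p)
	{
		return READ_ONCE(task_thread_info(p)->cpu);
	}

	/* ...and the per-cpu offsets live in a plain array indexed by CPU. */
	extern unsigned long __per_cpu_offset[NR_CPUS];
	#define per_cpu_offset(x)	(__per_cpu_offset[(x)])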
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -192,10 +192,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
 	const struct cpu_operations *ops;
-	unsigned int cpu;
-
-	cpu = task_cpu(current);
-	set_my_cpu_offset(per_cpu_offset(cpu));
+	unsigned int cpu = smp_processor_id();
 
 	/*
 	 * All kernel threads share the same mm context; grab a
@@ -435,7 +432,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
-	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+	/*
+	 * Now that setup_per_cpu_areas() has allocated the runtime per-cpu
+	 * areas, it is only safe to read the CPU0 boot-time area, and we must
+	 * reinitialize the offset to point to the runtime area.
+	 */
+	init_this_cpu_offset();
+
 	cpuinfo_store_boot_cpu();
 
 	/*
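For reference, the boot ordering that makes the second call on CPU0
necessary, condensed from init/main.c (unrelated calls elided): the
offset set from head.S points at CPU0's boot-time per-cpu area, and
only once setup_per_cpu_areas() has allocated the runtime areas can
smp_prepare_boot_cpu() move CPU0 over via init_this_cpu_offset().

	/* Condensed from init/main.c; unrelated calls elided. */
	asmlinkage __visible void __init start_kernel(void)
	{
		smp_setup_processor_id();	/* no longer needs to zero the offset */
		/* ... */
		setup_arch(&command_line);	/* early C code may already use per-cpu data */
		/* ... */
		setup_per_cpu_areas();		/* allocates the runtime per-cpu areas */
		smp_prepare_boot_cpu();		/* re-runs init_this_cpu_offset() */
		/* ... */
	}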