linux/arch/x86/include/asm/cpumask.h
Thomas Gleixner d4f28f07c2 x86/smpboot: Move synchronization masks to SMP boot code
The usage is in smpboot.c and not in the CPU initialization code.

The XEN_PV usage of cpu_callout_mask is obsolete as cpu_init() no longer
waits and cacheinfo has its own CPU mask now, so cpu_callout_mask can be
made static too.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205256.091511483@linutronix.de
2023-05-15 13:44:52 +02:00

39 lines
903 B
C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
extern void setup_cpu_local_masks(void);
/*
* NMI and MCE exceptions need cpu_is_offline() _really_ early,
* provide an arch_ special for them to avoid instrumentation.
*/
#if NR_CPUS > 1
/*
 * Instrumentation-free online check for early NMI/MCE entry: read the
 * bit straight out of cpu_online_mask with the raw arch_ bitop.
 */
static __always_inline bool arch_cpu_online(int cpu)
{
	const unsigned long *online = cpumask_bits(cpu_online_mask);

	return arch_test_bit(cpu, online);
}
/*
 * Clear @cpu in @dstp without instrumentation; cpumask_check() sanity
 * checks the CPU number before the raw arch_ bitop touches the mask.
 */
static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	unsigned long *bits = cpumask_bits(dstp);

	arch_clear_bit(cpumask_check(cpu), bits);
}
#else
/* UP build (NR_CPUS == 1): only CPU 0 exists, and it is always online. */
static __always_inline bool arch_cpu_online(int cpu)
{
	return !cpu;
}
/* UP build: there is no other CPU whose bit could need clearing. */
static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
}
#endif
/* Instrumentation-free counterpart of cpu_is_offline() for NMI/MCE paths. */
#define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */