Merge branch 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull cpu hotplug fixes from Thomas Gleixner:
 "Two fixes for the cpu hotplug machinery:

   - Replace the overly clever 'SMT disabled by BIOS' detection logic as
     it breaks KVM scenarios and prevents speculation control updates
     when the Hyperthreads are brought online late after boot.

   - Remove a redundant invocation of the speculation control update
     function"

* 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu/hotplug: Fix "SMT disabled by BIOS" detection for KVM
  x86/speculation: Remove redundant arch_smt_update() invocation
commit cc6810e36b
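For context on the fix: the KVM-visible primitive that replaces checking cpu_smt_control is sched_smt_active(), from the <linux/sched/smt.h> header that the diff below starts including. As a rough sketch of that header in this kernel era (shown for orientation only, not part of this commit's diff), it is a scheduler-maintained static key rather than a value frozen at boot:

/* Approximate contents of <linux/sched/smt.h>; for context only. */
#ifdef CONFIG_SCHED_SMT
extern struct static_key_false sched_smt_present;

static __always_inline bool sched_smt_active(void)
{
	/* True only while at least one pair of SMT siblings is online. */
	return static_branch_likely(&sched_smt_present);
}
#else
static inline bool sched_smt_active(void) { return false; }
#endif

Because the key follows the set of CPUs that are actually online, it also gives the right answer when Hyperthreads are brought online late after boot, which the removed boot-time detection could not.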
@@ -71,7 +71,7 @@ void __init check_bugs(void)
 	 * identify_boot_cpu() initialized SMT support information, let the
 	 * core code know.
 	 */
-	cpu_smt_check_topology_early();
+	cpu_smt_check_topology();
 
 	if (!IS_ENABLED(CONFIG_SMP)) {
 		pr_info("CPU: ");
@@ -26,6 +26,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/trace_events.h>
@@ -6823,7 +6824,7 @@ static int vmx_vm_init(struct kvm *kvm)
 		 * Warn upon starting the first VM in a potentially
 		 * insecure environment.
 		 */
-		if (cpu_smt_control == CPU_SMT_ENABLED)
+		if (sched_smt_active())
 			pr_warn_once(L1TF_MSG_SMT);
 		if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
 			pr_warn_once(L1TF_MSG_L1D);
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
 extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology_early(void);
 extern void cpu_smt_check_topology(void);
 #else
 # define cpu_smt_control		(CPU_SMT_ENABLED)
 static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology_early(void) { }
 static inline void cpu_smt_check_topology(void) { }
 #endif
 
kernel/cpu.c (38 changed lines)
@@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { }
 
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
-
-static bool cpu_smt_available __read_mostly;
 
 void __init cpu_smt_disable(bool force)
 {
@@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force)
 
 /*
  * The decision whether SMT is supported can only be done after the full
- * CPU identification. Called from architecture code before non boot CPUs
- * are brought up.
- */
-void __init cpu_smt_check_topology_early(void)
-{
-	if (!topology_smt_supported())
-		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
-
-/*
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
- * with reality. cpu_smt_available is set to true during the bringup of non
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
- * cpu_smt_control's previous setting.
+ * CPU identification. Called from architecture code.
  */
 void __init cpu_smt_check_topology(void)
 {
-	if (!cpu_smt_available)
+	if (!topology_smt_supported())
 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 }
 
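With the late "have we ever seen a sibling" bookkeeping gone, cpu_smt_check_topology() relies entirely on the architecture's static answer. For reference, the x86 topology_smt_supported() of this era is roughly the following (arch/x86/kernel/smpboot.c, shown for context, not part of this diff):

/* Approximate x86 implementation, for context only. */
bool topology_smt_supported(void)
{
	/* smp_num_siblings comes from CPUID enumeration at boot. */
	return smp_num_siblings > 1;
}

Note this reports whether the CPU supports SMT at all, not whether sibling threads are online; the online/offline state is what sched_smt_active() tracks.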
@@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable);
 
 static inline bool cpu_smt_allowed(unsigned int cpu)
 {
-	if (topology_is_primary_thread(cpu))
+	if (cpu_smt_control == CPU_SMT_ENABLED)
 		return true;
 
-	/*
-	 * If the CPU is not a 'primary' thread and the booted_once bit is
-	 * set then the processor has SMT support. Store this information
-	 * for the late check of SMT support in cpu_smt_check_topology().
-	 */
-	if (per_cpu(cpuhp_state, cpu).booted_once)
-		cpu_smt_available = true;
-
-	if (cpu_smt_control == CPU_SMT_ENABLED)
+	if (topology_is_primary_thread(cpu))
 		return true;
 
 	/*
@@ -2090,10 +2065,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 		 */
 		cpuhp_offline_cpu_device(cpu);
 	}
-	if (!ret) {
+	if (!ret)
 		cpu_smt_control = ctrlval;
-		arch_smt_update();
-	}
 	cpu_maps_update_done();
 	return ret;
 }
@@ -2104,7 +2077,6 @@ static int cpuhp_smt_enable(void)
 
 	cpu_maps_update_begin();
 	cpu_smt_control = CPU_SMT_ENABLED;
-	arch_smt_update();
 	for_each_present_cpu(cpu) {
 		/* Skip online CPUs and CPUs on offline nodes */
 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
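The two arch_smt_update() calls removed above are the "redundant invocation" from the pull message: cpuhp_smt_disable() and cpuhp_smt_enable() online/offline each sibling through the regular CPU hotplug path, and since the earlier rework of SMT state handling that path already refreshes the speculation control state once per operation. Roughly, and as an assumption about the surrounding kernel/cpu.c of this era rather than part of this diff:

/* Sketch: the common hotplug path already ends with the update,
 * so the sysfs SMT control functions do not need their own call. */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	/* ... bring the CPU up through the cpuhp state machine ... */
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}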
@@ -5980,6 +5980,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 
 #ifdef CONFIG_SCHED_SMT
 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
 
 static inline void set_idle_cores(int cpu, int val)
 {
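Exporting sched_smt_present is what lets kvm-intel, built as a module, use sched_smt_active() in vmx_vm_init() above. The key tracks reality rather than firmware intent: the scheduler raises it only when a second sibling of some core actually comes online. A rough sketch of that bookkeeping from kernel/sched/core.c of this era (for context, not part of this diff):

/* In sched_cpu_activate(), approximately: */
#ifdef CONFIG_SCHED_SMT
	/* Going up: this core now has an online SMT sibling pair. */
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_inc_cpuslocked(&sched_smt_present);
#endif

sched_cpu_deactivate() does the mirror-image static_branch_dec_cpuslocked() when a sibling goes away, so on a host where the BIOS disabled SMT the key simply never becomes true.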
@@ -584,8 +584,6 @@ void __init smp_init(void)
 		num_nodes, (num_nodes > 1 ? "s" : ""),
 		num_cpus,  (num_cpus  > 1 ? "s" : ""));
 
-	/* Final decision about SMT support */
-	cpu_smt_check_topology();
 	/* Any cleanup work */
 	smp_cpus_done(setup_max_cpus);
 }