Mirror of https://github.com/torvalds/linux.git, synced 2024-12-17 00:21:32 +00:00
Commit ae03900105
On PVH, PVHVM, at failure in the VCPUOP_register_vcpu_info hypercall we limit the number of cpus to MAX_VIRT_CPUS. However, if this failure had occurred for a cpu beyond MAX_VIRT_CPUS, we continue to function with > MAX_VIRT_CPUS.

This leads to problems at the next save/restore cycle when there are > MAX_VIRT_CPUS threads going into stop_machine() but, coming back up, there is valid state for only the first MAX_VIRT_CPUS.

This patch pulls the excess CPUs down via cpu_down().

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
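The change the commit describes is not part of the file shown below. As a rough illustration of the behaviour, here is a minimal sketch, assuming the Linux CPU number matches the Xen vCPU id and using the cpu_down() hotplug interface as it existed at the time of this commit; the helper name is invented for this example and is not taken from the actual patch:

/*
 * Sketch only: take offline every online CPU at or beyond MAX_VIRT_CPUS,
 * as the commit message describes, so that a later save/restore cycle
 * only has to deal with CPUs that have valid vcpu_info state.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <xen/interface/xen.h>		/* MAX_VIRT_CPUS */

static void xen_pull_down_excess_cpus(void)	/* hypothetical name */
{
        unsigned int cpu;
        int rc;

        for_each_online_cpu(cpu) {
                if (cpu < MAX_VIRT_CPUS)
                        continue;

                rc = cpu_down(cpu);
                if (rc)
                        pr_warn("failed to bring down CPU %u: %d\n",
                                cpu, rc);
        }
}

Taking the excess CPUs offline, rather than letting them run without registered vcpu_info, keeps the set of online CPUs consistent with what the hypervisor can restore.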
76 lines · 1.7 KiB · C
#include <asm/smp.h>

#include <xen/events.h>

#include "xen-ops.h"
#include "smp.h"

static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /*
         * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
         * in xen_cpu_up_prepare_hvm().
         */
        xen_vcpu_setup(0);

        /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the smp bootup code is activated. Hence we need to set this up
         * before the core kernel is patched. Otherwise we will have only
         * modules patched but not core code.
         */
        xen_init_spinlocks();
}

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        int cpu;

        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);

        for_each_possible_cpu(cpu) {
                if (cpu == 0)
                        continue;

                /* Set default vcpu_id to make sure that we don't use cpu-0's */
                per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static void xen_hvm_cpu_die(unsigned int cpu)
{
        if (common_cpu_die(cpu) == 0) {
                xen_smp_intr_free(cpu);
                xen_uninit_lock_cpu(cpu);
                xen_teardown_timer(cpu);
        }
}
#else
static void xen_hvm_cpu_die(unsigned int cpu)
{
        BUG();
}
#endif
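
/*
 * Install the Xen-specific SMP operations for HVM guests.  Without the
 * event-channel vector callback the native SMP implementation is left
 * in place.
 */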
void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;

        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
        smp_ops.smp_cpus_done = xen_smp_cpus_done;
}