Merge commit 'upstream-x86-virt' into WIP.x86/mm
Merge a minimal set of virt cleanups, for a base for the MM isolation patches.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit e5d77a73f3
@@ -113,7 +113,7 @@ void hyperv_init(void)
         u64 guest_id;
         union hv_x64_msr_hypercall_contents hypercall_msr;
 
-        if (x86_hyper != &x86_hyper_ms_hyperv)
+        if (x86_hyper_type != X86_HYPER_MS_HYPERV)
                 return;
 
         /* Allocate percpu VP index */
@@ -23,11 +23,22 @@
 #ifdef CONFIG_HYPERVISOR_GUEST
 
 #include <asm/kvm_para.h>
+#include <asm/x86_init.h>
 #include <asm/xen/hypervisor.h>
 
 /*
  * x86 hypervisor information
  */
+
+enum x86_hypervisor_type {
+        X86_HYPER_NATIVE = 0,
+        X86_HYPER_VMWARE,
+        X86_HYPER_MS_HYPERV,
+        X86_HYPER_XEN_PV,
+        X86_HYPER_XEN_HVM,
+        X86_HYPER_KVM,
+};
+
 struct hypervisor_x86 {
         /* Hypervisor name */
         const char *name;
@@ -35,40 +46,19 @@ struct hypervisor_x86 {
         /* Detection routine */
         uint32_t (*detect)(void);
 
-        /* Platform setup (run once per boot) */
-        void (*init_platform)(void);
+        /* Hypervisor type */
+        enum x86_hypervisor_type type;
 
-        /* X2APIC detection (run once per boot) */
-        bool (*x2apic_available)(void);
+        /* init time callbacks */
+        struct x86_hyper_init init;
 
-        /* pin current vcpu to specified physical cpu (run rarely) */
-        void (*pin_vcpu)(int);
-
-        /* called during init_mem_mapping() to setup early mappings. */
-        void (*init_mem_mapping)(void);
+        /* runtime callbacks */
+        struct x86_hyper_runtime runtime;
 };
 
-extern const struct hypervisor_x86 *x86_hyper;
-
-/* Recognized hypervisors */
-extern const struct hypervisor_x86 x86_hyper_vmware;
-extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_pv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
-extern const struct hypervisor_x86 x86_hyper_kvm;
-
+extern enum x86_hypervisor_type x86_hyper_type;
 extern void init_hypervisor_platform(void);
-extern bool hypervisor_x2apic_available(void);
-extern void hypervisor_pin_vcpu(int cpu);
-
-static inline void hypervisor_init_mem_mapping(void)
-{
-        if (x86_hyper && x86_hyper->init_mem_mapping)
-                x86_hyper->init_mem_mapping();
-}
 #else
 static inline void init_hypervisor_platform(void) { }
-static inline bool hypervisor_x2apic_available(void) { return false; }
-static inline void hypervisor_init_mem_mapping(void) { }
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
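As a quick orientation for the rest of the diff: under the reworked struct hypervisor_x86 above, a descriptor carries a type plus split init-time and runtime callback tables instead of bare function pointers. The VMware, Hyper-V, KVM and Xen hunks further down fill these in; a minimal, hypothetical descriptor under the new layout might look like the sketch below, where the example_* names are invented for illustration and only the field names come from the hunks above.

static uint32_t __init example_detect(void)
{
        /* A non-zero return means "detected"; the value acts as a priority. */
        return 1;
}

static void __init example_init_platform(void)
{
}

const __initconst struct hypervisor_x86 x86_hyper_example = {
        .name                   = "Example",
        .detect                 = example_detect,
        .type                   = X86_HYPER_KVM,       /* reusing an existing enum value for the sketch */
        .init.init_platform     = example_init_platform,
};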
@@ -114,6 +114,18 @@ struct x86_init_pci {
         void (*fixup_irqs)(void);
 };
 
+/**
+ * struct x86_hyper_init - x86 hypervisor init functions
+ * @init_platform: platform setup
+ * @x2apic_available: X2APIC detection
+ * @init_mem_mapping: setup early mappings during init_mem_mapping()
+ */
+struct x86_hyper_init {
+        void (*init_platform)(void);
+        bool (*x2apic_available)(void);
+        void (*init_mem_mapping)(void);
+};
+
 /**
  * struct x86_init_ops - functions for platform specific setup
  *
@@ -127,6 +139,7 @@ struct x86_init_ops {
         struct x86_init_timers timers;
         struct x86_init_iommu iommu;
         struct x86_init_pci pci;
+        struct x86_hyper_init hyper;
 };
 
 /**
@@ -199,6 +212,15 @@ struct x86_legacy_features {
         struct x86_legacy_devices devices;
 };
 
+/**
+ * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks
+ *
+ * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely)
+ */
+struct x86_hyper_runtime {
+        void (*pin_vcpu)(int cpu);
+};
+
 /**
  * struct x86_platform_ops - platform specific runtime functions
  * @calibrate_cpu: calibrate CPU
@@ -218,6 +240,7 @@ struct x86_legacy_features {
  *              possible in x86_early_init_platform_quirks() by
  *              only using the current x86_hardware_subarch
  *              semantics.
+ * @hyper: x86 hypervisor specific runtime callbacks
  */
 struct x86_platform_ops {
         unsigned long (*calibrate_cpu)(void);
@@ -233,6 +256,7 @@ struct x86_platform_ops {
         void (*apic_post_init)(void);
         struct x86_legacy_features legacy;
         void (*set_legacy_features)(void);
+        struct x86_hyper_runtime hyper;
 };
 
 struct pci_dev;
@@ -1645,7 +1645,7 @@ static __init void try_to_enable_x2apic(int remap_mode)
                  * under KVM
                  */
                 if (max_physical_apicid > 255 ||
-                    !hypervisor_x2apic_available()) {
+                    !x86_init.hyper.x2apic_available()) {
                         pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
                         x2apic_disable();
                         return;
@@ -920,9 +920,8 @@ static __init void uv_rtc_init(void)
 /*
  * percpu heartbeat timer
  */
-static void uv_heartbeat(unsigned long ignored)
+static void uv_heartbeat(struct timer_list *timer)
 {
-        struct timer_list *timer = &uv_scir_info->timer;
         unsigned char bits = uv_scir_info->state;
 
         /* Flip heartbeat bit: */
@@ -947,7 +946,7 @@ static int uv_heartbeat_enable(unsigned int cpu)
         struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
 
         uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
-        setup_pinned_timer(timer, uv_heartbeat, cpu);
+        timer_setup(timer, uv_heartbeat, TIMER_PINNED);
         timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
         add_timer_on(timer, cpu);
         uv_cpu_scir_info(cpu)->enabled = 1;
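The two UV heartbeat hunks above follow the timer API conversion carried along with this work: the callback now receives the struct timer_list pointer itself rather than an unsigned long cookie, and setup_pinned_timer() becomes timer_setup() with TIMER_PINNED. A generic sketch of the new pattern follows; the my_dev names are invented and not taken from this merge, and drivers that embed the timer in a larger object usually recover it with from_timer().

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
        struct timer_list heartbeat;
        unsigned long beats;
};

/* New-style callback: it is handed the timer itself, not an opaque cookie. */
static void my_dev_heartbeat(struct timer_list *t)
{
        struct my_dev *dev = from_timer(dev, t, heartbeat);

        dev->beats++;
        mod_timer(&dev->heartbeat, jiffies + HZ);       /* re-arm one second out */
}

static void my_dev_start(struct my_dev *dev)
{
        timer_setup(&dev->heartbeat, my_dev_heartbeat, TIMER_PINNED);
        dev->heartbeat.expires = jiffies + HZ;
        add_timer(&dev->heartbeat);
}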
@@ -26,6 +26,12 @@
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+extern const struct hypervisor_x86 x86_hyper_xen_pv;
+extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_kvm;
+
 static const __initconst struct hypervisor_x86 * const hypervisors[] =
 {
 #ifdef CONFIG_XEN_PV
@@ -41,54 +47,52 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] =
 #endif
 };
 
-const struct hypervisor_x86 *x86_hyper;
-EXPORT_SYMBOL(x86_hyper);
+enum x86_hypervisor_type x86_hyper_type;
+EXPORT_SYMBOL(x86_hyper_type);
 
-static inline void __init
+static inline const struct hypervisor_x86 * __init
 detect_hypervisor_vendor(void)
 {
-        const struct hypervisor_x86 *h, * const *p;
+        const struct hypervisor_x86 *h = NULL, * const *p;
         uint32_t pri, max_pri = 0;
 
         for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
-                h = *p;
-                pri = h->detect();
-                if (pri != 0 && pri > max_pri) {
+                pri = (*p)->detect();
+                if (pri > max_pri) {
                         max_pri = pri;
-                        x86_hyper = h;
+                        h = *p;
                 }
         }
 
-        if (max_pri)
-                pr_info("Hypervisor detected: %s\n", x86_hyper->name);
+        if (h)
+                pr_info("Hypervisor detected: %s\n", h->name);
+
+        return h;
+}
+
+static void __init copy_array(const void *src, void *target, unsigned int size)
+{
+        unsigned int i, n = size / sizeof(void *);
+        const void * const *from = (const void * const *)src;
+        const void **to = (const void **)target;
+
+        for (i = 0; i < n; i++)
+                if (from[i])
+                        to[i] = from[i];
 }
 
 void __init init_hypervisor_platform(void)
 {
+        const struct hypervisor_x86 *h;
 
-        detect_hypervisor_vendor();
+        h = detect_hypervisor_vendor();
 
-        if (!x86_hyper)
+        if (!h)
                 return;
 
-        if (x86_hyper->init_platform)
-                x86_hyper->init_platform();
-}
-
-bool __init hypervisor_x2apic_available(void)
-{
-        return x86_hyper &&
-               x86_hyper->x2apic_available &&
-               x86_hyper->x2apic_available();
-}
-
-void hypervisor_pin_vcpu(int cpu)
-{
-        if (!x86_hyper)
-                return;
-
-        if (x86_hyper->pin_vcpu)
-                x86_hyper->pin_vcpu(cpu);
-        else
-                WARN_ONCE(1, "vcpu pinning requested but not supported!\n");
-}
+        copy_array(&h->init, &x86_init.hyper, sizeof(h->init));
+        copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime));
+
+        x86_hyper_type = h->type;
+        x86_init.hyper.init_platform();
+}
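The copy_array() helper added above treats both structures as flat arrays of pointers and copies only the non-NULL slots, so any callback a hypervisor leaves unset keeps the noop default installed in x86_init.c (see the hunks further down). A small stand-alone illustration of that merge-only-what-is-set behaviour, written for this description in plain user-space C; like the kernel helper it assumes function pointers can be stored through void *, which holds on the platforms Linux targets.

#include <stdio.h>

struct hooks {
        void (*a)(void);
        void (*b)(void);
};

static void default_a(void) { puts("default a"); }
static void default_b(void) { puts("default b"); }
static void special_a(void) { puts("special a"); }

/* Same idea as the kernel helper: overwrite only the non-NULL slots. */
static void copy_array(const void *src, void *target, unsigned int size)
{
        unsigned int i, n = size / sizeof(void *);
        const void * const *from = (const void * const *)src;
        const void **to = (const void **)target;

        for (i = 0; i < n; i++)
                if (from[i])
                        to[i] = from[i];
}

int main(void)
{
        struct hooks defaults = { .a = default_a, .b = default_b };
        struct hooks override = { .a = special_a };     /* .b stays NULL */

        copy_array(&override, &defaults, sizeof(defaults));
        defaults.a();   /* prints "special a": slot was overridden */
        defaults.b();   /* prints "default b": NULL slot left the default alone */
        return 0;
}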
@@ -254,9 +254,9 @@ static void __init ms_hyperv_init_platform(void)
 #endif
 }
 
-const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
         .name = "Microsoft Hyper-V",
         .detect = ms_hyperv_platform,
-        .init_platform = ms_hyperv_init_platform,
+        .type = X86_HYPER_MS_HYPERV,
+        .init.init_platform = ms_hyperv_init_platform,
 };
-EXPORT_SYMBOL(x86_hyper_ms_hyperv);
@@ -205,10 +205,10 @@ static bool __init vmware_legacy_x2apic_available(void)
                (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
 }
 
-const __refconst struct hypervisor_x86 x86_hyper_vmware = {
+const __initconst struct hypervisor_x86 x86_hyper_vmware = {
         .name = "VMware",
         .detect = vmware_platform,
-        .init_platform = vmware_platform_setup,
-        .x2apic_available = vmware_legacy_x2apic_available,
+        .type = X86_HYPER_VMWARE,
+        .init.init_platform = vmware_platform_setup,
+        .init.x2apic_available = vmware_legacy_x2apic_available,
 };
-EXPORT_SYMBOL(x86_hyper_vmware);
@@ -544,12 +544,12 @@ static uint32_t __init kvm_detect(void)
         return kvm_cpuid_base();
 }
 
-const struct hypervisor_x86 x86_hyper_kvm __refconst = {
+const __initconst struct hypervisor_x86 x86_hyper_kvm = {
         .name = "KVM",
         .detect = kvm_detect,
-        .x2apic_available = kvm_para_available,
+        .type = X86_HYPER_KVM,
+        .init.x2apic_available = kvm_para_available,
 };
-EXPORT_SYMBOL_GPL(x86_hyper_kvm);
 
 static __init int activate_jump_labels(void)
 {
@@ -28,6 +28,8 @@ void x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
 int __init iommu_init_noop(void) { return 0; }
 void iommu_shutdown_noop(void) { }
+bool __init bool_x86_init_noop(void) { return false; }
+void x86_op_int_noop(int cpu) { }
 
 /*
  * The platform setup functions are preset with the default functions
@@ -81,6 +83,12 @@ struct x86_init_ops x86_init __initdata = {
                 .init_irq = x86_default_pci_init_irq,
                 .fixup_irqs = x86_default_pci_fixup_irqs,
         },
+
+        .hyper = {
+                .init_platform = x86_init_noop,
+                .x2apic_available = bool_x86_init_noop,
+                .init_mem_mapping = x86_init_noop,
+        },
 };
 
 struct x86_cpuinit_ops x86_cpuinit = {
@@ -101,6 +109,7 @@ struct x86_platform_ops x86_platform __ro_after_init = {
         .get_nmi_reason = default_get_nmi_reason,
         .save_sched_clock_state = tsc_save_sched_clock_state,
         .restore_sched_clock_state = tsc_restore_sched_clock_state,
+        .hyper.pin_vcpu = x86_op_int_noop,
 };
 
 EXPORT_SYMBOL_GPL(x86_platform);
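With bool_x86_init_noop, x86_op_int_noop and x86_init_noop wired up as defaults above, callers never need a NULL check; the apic.c and mm/init.c hunks in this merge invoke the hooks unconditionally. A hedged sketch of that calling convention, with an invented wrapper function used purely for illustration:

#include <asm/x86_init.h>
#include <linux/printk.h>

/* Illustrative only: each hook is always callable and stays a noop
 * unless init_hypervisor_platform() copied a real callback over it. */
static void example_use_hyper_hooks(int cpu)
{
        if (x86_init.hyper.x2apic_available())
                pr_info("hypervisor reports x2apic support\n");

        x86_platform.hyper.pin_vcpu(cpu);
}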
@@ -671,7 +671,7 @@ void __init init_mem_mapping(void)
         load_cr3(swapper_pg_dir);
         __flush_tlb_all();
 
-        hypervisor_init_mem_mapping();
+        x86_init.hyper.init_mem_mapping();
 
         early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
@@ -226,12 +226,12 @@ static uint32_t __init xen_platform_hvm(void)
         return xen_cpuid_base();
 }
 
-const struct hypervisor_x86 x86_hyper_xen_hvm = {
+const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = {
         .name = "Xen HVM",
         .detect = xen_platform_hvm,
-        .init_platform = xen_hvm_guest_init,
-        .pin_vcpu = xen_pin_vcpu,
-        .x2apic_available = xen_x2apic_para_available,
-        .init_mem_mapping = xen_hvm_init_mem_mapping,
+        .type = X86_HYPER_XEN_HVM,
+        .init.init_platform = xen_hvm_guest_init,
+        .init.x2apic_available = xen_x2apic_para_available,
+        .init.init_mem_mapping = xen_hvm_init_mem_mapping,
+        .runtime.pin_vcpu = xen_pin_vcpu,
 };
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
@@ -1459,9 +1459,9 @@ static uint32_t __init xen_platform_pv(void)
         return 0;
 }
 
-const struct hypervisor_x86 x86_hyper_xen_pv = {
+const __initconst struct hypervisor_x86 x86_hyper_xen_pv = {
         .name = "Xen PV",
         .detect = xen_platform_pv,
-        .pin_vcpu = xen_pin_vcpu,
+        .type = X86_HYPER_XEN_PV,
+        .runtime.pin_vcpu = xen_pin_vcpu,
 };
-EXPORT_SYMBOL(x86_hyper_xen_pv);
@@ -1534,7 +1534,7 @@ static int __init hv_acpi_init(void)
 {
         int ret, t;
 
-        if (x86_hyper != &x86_hyper_ms_hyperv)
+        if (x86_hyper_type != X86_HYPER_MS_HYPERV)
                 return -ENODEV;
 
         init_completion(&probe_event);
@@ -316,11 +316,9 @@ static int vmmouse_enable(struct psmouse *psmouse)
 /*
  * Array of supported hypervisors.
  */
-static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = {
-        &x86_hyper_vmware,
-#ifdef CONFIG_KVM_GUEST
-        &x86_hyper_kvm,
-#endif
+static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = {
+        X86_HYPER_VMWARE,
+        X86_HYPER_KVM,
 };
 
 /**
@@ -331,7 +329,7 @@ static bool vmmouse_check_hypervisor(void)
         int i;
 
         for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++)
-                if (vmmouse_supported_hypervisors[i] == x86_hyper)
+                if (vmmouse_supported_hypervisors[i] == x86_hyper_type)
                         return true;
 
         return false;
@@ -1271,7 +1271,7 @@ static int __init vmballoon_init(void)
          * Check if we are running on VMware's hypervisor and bail out
          * if we are not.
          */
-        if (x86_hyper != &x86_hyper_vmware)
+        if (x86_hyper_type != X86_HYPER_VMWARE)
                 return -ENODEV;
 
         for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
@@ -7,8 +7,12 @@
  * Juergen Gross <jgross@suse.com>
  */
 
-#ifdef CONFIG_HYPERVISOR_GUEST
-#include <asm/hypervisor.h>
+#ifdef CONFIG_X86
+#include <asm/x86_init.h>
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+        x86_platform.hyper.pin_vcpu(cpu);
+}
 #else
 static inline void hypervisor_pin_vcpu(int cpu)
 {