/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_EVMCS_H
#define __KVM_X86_VMX_EVMCS_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

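/*
 * When enable_evmcs is on, the per-CPU current_vmcs pointer refers to an
 * enlightened VMCS, so it can be reinterpreted as one here.
 */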
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *	POSTED_INTR_NV                  = 0x00000002,
 *	GUEST_INTR_STATUS               = 0x00000810,
 *	APIC_ACCESS_ADDR                = 0x00002014,
 *	POSTED_INTR_DESC_ADDR           = 0x00002016,
 *	EOI_EXIT_BITMAP0                = 0x0000201c,
 *	EOI_EXIT_BITMAP1                = 0x0000201e,
 *	EOI_EXIT_BITMAP2                = 0x00002020,
 *	EOI_EXIT_BITMAP3                = 0x00002022,
 *	GUEST_PML_INDEX                 = 0x00000812,
 *	PML_ADDRESS                     = 0x0000200e,
 *	VM_FUNCTION_CONTROL             = 0x00002018,
 *	EPTP_LIST_ADDRESS               = 0x00002024,
 *	VMREAD_BITMAP                   = 0x00002026,
 *	VMWRITE_BITMAP                  = 0x00002028,
 *
 *	TSC_MULTIPLIER                  = 0x00002032,
 *	PLE_GAP                         = 0x00004020,
 *	PLE_WINDOW                      = 0x00004022,
 *	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *	GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
 *	HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
 *
 * Currently unsupported in KVM:
 *	GUEST_IA32_RTIT_CTL             = 0x00002814,
 */
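
/*
 * Control bits that correspond to the unsupported fields above. When eVMCS
 * is in use these are masked out of the VMX controls KVM uses or advertises
 * (see evmcs_sanitize_exec_ctrls() and nested_evmcs_check_controls() below).
 */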
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
				    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_2NDEXEC					\
	(SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)

#if IS_ENABLED(CONFIG_HYPERV)

struct evmcs_field {
	u16 offset;
	u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
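
/*
 * vmcs_field_to_evmcs_1[] is indexed by the VMCS field encoding rotated
 * left by 6 bits, which moves the field index into the high bits of the
 * u16. Each entry gives the field's offset within struct
 * hv_enlightened_vmcs plus the clean-field bit that must be cleared
 * whenever the field is written.
 */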
static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	unsigned int index = ROL16(field, 6);
	const struct evmcs_field *evmcs_field;

	if (unlikely(index >= nr_evmcs_1_fields)) {
		WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
			  field);
		return -ENOENT;
	}

	evmcs_field = &vmcs_field_to_evmcs_1[index];

	if (clean_field)
		*clean_field = evmcs_field->clean_field;

	return evmcs_field->offset;
}

#undef ROL16
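
/*
 * Accessors for the currently loaded enlightened VMCS. Writes also clear
 * the field's group bit in hv_clean_fields so Hyper-V reloads that group on
 * the next VM entry; e.g. evmcs_write32(EXCEPTION_BITMAP, eb) stores the
 * value and marks the corresponding clean-field group dirty. Accesses to
 * fields that eVMCSv1 does not support trigger a one-time warning and are
 * otherwise ignored (reads return 0).
 */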
static inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;

	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}
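
/*
 * Called after KVM (running on Hyper-V) modifies an MSR bitmap: if the
 * MSR-bitmap enlightenment is enabled for the current eVMCS, clear its
 * clean-field bit so Hyper-V picks up the new bitmap on the next VM entry.
 */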
static inline void evmcs_touch_msr_bitmap(void)
{
	if (unlikely(!current_evmcs))
		return;

	if (current_evmcs->hv_enlightenments_control.msr_bitmap)
		current_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
}
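
/*
 * Replacement for VMPTRLD when eVMCS is active: publish the eVMCS at
 * @phys_addr in this CPU's VP assist page and set enlighten_vmentry so
 * Hyper-V uses it for subsequent VM entries. The direct hypercall feature
 * is also enabled when the nested flush enlightenment is in use.
 */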
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
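/* No-op stubs so callers need not check IS_ENABLED(CONFIG_HYPERV) themselves. */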
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
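
/*
 * Possible outcomes when KVM maps the enlightened VMCS that L1 designates
 * through its VP assist page (see nested_vmx_handle_enlightened_vmptrld()
 * in nested.c): eVMCS not in use, mapped successfully, a failure to be
 * treated as VMfail, or an internal error.
 */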
enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

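/*
 * Interface for exposing Enlightened VMCS to KVM's own (Hyper-V aware) L1
 * guests, implemented in evmcs.c. nested_enlightened_vmentry() reports
 * whether L1 enabled enlightened vmentry in its VP assist page and, if so,
 * returns the eVMCS GPA; VMCLEAR/VMPTRLD emulation relies on this because
 * the revision id alone cannot distinguish an eVMCS from an ordinary VMCS.
 */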
bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);

#endif /* __KVM_X86_VMX_EVMCS_H */