Mirror of https://github.com/torvalds/linux.git

commit 74a497fae7
VMCS12 fields that are not handled through shadow VMCS are rarely
written, and thus they are also almost constant in the vmcs02.  We can
thus optimize prepare_vmcs02 by skipping all the work for non-shadowed
fields in the common case.

This patch introduces the (pretty simple) tracking infrastructure; the
next patches will move work to prepare_vmcs02_full and save a few
hundred clock cycles per VMRESUME on a Haswell Xeon E5 system:

                                         before    after
    cpuid                                 14159    13869
    vmcall                                15290    14951
    inl_from_kernel                       17703    17447
    outl_to_kernel                        16011    14692
    self_ipi_sti_nop                      16763    15825
    self_ipi_tpr_sti_nop                  17341    15935
    wr_tsc_adjust_msr                     14510    14264
    rd_tsc_adjust_msr                     15018    14311
    mmio-wildcard-eventfd:pci-mem         16381    14947
    mmio-datamatch-eventfd:pci-mem        18620    17858
    portio-wildcard-eventfd:pci-io        15121    14769
    portio-datamatch-eventfd:pci-io       15761    14831

(average savings 748, stdev 460)

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
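The commit message describes a dirty-tracking flag that lets prepare_vmcs02 skip syncing the
rarely-written, non-shadowed vmcs12 fields unless one of them has actually changed. Below is a
minimal, self-contained sketch of that pattern. Only prepare_vmcs02 and prepare_vmcs02_full are
named in the commit message; the dirty_vmcs12 flag, the vmcs12_write_field helper, and the
simplified structures are illustrative assumptions, not the kernel's actual definitions.

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for the real KVM nested-VMX state (illustrative only). */
	struct nested_vmx {
		bool dirty_vmcs12;	/* hypothetical: set when a non-shadowed field changes */
	};

	static bool is_shadowed_field(unsigned long field)
	{
		/* In the real code this corresponds to the SHADOW_FIELD_RO/RW list; stubbed here. */
		(void)field;
		return false;
	}

	/* Emulated VMWRITE path: a write to a non-shadowed field marks vmcs12 dirty. */
	static void vmcs12_write_field(struct nested_vmx *nested, unsigned long field,
				       unsigned long value)
	{
		(void)value;	/* the real code stores the value into the vmcs12 image */
		if (!is_shadowed_field(field))
			nested->dirty_vmcs12 = true;
	}

	/* Non-shadowed, almost-constant fields are copied into vmcs02 only when dirty. */
	static void prepare_vmcs02_full(struct nested_vmx *nested)
	{
		(void)nested;
		printf("syncing non-shadowed vmcs12 fields into vmcs02\n");
	}

	static void prepare_vmcs02(struct nested_vmx *nested)
	{
		/* Shadowed fields are always synced here (cheap, and they change often). */

		/* The expensive part runs only when a non-shadowed field was touched. */
		if (nested->dirty_vmcs12) {
			prepare_vmcs02_full(nested);
			nested->dirty_vmcs12 = false;
		}
	}

	int main(void)
	{
		struct nested_vmx n = { .dirty_vmcs12 = true };

		prepare_vmcs02(&n);	/* first entry: full sync */
		prepare_vmcs02(&n);	/* subsequent VMRESUME: full sync skipped */
		vmcs12_write_field(&n, 0, 0);	/* emulated write re-arms the flag */
		prepare_vmcs02(&n);	/* full sync again */
		return 0;
	}

The point of the design is that the common VMRESUME path only pays for the handful of shadowed
fields; the full copy is deferred to the rare case where L1 actually rewrote something else.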
78 lines · 2.3 KiB · C
#ifndef SHADOW_FIELD_RO
#define SHADOW_FIELD_RO(x)
#endif
#ifndef SHADOW_FIELD_RW
#define SHADOW_FIELD_RW(x)
#endif

/*
 * We do NOT shadow fields that are modified when L0
 * traps and emulates any vmx instruction (e.g. VMPTRLD,
 * VMXON...) executed by L1.
 * For example, VM_INSTRUCTION_ERROR is read
 * by L1 if a vmx instruction fails (part of the error path).
 * Note the code assumes this logic. If for some reason
 * we start shadowing these fields then we need to
 * force a shadow sync when L0 emulates vmx instructions
 * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
 * by nested_vmx_failValid)
 *
 * When adding or removing fields here, note that shadowed
 * fields must always be synced by prepare_vmcs02, not just
 * prepare_vmcs02_full.
 */

/*
 * Keeping the fields ordered by size is an attempt at improving
 * branch prediction in vmcs_read_any and vmcs_write_any.
 */

/* 16-bits */
SHADOW_FIELD_RW(GUEST_CS_SELECTOR)
SHADOW_FIELD_RW(GUEST_INTR_STATUS)
SHADOW_FIELD_RW(GUEST_PML_INDEX)
SHADOW_FIELD_RW(HOST_FS_SELECTOR)
SHADOW_FIELD_RW(HOST_GS_SELECTOR)

/* 32-bits */
SHADOW_FIELD_RO(VM_EXIT_REASON)
SHADOW_FIELD_RO(VM_EXIT_INTR_INFO)
SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN)
SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD)
SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE)
SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE)
SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL)
SHADOW_FIELD_RW(EXCEPTION_BITMAP)
SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
SHADOW_FIELD_RW(TPR_THRESHOLD)
SHADOW_FIELD_RW(GUEST_CS_LIMIT)
SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)

/* Natural width */
SHADOW_FIELD_RO(EXIT_QUALIFICATION)
SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS)
SHADOW_FIELD_RW(GUEST_RIP)
SHADOW_FIELD_RW(GUEST_RSP)
SHADOW_FIELD_RW(GUEST_CR0)
SHADOW_FIELD_RW(GUEST_CR3)
SHADOW_FIELD_RW(GUEST_CR4)
SHADOW_FIELD_RW(GUEST_RFLAGS)
SHADOW_FIELD_RW(GUEST_CS_BASE)
SHADOW_FIELD_RW(GUEST_ES_BASE)
SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK)
SHADOW_FIELD_RW(CR0_READ_SHADOW)
SHADOW_FIELD_RW(CR4_READ_SHADOW)
SHADOW_FIELD_RW(HOST_FS_BASE)
SHADOW_FIELD_RW(HOST_GS_BASE)

/* 64-bit */
SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS)
SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH)

#undef SHADOW_FIELD_RO
#undef SHADOW_FIELD_RW
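This header is an X-macro list: each SHADOW_FIELD_RO/RW entry expands to whatever a consumer
defines before including the file (both macros default to nothing and are #undef'd at the end).
The snippet below is a hedged illustration of how such a list can be consumed, building a table
of the read/write shadowed field names by stringifying each entry. The file name
vmx_shadow_fields.h and the consumer code are assumptions for the example, not taken from this
page.

	#include <stdio.h>

	/*
	 * Hypothetical consumer of the X-macro list: expand each read/write entry
	 * to its name as a string.  SHADOW_FIELD_RO is left undefined, so the
	 * header's default empty definition drops the read-only entries.
	 */
	static const char * const shadow_read_write_fields[] = {
	#define SHADOW_FIELD_RW(x) #x,
	#include "vmx_shadow_fields.h"	/* assumed file name for this header */
	};

	int main(void)
	{
		size_t i;

		for (i = 0; i < sizeof(shadow_read_write_fields) /
				sizeof(shadow_read_write_fields[0]); i++)
			printf("%s\n", shadow_read_write_fields[i]);
		return 0;
	}

In the kernel itself the same pattern is typically used with macros that expand to the VMCS
field encodings, so the resulting arrays can drive the shadow-VMCS copy loops rather than print
names; the stringified version here is only for a self-contained demonstration.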