Commit 30b5c851af in https://github.com/torvalds/linux.git:

This is how Xen guests do steal time accounting. The hypervisor records the amount of time spent in each of running/runnable/blocked/offline states.

In the Xen accounting, a vCPU is still in state RUNSTATE_running while in Xen for a hypercall or I/O trap, etc. Only if Xen explicitly schedules does the state become RUNSTATE_blocked. In KVM this means that even when the vCPU exits the kvm_run loop, the state remains RUNSTATE_running. The VMM can explicitly set the vCPU to RUNSTATE_blocked by using the KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT attribute, and can also use KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST to retrospectively add a given amount of time to the blocked state and subtract it from the running state.

The state_entry_time corresponds to get_kvmclock_ns() at the time the vCPU entered the current state, and the total times of all four states should always add up to state_entry_time.

Co-developed-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20210301125309.874953-2-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
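
The runstate attributes described in the commit message are driven from userspace. As a rough illustration, the sketch below shows how a VMM might mark a vCPU as blocked while it waits outside KVM_RUN, then retroactively move time from the running bucket to the blocked bucket. This is a hedged sketch, not part of the header below: it assumes the KVM_XEN_VCPU_SET_ATTR ioctl, struct kvm_xen_vcpu_attr and the KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_* types added to <linux/kvm.h> alongside this series, and vcpu_fd / delta_ns are placeholder names; check the field names against the installed UAPI headers.

/*
 * Illustrative VMM-side sketch only (not part of the header below).
 * Assumes the KVM_XEN_VCPU_SET_ATTR UAPI from this series is available
 * in <linux/kvm.h>.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Xen runstate values, as defined in xen/interface/vcpu.h */
#define RUNSTATE_running  0
#define RUNSTATE_runnable 1
#define RUNSTATE_blocked  2
#define RUNSTATE_offline  3

/* Tell KVM the vCPU is now blocked, e.g. before the VMM sleeps on its behalf. */
int vcpu_set_blocked(int vcpu_fd)
{
        struct kvm_xen_vcpu_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
        attr.u.runstate.state = RUNSTATE_blocked;
        return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}

/*
 * Retroactively move delta_ns from the running bucket to the blocked
 * bucket. The two deltas cancel, so the total accounted time (and with
 * it state_entry_time) is unchanged, preserving the invariant that the
 * four state times add up to state_entry_time.
 */
int vcpu_adjust_blocked(int vcpu_fd, int64_t delta_ns)
{
        struct kvm_xen_vcpu_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST;
        attr.u.runstate.time_blocked = (uint64_t)delta_ns;
        attr.u.runstate.time_running = (uint64_t)-delta_ns; /* two's-complement subtraction */
        return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}
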
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_destroy_vm(struct kvm *kvm);

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                kvm->arch.xen_hvm_config.msr;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                (kvm->arch.xen_hvm_config.flags &
                 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_xen_enabled.key) &&
            vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
                return __kvm_xen_has_interrupt(vcpu);

        return 0;
}
#else
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
        return 1;
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
        kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
        /*
         * If the vCPU wasn't preempted but took a normal exit for
         * some reason (hypercalls, I/O, etc.), that is accounted as
         * still RUNSTATE_running, as the VMM is still operating on
         * behalf of the vCPU. Only if the VMM does actually block
         * does it need to enter RUNSTATE_blocked.
         */
        if (vcpu->preempted)
                kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
        unsigned int cr2;
        unsigned int pad[5];
};

struct compat_vcpu_info {
        uint8_t evtchn_upcall_pending;
        uint8_t evtchn_upcall_mask;
        uint16_t pad;
        uint32_t evtchn_pending_sel;
        struct compat_arch_vcpu_info arch;
        struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
        unsigned int max_pfn;
        unsigned int pfn_to_mfn_frame_list_list;
        unsigned int nmi_reason;
        unsigned int p2m_cr3;
        unsigned int p2m_vaddr;
        unsigned int p2m_generation;
        uint32_t wc_sec_hi;
};

struct compat_shared_info {
        struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
        uint32_t evtchn_pending[32];
        uint32_t evtchn_mask[32];
        struct pvclock_wall_clock wc;
        struct compat_arch_shared_info arch;
};

struct compat_vcpu_runstate_info {
        int state;
        uint64_t state_entry_time;
        uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */