Mirror of https://github.com/torvalds/linux.git (synced 2024-12-14 23:25:54 +00:00)
KVM: x86: Use gfn_to_pfn_cache for pv_time
Add a new kvm_setup_guest_pvclock() which parallels the existing
kvm_setup_pvclock_page(). The latter will be removed once we convert
all users to the gfn_to_pfn_cache version.

Using the new cache, we can potentially let kvm_set_guest_paused() set
the PVCLOCK_GUEST_STOPPED bit directly rather than having to delegate
to the vCPU via KVM_REQ_CLOCK_UPDATE. But not yet.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220303154127.202856-5-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
commit 916d3608df
parent a795cd43c5
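For readers unfamiliar with the gfn_to_pfn_cache API this patch converts pv_time to, the sketch below distills the usage pattern visible in the hunks that follow: take the cache's read lock, check validity, refresh outside the lock on a miss, then write through the kernel mapping and mark the page dirty. Only the kvm_gfn_to_pfn_cache_*() calls and the gpc fields are taken from the patch itself; the helper name, its signature, and the -EFAULT return are illustrative assumptions, not upstream code.

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/string.h>

/* Illustrative helper: copy 'len' bytes of 'data' into the guest page
 * backing 'gpc', following the check/refresh pattern used below. */
static int example_write_guest_page(struct kvm_vcpu *v,
                                    struct gfn_to_pfn_cache *gpc,
                                    const void *data, unsigned int len)
{
        unsigned long flags;

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, len)) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /* Revalidate the mapping outside the lock; give up on failure. */
                if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, len))
                        return -EFAULT;

                read_lock_irqsave(&gpc->lock, flags);
        }

        /* The kernel mapping (gpc->khva) is stable while the lock is held. */
        memcpy(gpc->khva, data, len);
        mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
        read_unlock_irqrestore(&gpc->lock, flags);

        return 0;
}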
arch/x86/include/asm/kvm_host.h
@@ -751,8 +751,7 @@ struct kvm_vcpu_arch {
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
 	unsigned int hw_tsc_khz;
-	struct gfn_to_hva_cache pv_time;
-	bool pv_time_enabled;
+	struct gfn_to_pfn_cache pv_time;
 	/* set guest stopped flag in pvclock flags field */
 	bool pvclock_set_guest_stopped_request;
 
arch/x86/kvm/x86.c
@@ -2249,14 +2249,13 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
 	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
 	/* we verify if the enable bit is set... */
-	vcpu->arch.pv_time_enabled = false;
-	if (!(system_time & 1))
-		return;
-
-	if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
-					&vcpu->arch.pv_time, system_time & ~1ULL,
-					sizeof(struct pvclock_vcpu_time_info)))
-		vcpu->arch.pv_time_enabled = true;
+	if (system_time & 1) {
+		kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
+					  KVM_HOST_USES_PFN, system_time & ~1ULL,
+					  sizeof(struct pvclock_vcpu_time_info));
+	} else {
+		kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
+	}
 
 	return;
 }
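For context on the enable bit tested in the hunk above: the guest enables kvmclock by writing the guest-physical address of its pvclock_vcpu_time_info structure, with bit 0 set, to the system-time MSR; clearing bit 0 disables it. A hypothetical guest-side sketch follows (the helper names and the static variable are illustrative; MSR_KVM_SYSTEM_TIME_NEW and the bit-0 convention come from the documented kvmclock ABI, not from this patch).

#include <asm/kvm_para.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/pvclock-abi.h>

/* Real guests keep one time-info structure per vCPU; one is enough here. */
static struct pvclock_vcpu_time_info example_hv_clock __aligned(32);

static void example_enable_kvmclock(void)
{
        /* Bit 0 set => enable; KVM caches the page at (gpa & ~1ULL). */
        wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, __pa(&example_hv_clock) | 1);
}

static void example_disable_kvmclock(void)
{
        /* Bit 0 clear => disable; the hunk above tears the cache down. */
        wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, 0);
}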
@@ -2961,6 +2960,57 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 	return data.clock;
 }
 
+static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
+				    struct gfn_to_pfn_cache *gpc,
+				    unsigned int offset)
+{
+	struct kvm_vcpu_arch *vcpu = &v->arch;
+	struct pvclock_vcpu_time_info *guest_hv_clock;
+	unsigned long flags;
+
+	read_lock_irqsave(&gpc->lock, flags);
+	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
+					   offset + sizeof(*guest_hv_clock))) {
+		read_unlock_irqrestore(&gpc->lock, flags);
+
+		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
+						 offset + sizeof(*guest_hv_clock)))
+			return;
+
+		read_lock_irqsave(&gpc->lock, flags);
+	}
+
+	guest_hv_clock = (void *)(gpc->khva + offset);
+
+	/*
+	 * This VCPU is paused, but it's legal for a guest to read another
+	 * VCPU's kvmclock, so we really have to follow the specification where
+	 * it says that version is odd if data is being modified, and even after
+	 * it is consistent.
+	 */
+
+	guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1;
+	smp_wmb();
+
+	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
+	vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+
+	if (vcpu->pvclock_set_guest_stopped_request) {
+		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
+		vcpu->pvclock_set_guest_stopped_request = false;
+	}
+
+	memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
+	smp_wmb();
+
+	guest_hv_clock->version = ++vcpu->hv_clock.version;
+
+	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+	read_unlock_irqrestore(&gpc->lock, flags);
+
+	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+}
+
 static void kvm_setup_pvclock_page(struct kvm_vcpu *v,
 				   struct gfn_to_hva_cache *cache,
 				   unsigned int offset)
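The comment inside kvm_setup_guest_pvclock() above describes the writer side of the pvclock version protocol: the version is made odd before the update and even afterwards, with write barriers in between. For illustration, here is a guest-side reader sketch of the matching retry loop. The helper is hypothetical and not part of this patch; the upstream guest equivalents are pvclock_read_begin() and pvclock_read_retry().

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/pvclock-abi.h>

/* Copy a consistent snapshot of 'src' into 'dst', retrying while the
 * host is mid-update (odd version) or bumps the version underneath us. */
static void example_read_pvclock(const struct pvclock_vcpu_time_info *src,
                                 struct pvclock_vcpu_time_info *dst)
{
        u32 version;

        do {
                /* An odd version means the host is updating; wait for even. */
                do {
                        version = READ_ONCE(src->version);
                } while (version & 1);

                rmb();  /* read the version before the data */
                memcpy(dst, src, sizeof(*dst));
                rmb();  /* read the data before the re-check */

                /* Retry if the host changed the version while we copied. */
        } while (READ_ONCE(src->version) != version);
}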
@@ -3106,8 +3156,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	vcpu->hv_clock.flags = pvclock_flags;
 
-	if (vcpu->pv_time_enabled)
-		kvm_setup_pvclock_page(v, &vcpu->pv_time, 0);
+	if (vcpu->pv_time.active)
+		kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
 	if (vcpu->xen.vcpu_info_set)
 		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
 				       offsetof(struct compat_vcpu_info, time));
@@ -3301,7 +3351,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.pv_time_enabled = false;
+	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
 	vcpu->arch.time = 0;
 }
 
@@ -5103,7 +5153,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  */
 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->arch.pv_time_enabled)
+	if (!vcpu->arch.pv_time.active)
 		return -EINVAL;
 	vcpu->arch.pvclock_set_guest_stopped_request = true;
 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
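The commit message notes that, with a pfn cache in place, kvm_set_guest_paused() could eventually set PVCLOCK_GUEST_STOPPED directly instead of delegating via KVM_REQ_CLOCK_UPDATE, and that this patch deliberately does not do so yet. Purely as a sketch of that possibility: the code below is hypothetical, not upstream, and a real version would also have to respect the version protocol used in kvm_setup_guest_pvclock().

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <asm/pvclock-abi.h>

/* Hypothetical direct-set variant; falls back to the request-based path
 * whenever the cached mapping is not currently valid. */
static int example_set_guest_paused_direct(struct kvm_vcpu *vcpu)
{
        struct gfn_to_pfn_cache *gpc = &vcpu->arch.pv_time;
        struct pvclock_vcpu_time_info *guest_hv_clock;
        unsigned long flags;

        if (!gpc->active)
                return -EINVAL;

        read_lock_irqsave(&gpc->lock, flags);
        if (!kvm_gfn_to_pfn_cache_check(vcpu->kvm, gpc, gpc->gpa,
                                        sizeof(*guest_hv_clock))) {
                read_unlock_irqrestore(&gpc->lock, flags);
                /* Cache not valid right now: use the existing deferred path. */
                vcpu->arch.pvclock_set_guest_stopped_request = true;
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                return 0;
        }

        /* OR the flag into the guest copy through the kernel mapping. */
        guest_hv_clock = gpc->khva;
        guest_hv_clock->flags |= PVCLOCK_GUEST_STOPPED;
        smp_wmb();
        mark_page_dirty_in_slot(vcpu->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
        read_unlock_irqrestore(&gpc->lock, flags);

        return 0;
}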
@@ -6187,7 +6237,7 @@ static int kvm_arch_suspend_notifier(struct kvm *kvm)
 
 	mutex_lock(&kvm->lock);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (!vcpu->arch.pv_time_enabled)
+		if (!vcpu->arch.pv_time.active)
 			continue;
 
 		ret = kvm_set_guest_paused(vcpu);