Merge branch 'gpc-fixes' of git://git.infradead.org/users/dwmw2/linux into HEAD

Pull Xen-for-KVM changes from David Woodhouse:

* add support for 32-bit guests in SCHEDOP_poll

* the rest of the gfn-to-pfn cache API cleanup:

  "I still haven't reinstated the last of those patches to make gpc->len
   immutable."

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 5656374b16
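
The theme of the gfn-to-pfn cache cleanup is that the struct kvm pointer, the vCPU used for dirty tracking and wakeups, and the pfn_cache_usage flags become immutable properties assigned once at init time instead of being passed on every call. Condensed from the header changes in the diff below, the before and after of the exported API:

	/* Before: every call site carries kvm/vcpu/usage/gpa. */
	void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
	int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
			     gpa_t gpa, unsigned long len);
	bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
			   unsigned long len);
	int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
			    unsigned long len);
	void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
	void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);

	/* After: immutable attributes are bound at init; kvm_gpc_unmap() is
	 * gone, its teardown folded into kvm_gpc_deactivate(). */
	void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
			  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
	int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
	bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
	int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
	void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);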
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2311,13 +2311,11 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
 	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
 	/* we verify if the enable bit is set... */
-	if (system_time & 1) {
-		kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
-				 KVM_HOST_USES_PFN, system_time & ~1ULL,
-				 sizeof(struct pvclock_vcpu_time_info));
-	} else {
-		kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
-	}
+	if (system_time & 1)
+		kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
+				 sizeof(struct pvclock_vcpu_time_info));
+	else
+		kvm_gpc_deactivate(&vcpu->arch.pv_time);
 
 	return;
 }
@@ -3047,12 +3045,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 	unsigned long flags;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
-			      offset + sizeof(*guest_hv_clock))) {
+	while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
-				    offset + sizeof(*guest_hv_clock)))
+		if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
 			return;
 
 		read_lock_irqsave(&gpc->lock, flags);
@@ -3401,7 +3397,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
+	kvm_gpc_deactivate(&vcpu->arch.pv_time);
 	vcpu->arch.time = 0;
 }
 
@@ -11559,7 +11555,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs_avail = ~0;
 	vcpu->arch.regs_dirty = ~0;
 
-	kvm_gpc_init(&vcpu->arch.pv_time);
+	kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
 
 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
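Every caller follows the shape visible in kvm_setup_guest_pvclock() above: validate under the read lock, and on failure drop the lock, refresh, and retry. A minimal sketch of that consumer loop under the new signatures (gpc, len and flags stand in for the caller's locals; error handling reduced to a bare return):

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * A successful refresh still doesn't license access: an
		 * invalidation may race with it, so the cache must be
		 * re-checked under the lock.
		 */
		if (kvm_gpc_refresh(gpc, len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}
	/* ... access the mapping via gpc->khva ... */
	read_unlock_irqrestore(&gpc->lock, flags);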
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -42,13 +42,12 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
 	int idx = srcu_read_lock(&kvm->srcu);
 
 	if (gfn == GPA_INVALID) {
-		kvm_gpc_deactivate(kvm, gpc);
+		kvm_gpc_deactivate(gpc);
 		goto out;
 	}
 
 	do {
-		ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
-				       PAGE_SIZE);
+		ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
 		if (ret)
 			goto out;
 
@@ -273,14 +272,14 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 * gfn_to_pfn caches that cover the region.
 	 */
 	read_lock_irqsave(&gpc1->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
+	while (!kvm_gpc_check(gpc1, user_len1)) {
 		read_unlock_irqrestore(&gpc1->lock, flags);
 
 		/* When invoked from kvm_sched_out() we cannot sleep */
 		if (atomic)
 			return;
 
-		if (kvm_gpc_refresh(v->kvm, gpc1, gpc1->gpa, user_len1))
+		if (kvm_gpc_refresh(gpc1, user_len1))
 			return;
 
 		read_lock_irqsave(&gpc1->lock, flags);
@@ -309,7 +308,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 */
 	read_lock(&gpc2->lock);
 
-	if (!kvm_gpc_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
+	if (!kvm_gpc_check(gpc2, user_len2)) {
 		read_unlock(&gpc2->lock);
 		read_unlock_irqrestore(&gpc1->lock, flags);
 
@@ -323,8 +322,8 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		 * to the second page now because the guest changed to
 		 * 64-bit mode, the second GPC won't have been set up.
 		 */
-		if (kvm_gpc_activate(v->kvm, gpc2, NULL, KVM_HOST_USES_PFN,
-				     gpc1->gpa + user_len1, user_len2))
+		if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
+				     user_len2))
 			return;
 
 		/*
@@ -489,12 +488,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	 * little more honest about it.
 	 */
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
-			      sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
-				    sizeof(struct vcpu_info)))
+		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
 			return;
 
 		read_lock_irqsave(&gpc->lock, flags);
@@ -554,8 +551,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 			     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
-			      sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
 		/*
@@ -569,8 +565,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		if (in_atomic() || !task_is_running(current))
 			return 1;
 
-		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
-				    sizeof(struct vcpu_info))) {
+		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
 			/*
 			 * If this failed, userspace has screwed up the
 			 * vcpu_info mapping. No interrupts for you.
@@ -711,15 +706,13 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 					     offsetof(struct compat_vcpu_info, time));
 
 		if (data->u.gpa == GPA_INVALID) {
-			kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
 			r = 0;
 			break;
 		}
 
-		r = kvm_gpc_activate(vcpu->kvm,
-				     &vcpu->arch.xen.vcpu_info_cache, NULL,
-				     KVM_HOST_USES_PFN, data->u.gpa,
-				     sizeof(struct vcpu_info));
+		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
+				     data->u.gpa, sizeof(struct vcpu_info));
 		if (!r)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
@@ -727,15 +720,13 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
 	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
 		if (data->u.gpa == GPA_INVALID) {
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.vcpu_time_info_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
 			r = 0;
 			break;
 		}
 
-		r = kvm_gpc_activate(vcpu->kvm,
-				     &vcpu->arch.xen.vcpu_time_info_cache,
-				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
+		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
+				     data->u.gpa,
 				     sizeof(struct pvclock_vcpu_time_info));
 		if (!r)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -751,10 +742,8 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		if (data->u.gpa == GPA_INVALID) {
 			r = 0;
 deactivate_out:
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.runstate_cache);
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.runstate2_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
 			break;
 		}
 
@@ -770,20 +759,18 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
 		/* How much fits in the (first) page? */
 		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
-		r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
-				     NULL, KVM_HOST_USES_PFN, data->u.gpa, sz1);
+		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
+				     data->u.gpa, sz1);
 		if (r)
 			goto deactivate_out;
 
 		/* Either map the second page, or deactivate the second GPC */
 		if (sz1 >= sz) {
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.runstate2_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
 		} else {
 			sz2 = sz - sz1;
 			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
-			r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate2_cache,
-					     NULL, KVM_HOST_USES_PFN,
+			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
 					     data->u.gpa + sz1, sz2);
 			if (r)
 				goto deactivate_out;
@@ -1167,7 +1154,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 
 	idx = srcu_read_lock(&kvm->srcu);
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(gpc, PAGE_SIZE))
 		goto out_rcu;
 
 	ret = false;
@@ -1201,20 +1188,45 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
 	evtchn_port_t port, *ports;
 	gpa_t gpa;
 
-	if (!longmode || !lapic_in_kernel(vcpu) ||
+	if (!lapic_in_kernel(vcpu) ||
 	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
 		return false;
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
-					sizeof(sched_poll))) {
+	if (!gpa) {
 		*r = -EFAULT;
 		return true;
 	}
 
+	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
+		struct compat_sched_poll sp32;
+
+		/* Sanity check that the compat struct definition is correct */
+		BUILD_BUG_ON(sizeof(sp32) != 16);
+
+		if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
+			*r = -EFAULT;
+			return true;
+		}
+
+		/*
+		 * This is a 32-bit pointer to an array of evtchn_port_t which
+		 * are uint32_t, so once it's converted no further compat
+		 * handling is needed.
+		 */
+		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
+		sched_poll.nr_ports = sp32.nr_ports;
+		sched_poll.timeout = sp32.timeout;
+	} else {
+		if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
+					sizeof(sched_poll))) {
+			*r = -EFAULT;
+			return true;
+		}
+	}
+
 	if (unlikely(sched_poll.nr_ports > 1)) {
 		/* Xen (unofficially) limits number of pollers to 128 */
 		if (sched_poll.nr_ports > 128) {
@@ -1564,7 +1576,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	idx = srcu_read_lock(&kvm->srcu);
 
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(gpc, PAGE_SIZE))
 		goto out_rcu;
 
 	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1598,7 +1610,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	gpc = &vcpu->arch.xen.vcpu_info_cache;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+	if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		/*
 		 * Could not access the vcpu_info. Set the bit in-kernel
 		 * and prod the vCPU to deliver it for itself.
@@ -1696,7 +1708,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 			break;
 
 		idx = srcu_read_lock(&kvm->srcu);
-		rc = kvm_gpc_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
+		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
 		srcu_read_unlock(&kvm->srcu, idx);
 	} while(!rc);
 
@@ -2026,10 +2038,14 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
 
 	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
 
-	kvm_gpc_init(&vcpu->arch.xen.runstate_cache);
-	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache);
-	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache);
-	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache);
+	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
 }
 
 void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@@ -2037,10 +2053,10 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 	if (kvm_xen_timer_enabled(vcpu))
 		kvm_xen_stop_timer(vcpu);
 
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache);
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate2_cache);
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
 
 	del_timer_sync(&vcpu->arch.xen.poll_timer);
 }
@@ -2048,7 +2064,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 void kvm_xen_init_vm(struct kvm *kvm)
 {
 	idr_init(&kvm->arch.xen.evtchn_ports);
-	kvm_gpc_init(&kvm->arch.xen.shinfo_cache);
+	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
 }
 
 void kvm_xen_destroy_vm(struct kvm *kvm)
@@ -2056,7 +2072,7 @@ void kvm_xen_destroy_vm(struct kvm *kvm)
 	struct evtchnfd *evtchnfd;
 	int i;
 
-	kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);
+	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
 
 	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
 		if (!evtchnfd->deliver.port.port)
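The Xen conversion shows the full lifecycle under the new API: each cache is bound to its VM exactly once at init time, with vcpu == NULL for the host-only users, and every later call needs nothing but the cache pointer. Condensed from the hunks above into one sequence (identifiers as in the diff):

	/* VM/vCPU setup: bind the cache to its VM and usage once. */
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);

	/* When userspace supplies (or clears) the guest address: */
	if (data->u.gpa == GPA_INVALID)
		kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
	else
		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));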
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -207,4 +207,11 @@ struct compat_vcpu_runstate_info {
 	uint64_t time[4];
 } __attribute__((packed));
 
+struct compat_sched_poll {
+	/* This is actually a guest virtual address which points to ports. */
+	uint32_t ports;
+	unsigned int nr_ports;
+	uint64_t timeout;
+};
+
 #endif /* __ARCH_X86_KVM_XEN_H__ */
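This small struct is the whole of the 32-bit ABI difference: a 32-bit guest lays the structure out as a 4-byte ports pointer at offset 0, a 4-byte nr_ports at offset 4, and an 8-byte timeout naturally aligned at offset 8, 16 bytes in total, which is exactly what the BUILD_BUG_ON(sizeof(sp32) != 16) in the SCHEDOP_poll hunk pins down. For contrast, a sketch of the native layout as I understand it from Xen's public ABI (not part of this diff; the GUEST_HANDLE name is Xen's, shown only to explain the size difference):

	/* Native 64-bit layout, 24 bytes vs. the 16-byte compat struct: */
	struct sched_poll {
		GUEST_HANDLE(evtchn_port_t) ports;	/* 8 bytes */
		unsigned int nr_ports;			/* 4 bytes + 4 padding */
		uint64_t timeout;			/* offset 16 */
	};

Since evtchn_port_t is uint32_t in both ABIs, widening the 32-bit pointer is the only conversion the kernel has to do, as the comment in the xen.c hunk notes.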
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1260,18 +1260,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * kvm_gpc_init - initialize gfn_to_pfn_cache.
  *
  * @gpc:	   struct gfn_to_pfn_cache object.
- *
- * This sets up a gfn_to_pfn_cache by initializing locks.  Note, the cache must
- * be zero-allocated (or zeroed by the caller before init).
- */
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
-
-/**
- * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
- *                    physical address.
- *
- * @kvm:	   pointer to kvm instance.
- * @gpc:	   struct gfn_to_pfn_cache object.
+ * @kvm:	   pointer to kvm instance.
  * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
  *		   invalidation.
  * @usage:	   indicates if the resulting host physical PFN is used while
@@ -1280,27 +1269,36 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
  *		   changes!---will also force @vcpu to exit the guest and
  *		   refresh the cache); and/or if the PFN used directly
  *		   by KVM (and thus needs a kernel virtual mapping).
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
+ * immutable attributes.  Note, the cache must be zero-allocated (or zeroed by
+ * the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
+		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ *                    physical address.
+ *
+ * @gpc:	   struct gfn_to_pfn_cache object.
  * @gpa:	   guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  *
  * @return:	   0 for success.
  *		   -EINVAL for a mapping which would cross a page boundary.
  *		   -EFAULT for an untranslatable guest physical address.
  *
- * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
+ * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
  * invalidations to be processed.  Callers are required to use kvm_gpc_check()
  * to ensure that the cache is valid before accessing the target page.
  */
-int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-		     gpa_t gpa, unsigned long len);
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
 
 /**
  * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
  *
- * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
- * @gpa:	   current guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  *
  * @return:	   %true if the cache is still valid and the address matches.
@@ -1313,52 +1311,35 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * Callers in IN_GUEST_MODE may do so without locking, although they should
  * still hold a read lock on kvm->scru for the memslot checks.
  */
-bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		   unsigned long len);
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
 
 /**
  * kvm_gpc_refresh - update a previously initialized cache.
  *
- * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
- * @gpa:	   updated guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  *
  * @return:	   0 for success.
  *		   -EINVAL for a mapping which would cross a page boundary.
  *		   -EFAULT for an untranslatable guest physical address.
  *
 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
- * returm from this function does not mean the page can be immediately
+ * return from this function does not mean the page can be immediately
 * accessed because it may have raced with an invalidation. Callers must
 * still lock and check the cache status, as this function does not return
 * with the lock still held to permit access.
 */
-int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		    unsigned long len);
-
-/**
- * kvm_gpc_unmap - temporarily unmap a gfn_to_pfn_cache.
- *
- * @kvm:	   pointer to kvm instance.
- * @gpc:	   struct gfn_to_pfn_cache object.
- *
- * This unmaps the referenced page. The cache is left in the invalid state
- * but at least the mapping from GPA to userspace HVA will remain cached
- * and can be reused on a subsequent refresh.
- */
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
 
 /**
  * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
  *
- * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
  *
- * This removes a cache from the @kvm's list to be processed on MMU notifier
+ * This removes a cache from the VM's list to be processed on MMU notifier
 * invocation.
 */
-void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
 
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
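The -EINVAL documented above for activate and refresh is also why the Xen runstate code carries two caches: a gfn_to_pfn_cache may never span a page boundary, so an object that straddles one has to be split at the boundary. The arithmetic, gathered from pfncache.c and the runstate hunk (a sketch of the existing checks, not a new helper):

	/* The cache-wide invariant enforced in pfncache.c: */
	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
		return false;	/* or -EINVAL from activate/refresh */

	/* So a possibly-straddling object is split across two caches,
	 * as kvm_xen_vcpu_set_attr() does for the runstate area: */
	sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);	/* part in first page */
	sz2 = sz - sz1;					/* remainder, second cache */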
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -67,6 +67,7 @@ struct gfn_to_pfn_cache {
 	gpa_t gpa;
 	unsigned long uhva;
 	struct kvm_memory_slot *memslot;
+	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;
 	struct list_head list;
 	rwlock_t lock;
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -76,19 +76,17 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 	}
 }
 
-bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		   unsigned long len)
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 
 	if (!gpc->active)
 		return false;
 
-	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+	if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
 		return false;
 
-	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-	    kvm_is_error_hva(gpc->uhva))
+	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
 		return false;
 
 	if (!gpc->valid)
@@ -139,7 +137,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
 	return kvm->mmu_invalidate_seq != mmu_seq;
 }
 
-static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 {
 	/* Note, the new page offset may be different than the old! */
 	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
@@ -159,7 +157,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	gpc->valid = false;
 
 	do {
-		mmu_seq = kvm->mmu_invalidate_seq;
+		mmu_seq = gpc->kvm->mmu_invalidate_seq;
 		smp_rmb();
 
 		write_unlock_irq(&gpc->lock);
@@ -217,7 +215,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		 * attempting to refresh.
 		 */
 		WARN_ON_ONCE(gpc->valid);
-	} while (mmu_notifier_retry_cache(kvm, mmu_seq));
+	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));
 
 	gpc->valid = true;
 	gpc->pfn = new_pfn;
@@ -238,10 +236,10 @@ out_error:
 	return -EFAULT;
 }
 
-int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		    unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+			     unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 	unsigned long page_offset = gpa & ~PAGE_MASK;
 	bool unmap_old = false;
 	unsigned long old_uhva;
@@ -295,7 +293,7 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	 * drop the lock and do the HVA to PFN lookup again.
 	 */
 	if (!gpc->valid || old_uhva != gpc->uhva) {
-		ret = hva_to_pfn_retry(kvm, gpc);
+		ret = hva_to_pfn_retry(gpc);
 	} else {
 		/*
 		 * If the HVA→PFN mapping was already valid, don't unmap it.
@@ -303,9 +301,8 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 		 * may have changed.
 		 */
 		gpc->khva = old_khva + page_offset;
-		old_pfn = KVM_PFN_ERR_FAULT;
-		old_khva = NULL;
 		ret = 0;
+		goto out_unlock;
 	}
 
 out:
@@ -333,55 +330,37 @@ out_unlock:
 
 	return ret;
 }
+
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
+{
+	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+}
 EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
+		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
 {
-	void *old_khva;
-	kvm_pfn_t old_pfn;
+	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
 
-	mutex_lock(&gpc->refresh_lock);
-	write_lock_irq(&gpc->lock);
-
-	gpc->valid = false;
-
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_pfn = gpc->pfn;
-
-	/*
-	 * We can leave the GPA → uHVA map cache intact but the PFN
-	 * lookup will need to be redone even for the same page.
-	 */
-	gpc->khva = NULL;
-	gpc->pfn = KVM_PFN_ERR_FAULT;
-
-	write_unlock_irq(&gpc->lock);
-	mutex_unlock(&gpc->refresh_lock);
-
-	gpc_unmap_khva(old_pfn, old_khva);
-}
-EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
-
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
-{
 	rwlock_init(&gpc->lock);
 	mutex_init(&gpc->refresh_lock);
+
+	gpc->kvm = kvm;
+	gpc->vcpu = vcpu;
+	gpc->usage = usage;
+	gpc->pfn = KVM_PFN_ERR_FAULT;
+	gpc->uhva = KVM_HVA_ERR_BAD;
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_init);
 
-int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-		     gpa_t gpa, unsigned long len)
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 {
-	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+	struct kvm *kvm = gpc->kvm;
 
 	if (!gpc->active) {
-		gpc->khva = NULL;
-		gpc->pfn = KVM_PFN_ERR_FAULT;
-		gpc->uhva = KVM_HVA_ERR_BAD;
-		gpc->vcpu = vcpu;
-		gpc->usage = usage;
-		gpc->valid = false;
+		if (KVM_BUG_ON(gpc->valid, kvm))
+			return -EIO;
 
 		spin_lock(&kvm->gpc_lock);
 		list_add(&gpc->list, &kvm->gpc_list);
@@ -396,12 +375,16 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return kvm_gpc_refresh(kvm, gpc, gpa, len);
+	return __kvm_gpc_refresh(gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
-void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
+	struct kvm *kvm = gpc->kvm;
+	kvm_pfn_t old_pfn;
+	void *old_khva;
+
 	if (gpc->active) {
 		/*
 		 * Deactivate the cache before removing it from the list, KVM
@@ -410,13 +393,26 @@ void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		 */
 		write_lock_irq(&gpc->lock);
 		gpc->active = false;
+		gpc->valid = false;
+
+		/*
+		 * Leave the GPA => uHVA cache intact, it's protected by the
+		 * memslot generation.  The PFN lookup needs to be redone every
+		 * time as mmu_notifier protection is lost when the cache is
+		 * removed from the VM's gpc_list.
+		 */
+		old_khva = gpc->khva - offset_in_page(gpc->khva);
+		gpc->khva = NULL;
+
+		old_pfn = gpc->pfn;
+		gpc->pfn = KVM_PFN_ERR_FAULT;
 		write_unlock_irq(&gpc->lock);
 
 		spin_lock(&kvm->gpc_lock);
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		kvm_gpc_unmap(kvm, gpc);
+		gpc_unmap_khva(old_pfn, old_khva);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
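Dropping @gpa from the exported refresh helper is more than argument trimming: __kvm_gpc_refresh() still takes a gpa internally, but outside pfncache.c the only way to point a cache at a new guest address is a fresh kvm_gpc_activate(). The two-line wrapper above is the whole mechanism:

	int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
	{
		/* External refresh may only revalidate the current @gpa. */
		return __kvm_gpc_refresh(gpc, gpc->gpa, len);
	}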