Mirror of https://github.com/torvalds/linux.git (synced 2024-11-23 20:51:44 +00:00)
6ce5a090a9
kvm_coalesced_mmio_init() keeps holding the addresses of the coalesced MMIO ring page and dev even after it has freed them. Also, if this function fails, rare though that may be, it most likely indicates a serious problem with the system, so we had better stop the work that follows kvm_create_vm(). This patch fixes both problems by moving the coalesced MMIO initialization out of kvm_create_vm(). This seems natural, since the initialization includes a registration which can only be done once the VM has been created successfully.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
191 lines
4.1 KiB
C
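For context, the other half of this patch (in kvm_main.c, not shown on this page) makes the VM creation ioctl call kvm_coalesced_mmio_init() only after kvm_create_vm() has succeeded, and bail out if it fails. A minimal sketch of that call-site pattern, assuming the kvm_dev_ioctl_create_vm() and kvm_put_kvm() helpers from kvm_main.c; this is a hedged reconstruction, not the verbatim kvm_main.c hunk:

/* Sketch: initialize coalesced MMIO only once the VM exists, and
 * treat a failure as fatal for VM creation.
 */
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);	/* stop here instead of limping along */
		return r;
	}
#endif
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
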
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	/* Can we batch this access?
	 *
	 * 'last' is the first free entry; check that we do not run into
	 * the first used entry.  There is always one unused entry in the
	 * buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail < KVM_MAX_VCPUS) {
		/* Treat the ring as full.  This check runs outside
		 * dev->lock, so keep KVM_MAX_VCPUS entries in reserve in
		 * case every vcpu passes it at the same time.
		 */
		return 0;
	}

	/* Is the access inside a batchable area? */
	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully contained in
		 * (zone->addr, zone->size).
		 */
		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* publish the entry only after its data is in place */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;

	spin_unlock(&dev->lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;
	kvm->coalesced_mmio_ring = page_address(page);

	ret = -ENOMEM;
	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		goto out_free_page;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0)
		goto out_free_dev;

	return ret;

	/* On failure, clear the cached pointers as well, so that no
	 * stale addresses are left behind in struct kvm.
	 */
out_free_dev:
	kvm->coalesced_mmio_dev = NULL;
	kfree(dev);
out_free_page:
	kvm->coalesced_mmio_ring = NULL;
	__free_page(page);
out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	/* The dev itself is freed by the bus destructor
	 * (coalesced_mmio_destructor); only the ring page is
	 * released here.
	 */
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* Unregister all zones fully contained in
		 * (zone->addr, zone->size).
		 */
		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			/* compact the array: move the last zone into the hole */
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
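
For reference, here is a minimal userspace sketch of how this interface is consumed. Assumptions: vm_fd is an open VM file descriptor, ring points at the coalesced MMIO page mmap()ed from a vcpu at KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE, and handle_mmio_write() is a hypothetical callback; the drain loop follows the usual QEMU-style pattern for this ring, not code from this file.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical consumer callback -- not part of the KVM API. */
extern void handle_mmio_write(__u64 addr, const void *data, __u32 len);

void coalesce_and_drain(int vm_fd, struct kvm_coalesced_mmio_ring *ring)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0xa0000,	/* example: a VGA-style MMIO window */
		.size = 0x20000,
	};

	/* Ask the kernel to batch guest writes to this range. */
	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
		return;

	/* Drain the ring: the kernel produces at 'last', userspace consumes
	 * at 'first'; one slot always stays unused, so first == last means
	 * empty.  KVM_COALESCED_MMIO_MAX comes from <linux/kvm.h> (it
	 * expands in terms of PAGE_SIZE, which must be defined).
	 */
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *ent =
			&ring->coalesced_mmio[ring->first];

		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
		__sync_synchronize();	/* replay data before freeing the slot */
		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
	}
}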