2007-11-13 03:30:26 +00:00
|
|
|
/*
|
|
|
|
* Virtio PCI driver
|
|
|
|
*
|
|
|
|
* This module allows virtio devices to be used over a virtual PCI device.
|
|
|
|
* This can be used with QEMU based VMMs like KVM or Xen.
|
|
|
|
*
|
|
|
|
* Copyright IBM Corp. 2007
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Anthony Liguori <aliguori@us.ibm.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/pci.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 08:04:11 +00:00
|
|
|
#include <linux/slab.h>
|
2007-11-13 03:30:26 +00:00
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/virtio.h>
|
|
|
|
#include <linux/virtio_config.h>
|
|
|
|
#include <linux/virtio_ring.h>
|
|
|
|
#include <linux/virtio_pci.h>
|
|
|
|
#include <linux/highmem.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
|
|
|
|
/* Module metadata exposed through modinfo. */
MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");
|
|
|
|
|
|
|
|
/* Our device structure */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;

	/* the IO mapping for the PCI config space */
	void __iomem *ioaddr;

	/* the IO mapping for ISR operation */
	void __iomem *isr;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;		/* protects virtqueues list */
	struct list_head virtqueues;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	struct msix_entry *msix_entries;
	/* one affinity mask per allocated vector; entries may be NULL */
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
	/* Number of available vectors */
	unsigned msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;
};
|
|
|
|
|
|
|
|
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
 * virtqueues Thus, we need at least 2 vectors for MSI. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
|
|
|
|
|
2014-12-03 14:34:30 +00:00
|
|
|
/* Per-virtqueue bookkeeping kept in vq->priv. */
struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	int num;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned msix_vector;
};
|
|
|
|
|
|
|
|
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
	{ 0 }	/* sentinel */
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
|
|
|
|
|
|
|
|
/* Convert a generic virtio device to our structure */
|
|
|
|
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
return container_of(vdev, struct virtio_pci_device, vdev);
|
|
|
|
}
|
|
|
|
|
2008-05-03 02:50:50 +00:00
|
|
|
/* virtio config->get_features() implementation */
|
2014-10-07 14:39:43 +00:00
|
|
|
static u64 vp_get_features(struct virtio_device *vdev)
|
2008-05-03 02:50:50 +00:00
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
|
|
|
|
|
|
|
|
/* When someone needs more than 32 feature bits, we'll need to
|
|
|
|
* steal a bit to indicate that the rest are somewhere else. */
|
|
|
|
return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
|
|
|
|
}
|
|
|
|
|
2008-07-25 17:06:07 +00:00
|
|
|
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/* We only support 32 feature bits. */
	iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

	return 0;
}
|
|
|
|
|
|
|
|
/* virtio config->get() implementation */
|
|
|
|
static void vp_get(struct virtio_device *vdev, unsigned offset,
|
|
|
|
void *buf, unsigned len)
|
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
|
2009-05-14 10:55:41 +00:00
|
|
|
void __iomem *ioaddr = vp_dev->ioaddr +
|
|
|
|
VIRTIO_PCI_CONFIG(vp_dev) + offset;
|
2007-11-13 03:30:26 +00:00
|
|
|
u8 *ptr = buf;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < len; i++)
|
|
|
|
ptr[i] = ioread8(ioaddr + i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* the config->set() implementation. it's symmetric to the config->get()
|
|
|
|
* implementation */
|
|
|
|
static void vp_set(struct virtio_device *vdev, unsigned offset,
|
|
|
|
const void *buf, unsigned len)
|
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
|
2009-05-14 10:55:41 +00:00
|
|
|
void __iomem *ioaddr = vp_dev->ioaddr +
|
|
|
|
VIRTIO_PCI_CONFIG(vp_dev) + offset;
|
2007-11-13 03:30:26 +00:00
|
|
|
const u8 *ptr = buf;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < len; i++)
|
|
|
|
iowrite8(ptr[i], ioaddr + i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* config->{get,set}_status() implementations */
|
|
|
|
static u8 vp_get_status(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
|
|
|
|
return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	void __iomem *base = to_vp_device(vdev)->ioaddr;

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	iowrite8(status, base + VIRTIO_PCI_STATUS);
}
|
|
|
|
|
2011-11-17 15:41:15 +00:00
|
|
|
/* wait for pending irq handlers */
static void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	/* Wait out any in-flight handlers on every allocated MSI-X vector. */
	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(vp_dev->msix_entries[i].vector);
}
|
|
|
|
|
2007-11-13 03:30:26 +00:00
|
|
|
/* config->reset() implementation: reset the device and quiesce callbacks. */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSi-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
|
|
|
|
|
|
|
|
/* the notify function used when creating a virt queue */
|
2013-10-28 23:08:50 +00:00
|
|
|
static bool vp_notify(struct virtqueue *vq)
|
2007-11-13 03:30:26 +00:00
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
|
|
|
|
|
|
|
|
/* we write the queue's selector into the notification register to
|
|
|
|
* signal the other end */
|
2012-10-16 13:26:14 +00:00
|
|
|
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
|
2013-10-28 23:08:50 +00:00
|
|
|
return true;
|
2007-11-13 03:30:26 +00:00
|
|
|
}
|
|
|
|
|
2009-05-14 10:55:31 +00:00
|
|
|
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}
|
|
|
|
|
|
|
|
/* Notify all virtqueues on an interrupt. */
|
|
|
|
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
|
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = opaque;
|
|
|
|
struct virtio_pci_vq_info *info;
|
|
|
|
irqreturn_t ret = IRQ_NONE;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&vp_dev->lock, flags);
|
|
|
|
list_for_each_entry(info, &vp_dev->virtqueues, node) {
|
|
|
|
if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
|
|
|
|
ret = IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&vp_dev->lock, flags);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-11-13 03:30:26 +00:00
|
|
|
/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
|
|
|
|
|
2009-05-14 10:55:41 +00:00
|
|
|
static void vp_free_vectors(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (vp_dev->intx_enabled) {
|
|
|
|
free_irq(vp_dev->pci_dev->irq, vp_dev);
|
|
|
|
vp_dev->intx_enabled = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
|
|
|
|
free_irq(vp_dev->msix_entries[i].vector, vp_dev);
|
|
|
|
|
2012-08-28 11:54:14 +00:00
|
|
|
for (i = 0; i < vp_dev->msix_vectors; i++)
|
|
|
|
if (vp_dev->msix_affinity_masks[i])
|
|
|
|
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
|
|
|
|
|
2009-05-14 10:55:41 +00:00
|
|
|
if (vp_dev->msix_enabled) {
|
|
|
|
/* Disable the vector used for configuration */
|
|
|
|
iowrite16(VIRTIO_MSI_NO_VECTOR,
|
|
|
|
vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
|
|
|
|
/* Flush the write out to device */
|
|
|
|
ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
|
|
|
|
|
|
|
|
pci_disable_msix(vp_dev->pci_dev);
|
2009-07-23 11:57:37 +00:00
|
|
|
vp_dev->msix_enabled = 0;
|
2009-05-14 10:55:41 +00:00
|
|
|
}
|
2009-07-23 11:57:37 +00:00
|
|
|
|
2013-07-02 06:05:13 +00:00
|
|
|
vp_dev->msix_vectors = 0;
|
2009-07-23 11:57:37 +00:00
|
|
|
vp_dev->msix_used_vectors = 0;
|
|
|
|
kfree(vp_dev->msix_names);
|
|
|
|
vp_dev->msix_names = NULL;
|
|
|
|
kfree(vp_dev->msix_entries);
|
|
|
|
vp_dev->msix_entries = NULL;
|
2012-08-28 11:54:14 +00:00
|
|
|
kfree(vp_dev->msix_affinity_masks);
|
|
|
|
vp_dev->msix_affinity_masks = NULL;
|
2009-05-14 10:55:41 +00:00
|
|
|
}
|
|
|
|
|
2009-09-24 04:26:29 +00:00
|
|
|
/*
 * Allocate and wire up @nvectors MSI-X vectors.  Vector 0 always carries
 * configuration-change interrupts; with !@per_vq_vectors a single shared
 * vector is additionally registered for all virtqueues.  On any failure
 * everything allocated so far is released via vp_free_vectors().
 * Returns 0 on success, negative errno on failure.
 */
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
				       GFP_KERNEL);
	if (!vp_dev->msix_entries)
		goto error;
	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
				     GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
					GFP_KERNEL))
			goto error;

	for (i = 0; i < nvectors; ++i)
		vp_dev->msix_entries[i].entry = i;

	/* All-or-nothing: we need every requested vector. */
	err = pci_enable_msix_exact(vp_dev->pci_dev,
				    vp_dev->msix_entries, nvectors);
	if (err)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(vp_dev->msix_entries[v].vector,
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	/* Verify we had enough resources to assign the vector */
	v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	if (v == VIRTIO_MSI_NO_VECTOR) {
		/* Device rejected the mapping (out of device-side resources). */
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(vp_dev->msix_entries[v].vector,
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	vp_free_vectors(vdev);
	return err;
}
|
|
|
|
|
2009-09-24 04:26:29 +00:00
|
|
|
static int vp_request_intx(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
|
|
|
|
|
|
|
|
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
|
|
|
|
IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
|
|
|
|
if (!err)
|
|
|
|
vp_dev->intx_enabled = 1;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create and activate virtqueue @index: allocate the ring pages, program
 * the queue PFN, build the vring, and (optionally) bind @msix_vec.
 * Returns the new virtqueue or an ERR_PTR; all partial state is undone
 * on failure via the goto chain at the bottom.
 */
static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	u16 num;
	int err;

	/* Select the queue we're interested in */
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	/* Check if queue is either not available or already active. */
	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
		return ERR_PTR(-ENOENT);

	/* allocate and fill out our structure the represents an active
	 * queue */
	info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->num = num;
	info->msix_vector = msix_vec;

	/* Ring memory must be physically contiguous and zeroed. */
	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
	if (info->queue == NULL) {
		err = -ENOMEM;
		goto out_info;
	}

	/* activate the queue */
	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* create the vring */
	vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
				 true, info->queue, vp_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto out_activate_queue;
	}

	vq->priv = info;
	info->vq = vq;

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Read back to verify the device accepted the vector. */
		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto out_assign;
		}
	}

	/* Callback-less queues never get interrupts, so keep them off the
	 * dispatch list. */
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	return vq;

out_assign:
	vring_del_virtqueue(vq);
out_activate_queue:
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
	free_pages_exact(info->queue, size);
out_info:
	kfree(info);
	return ERR_PTR(err);
}
|
|
|
|
|
|
|
|
/* Undo setup_vq(): unlink, unbind the MSI-X vector, tear down the vring
 * and release the ring pages. */
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}
|
|
|
|
|
2009-05-14 10:55:41 +00:00
|
|
|
/* the config->del_vqs() implementation */
static void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	struct virtio_pci_vq_info *info;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		info = vq->priv;
		/* Per-vq irqs were requested against the vq, not vp_dev. */
		if (vp_dev->per_vq_vectors &&
			info->msix_vector != VIRTIO_MSI_NO_VECTOR)
			free_irq(vp_dev->msix_entries[info->msix_vector].vector,
				 vq);
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	vp_free_vectors(vdev);
}
|
|
|
|
|
2009-07-26 12:48:08 +00:00
|
|
|
/*
 * Try to set up @nvqs virtqueues with one particular interrupt strategy:
 * legacy INTx (!@use_msix), MSI-X with a shared queue vector, or MSI-X
 * with one vector per callback-bearing vq (@per_vq_vectors).  On failure
 * everything is torn down and a negative errno is returned so the caller
 * can retry with a cheaper strategy.
 */
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char *names[],
			      bool use_msix,
			      bool per_vq_vectors)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	if (!use_msix) {
		/* Old style: one normal interrupt for change and all vqs. */
		err = vp_request_intx(vdev);
		if (err)
			goto error_request;
	} else {
		if (per_vq_vectors) {
			/* Best option: one for change interrupt, one per vq. */
			nvectors = 1;
			for (i = 0; i < nvqs; ++i)
				if (callbacks[i])
					++nvectors;
		} else {
			/* Second best: one for change, shared for all vqs. */
			nvectors = 2;
		}

		err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
		if (err)
			goto error_request;
	}

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			/* NULL name means the caller doesn't want this vq. */
			vqs[i] = NULL;
			continue;
		} else if (!callbacks[i] || !vp_dev->msix_enabled)
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(vp_dev->msix_entries[msix_vec].vector,
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err) {
			vp_del_vq(vqs[i]);
			goto error_find;
		}
	}
	return 0;

error_find:
	vp_del_vqs(vdev);

error_request:
	return err;
}
|
|
|
|
|
|
|
|
/* the config->find_vqs() implementation */
|
|
|
|
static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
|
|
|
|
struct virtqueue *vqs[],
|
|
|
|
vq_callback_t *callbacks[],
|
|
|
|
const char *names[])
|
|
|
|
{
|
2009-09-24 04:26:29 +00:00
|
|
|
int err;
|
2009-07-26 12:48:08 +00:00
|
|
|
|
2009-09-24 04:26:29 +00:00
|
|
|
/* Try MSI-X with one vector per queue. */
|
|
|
|
err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
|
2009-07-26 12:48:08 +00:00
|
|
|
if (!err)
|
|
|
|
return 0;
|
2009-09-24 04:26:29 +00:00
|
|
|
/* Fallback: MSI-X with one vector for config, one shared for queues. */
|
2009-07-26 12:48:08 +00:00
|
|
|
err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
|
2009-09-24 04:26:29 +00:00
|
|
|
true, false);
|
2009-07-26 12:48:08 +00:00
|
|
|
if (!err)
|
|
|
|
return 0;
|
|
|
|
/* Finally fall back to regular interrupts. */
|
2009-09-24 04:26:29 +00:00
|
|
|
return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
|
|
|
|
false, false);
|
2009-06-13 04:16:36 +00:00
|
|
|
}
|
|
|
|
|
2011-11-14 14:17:08 +00:00
|
|
|
static const char *vp_bus_name(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
|
|
|
|
|
|
|
|
return pci_name(vp_dev->pci_dev);
|
|
|
|
}
|
|
|
|
|
2012-08-28 11:54:14 +00:00
|
|
|
/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vq->priv;
	struct cpumask *mask;
	unsigned int irq;

	/* A vq without a callback has no vector to steer. */
	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = vp_dev->msix_entries[info->msix_vector].vector;
		if (cpu == -1)
			/* cpu == -1 clears the hint. */
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_set_cpu(cpu, mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}
|
|
|
|
|
2013-02-10 05:27:38 +00:00
|
|
|
/* The virtio_config_ops vtable wiring this transport into virtio core. */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get = vp_get,
	.set = vp_set,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_find_vqs,
	.del_vqs = vp_del_vqs,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};
|
|
|
|
|
2008-12-10 17:45:34 +00:00
|
|
|
static void virtio_pci_release_dev(struct device *_d)
{
	/*
	 * No need for a release method as we allocate/free
	 * all devices together with the pci devices.
	 * Provide an empty one to avoid getting a warning from core.
	 */
}
|
|
|
|
|
2007-11-13 03:30:26 +00:00
|
|
|
/* the PCI probing function */
|
2012-12-21 21:05:30 +00:00
|
|
|
static int virtio_pci_probe(struct pci_dev *pci_dev,
|
|
|
|
const struct pci_device_id *id)
|
2007-11-13 03:30:26 +00:00
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
|
|
|
|
if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2008-01-28 15:59:59 +00:00
|
|
|
if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
|
|
|
|
printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
|
|
|
|
VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2007-11-13 03:30:26 +00:00
|
|
|
/* allocate our structure and fill it out */
|
|
|
|
vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
|
|
|
|
if (vp_dev == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2011-01-07 08:55:06 +00:00
|
|
|
vp_dev->vdev.dev.parent = &pci_dev->dev;
|
2008-12-10 17:45:34 +00:00
|
|
|
vp_dev->vdev.dev.release = virtio_pci_release_dev;
|
2007-11-13 03:30:26 +00:00
|
|
|
vp_dev->vdev.config = &virtio_pci_config_ops;
|
|
|
|
vp_dev->pci_dev = pci_dev;
|
|
|
|
INIT_LIST_HEAD(&vp_dev->virtqueues);
|
|
|
|
spin_lock_init(&vp_dev->lock);
|
|
|
|
|
2010-06-24 04:49:06 +00:00
|
|
|
/* Disable MSI/MSIX to bring device to a known good state. */
|
|
|
|
pci_msi_off(pci_dev);
|
|
|
|
|
2007-11-13 03:30:26 +00:00
|
|
|
/* enable the device */
|
|
|
|
err = pci_enable_device(pci_dev);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
err = pci_request_regions(pci_dev, "virtio-pci");
|
|
|
|
if (err)
|
|
|
|
goto out_enable_device;
|
|
|
|
|
|
|
|
vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
|
2012-09-17 17:31:17 +00:00
|
|
|
if (vp_dev->ioaddr == NULL) {
|
|
|
|
err = -ENOMEM;
|
2007-11-13 03:30:26 +00:00
|
|
|
goto out_req_regions;
|
2012-09-17 17:31:17 +00:00
|
|
|
}
|
2007-11-13 03:30:26 +00:00
|
|
|
|
2014-12-02 12:35:27 +00:00
|
|
|
vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
|
|
|
|
|
2007-11-13 03:30:26 +00:00
|
|
|
pci_set_drvdata(pci_dev, vp_dev);
|
2009-11-29 15:52:00 +00:00
|
|
|
pci_set_master(pci_dev);
|
2007-11-13 03:30:26 +00:00
|
|
|
|
|
|
|
/* we use the subsystem vendor/device id as the virtio vendor/device
|
|
|
|
* id. this allows us to use the same PCI vendor/device id for all
|
|
|
|
* virtio devices and to identify the particular virtio driver by
|
2010-03-16 10:47:56 +00:00
|
|
|
* the subsystem ids */
|
2007-11-13 03:30:26 +00:00
|
|
|
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
|
|
|
|
vp_dev->vdev.id.device = pci_dev->subsystem_device;
|
|
|
|
|
|
|
|
/* finally register the virtio device */
|
|
|
|
err = register_virtio_device(&vp_dev->vdev);
|
|
|
|
if (err)
|
2009-05-14 10:55:41 +00:00
|
|
|
goto out_set_drvdata;
|
2007-11-13 03:30:26 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_set_drvdata:
|
|
|
|
pci_iounmap(pci_dev, vp_dev->ioaddr);
|
|
|
|
out_req_regions:
|
|
|
|
pci_release_regions(pci_dev);
|
|
|
|
out_enable_device:
|
|
|
|
pci_disable_device(pci_dev);
|
|
|
|
out:
|
|
|
|
kfree(vp_dev);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2012-12-21 21:05:30 +00:00
|
|
|
static void virtio_pci_remove(struct pci_dev *pci_dev)
|
2007-11-13 03:30:26 +00:00
|
|
|
{
|
|
|
|
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
|
|
|
|
|
2008-03-20 01:35:04 +00:00
|
|
|
unregister_virtio_device(&vp_dev->vdev);
|
virtio_pci: Prevent double-free of pci regions after device hot-unplug
In the case where a virtio-console port is in use (opened by a program)
and a virtio-console device is removed, the port is kept around but all
the virtio-related state is assumed to be gone.
When the port is finally released (close() called), we call
device_destroy() on the port's device. This results in the parent
device's structures to be freed as well. This includes the PCI regions
for the virtio-console PCI device.
Once this is done, however, virtio_pci_release_dev() kicks in, as the
last ref to the virtio device is now gone, and attempts to do
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
which results in a double-free warning.
Move the code that releases regions, etc., to the virtio_pci_remove()
function, and all that's now left in release_dev is the final freeing of
the vp_dev.
Signed-off-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2011-03-14 12:15:02 +00:00
|
|
|
|
|
|
|
vp_del_vqs(&vp_dev->vdev);
|
|
|
|
pci_iounmap(pci_dev, vp_dev->ioaddr);
|
|
|
|
pci_release_regions(pci_dev);
|
|
|
|
pci_disable_device(pci_dev);
|
2011-11-07 16:37:05 +00:00
|
|
|
kfree(vp_dev);
|
2007-11-13 03:30:26 +00:00
|
|
|
}
|
|
|
|
|
2013-09-09 00:27:12 +00:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
2011-12-22 11:28:26 +00:00
|
|
|
static int virtio_pci_freeze(struct device *dev)
|
|
|
|
{
|
|
|
|
struct pci_dev *pci_dev = to_pci_dev(dev);
|
|
|
|
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
|
|
|
|
int ret;
|
|
|
|
|
2014-10-14 00:10:35 +00:00
|
|
|
ret = virtio_device_freeze(&vp_dev->vdev);
|
2011-12-22 11:28:26 +00:00
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
pci_disable_device(pci_dev);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-03-29 07:24:43 +00:00
|
|
|
static int virtio_pci_restore(struct device *dev)
|
2011-12-22 11:28:26 +00:00
|
|
|
{
|
|
|
|
struct pci_dev *pci_dev = to_pci_dev(dev);
|
|
|
|
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = pci_enable_device(pci_dev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2012-03-29 07:24:43 +00:00
|
|
|
|
2011-12-22 11:28:26 +00:00
|
|
|
pci_set_master(pci_dev);
|
2014-10-14 00:10:35 +00:00
|
|
|
return virtio_device_restore(&vp_dev->vdev);
|
2011-12-22 11:28:26 +00:00
|
|
|
}
|
|
|
|
|
2011-12-22 11:28:25 +00:00
|
|
|
/* Power-management ops: the SET_SYSTEM_SLEEP_PM_OPS macro wires
 * virtio_pci_freeze/virtio_pci_restore into all system-sleep callbacks
 * (suspend/resume, freeze/thaw, poweroff/restore). */
static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
|
2007-11-13 03:30:26 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/* PCI driver descriptor binding the vendor/device id table to the
 * probe/remove entry points; PM ops only when sleep states are built in. */
static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};
|
|
|
|
|
2012-10-16 13:26:13 +00:00
|
|
|
/* Boilerplate module init/exit: registers and unregisters virtio_pci_driver. */
module_pci_driver(virtio_pci_driver);
|