Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "The biggest thing here is the adminq change - but it looks like the
  only way to avoid headq blocking causing indefinite stalls.

  This fixes three issues:

   - Prevent admin commands on one VF from blocking another.

     This prevents a bad VF from blocking a good one, and also fixes a
     scalability issue with a large number of VFs.

   - Correctly return an error on command failure on octeon. Previously,
     failed commands were treated as success.

   - Fix modpost warning when building virtio_dma_buf. Harmless, but the
     fix is trivial"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_pci_modern: remove admin queue serialization lock
  virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
  virtio_pci_modern: pass cmd as an identification token
  virtio_pci_modern: create admin queue of queried size
  virtio: create admin queues alongside other virtqueues
  virtio_pci: pass vq info as an argument to vp_setup_vq()
  virtio: push out code to vp_avq_index()
  virtio_pci_modern: treat vp_dev->admin_vq.info.vq pointer as static
  virtio_pci: introduce vector allocation fallback for slow path virtqueues
  virtio_pci: pass vector policy enum to vp_find_one_vq_msix()
  virtio_pci: pass vector policy enum to vp_find_vqs_msix()
  virtio_pci: simplify vp_request_msix_vectors() call a bit
  virtio_pci: push out single vq find code to vp_find_one_vq_msix()
  vdpa/octeon_ep: Fix error code in octep_process_mbox()
  virtio: add missing MODULE_DESCRIPTION() macro
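
The modpost warning referenced above is the stock "missing MODULE_DESCRIPTION()" complaint; the fix pattern is a one-liner next to the module's other MODULE_* macros (sketch below; the exact description string used for virtio_dma_buf may differ):

        MODULE_DESCRIPTION("dma-bufs for virtio exported objects");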
Linus Torvalds, 2024-07-29 12:53:37 -07:00, commit 10826505f5
7 changed files with 242 additions and 164 deletions

drivers/vdpa/octeon_ep/octep_vdpa_hw.c

@@ -140,7 +140,7 @@ static int octep_process_mbox(struct octep_hw *oct_hw, u16 id, u16 qid, void *bu
val = octep_read_sig(mbox);
if ((val & 0xFFFF) != MBOX_RSP_SIG) {
dev_warn(&pdev->dev, "Invalid Signature from mbox : %d response\n", id);
return ret;
return -EINVAL;
}
val = octep_read_sts(mbox);
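
The change above fixes the classic stale-errno pattern: ret still held 0 from the earlier successful wait, so an invalid mailbox signature was reported to the caller as success. A self-contained sketch of the pattern with hypothetical names (not the actual driver code):

        int process_mbox(void)
        {
                int ret;

                ret = wait_for_response();      /* returns 0 on success */
                if (ret)
                        return ret;

                /* A response arrived, but it is bogus: returning the stale
                 * 'ret' (still 0) would report success, hence -EINVAL. */
                if (read_sig() != MBOX_RSP_SIG)
                        return -EINVAL;

                return 0;
        }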

drivers/virtio/virtio.c

@@ -305,15 +305,9 @@ static int virtio_dev_probe(struct device *_d)
if (err)
goto err;
if (dev->config->create_avq) {
err = dev->config->create_avq(dev);
if (err)
goto err;
}
err = drv->probe(dev);
if (err)
goto err_probe;
goto err;
/* If probe didn't do it, mark device DRIVER_OK ourselves. */
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -326,9 +320,6 @@ static int virtio_dev_probe(struct device *_d)
return 0;
err_probe:
if (dev->config->destroy_avq)
dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -344,9 +335,6 @@ static void virtio_dev_remove(struct device *_d)
drv->remove(dev);
if (dev->config->destroy_avq)
dev->config->destroy_avq(dev);
/* Driver should have reset device. */
WARN_ON_ONCE(dev->config->get_status(dev));
@@ -524,9 +512,6 @@ int virtio_device_freeze(struct virtio_device *dev)
}
}
if (dev->config->destroy_avq)
dev->config->destroy_avq(dev);
return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_freeze);
@@ -562,16 +547,10 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
if (dev->config->create_avq) {
ret = dev->config->create_avq(dev);
if (ret)
goto err;
}
if (drv->restore) {
ret = drv->restore(dev);
if (ret)
goto err_restore;
goto err;
}
/* If restore didn't do it, mark device DRIVER_OK ourselves. */
@@ -582,9 +561,6 @@ int virtio_device_restore(struct virtio_device *dev)
return 0;
err_restore:
if (dev->config->destroy_avq)
dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
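
Net effect of the virtio.c changes above: the admin queue is now created and torn down alongside the other virtqueues by the transport itself, so the core no longer needs the create_avq/destroy_avq special cases in probe, remove, freeze and restore.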

drivers/virtio/virtio_pci_common.c

@@ -46,12 +46,26 @@ bool vp_notify(struct virtqueue *vq)
return true;
}
/* Notify all slow path virtqueues on an interrupt. */
static void vp_vring_slow_path_interrupt(int irq,
struct virtio_pci_device *vp_dev)
{
struct virtio_pci_vq_info *info;
unsigned long flags;
spin_lock_irqsave(&vp_dev->lock, flags);
list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
vring_interrupt(irq, info->vq);
spin_unlock_irqrestore(&vp_dev->lock, flags);
}
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
virtio_config_changed(&vp_dev->vdev);
vp_vring_slow_path_interrupt(irq, vp_dev);
return IRQ_HANDLED;
}
@@ -125,6 +139,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
if (!per_vq_vectors)
desc = NULL;
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
@@ -171,11 +188,17 @@ error:
return err;
}
static bool vp_is_slow_path_vector(u16 msix_vec)
{
return msix_vec == VP_MSIX_CONFIG_VECTOR;
}
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
u16 msix_vec)
u16 msix_vec,
struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
@@ -194,13 +217,16 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
info->vq = vq;
if (callback) {
spin_lock_irqsave(&vp_dev->lock, flags);
list_add(&info->node, &vp_dev->virtqueues);
if (!vp_is_slow_path_vector(msix_vec))
list_add(&info->node, &vp_dev->virtqueues);
else
list_add(&info->node, &vp_dev->slow_virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
} else {
INIT_LIST_HEAD(&info->node);
}
vp_dev->vqs[index] = info;
*p_info = info;
return vq;
out_info:
@@ -236,13 +262,11 @@ void vp_del_vqs(struct virtio_device *vdev)
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index))
continue;
if (vp_dev->per_vq_vectors) {
int v = vp_dev->vqs[vq->index]->msix_vector;
if (v != VIRTIO_MSI_NO_VECTOR) {
if (v != VIRTIO_MSI_NO_VECTOR &&
!vp_is_slow_path_vector(v)) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
irq_update_affinity_hint(irq, NULL);
@@ -284,21 +308,85 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
enum vp_vq_vector_policy {
VP_VQ_VECTOR_POLICY_EACH,
VP_VQ_VECTOR_POLICY_SHARED_SLOW,
VP_VQ_VECTOR_POLICY_SHARED,
};
static struct virtqueue *
vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
vq_callback_t *callback, const char *name, bool ctx,
bool slow_path, int *allocated_vectors,
enum vp_vq_vector_policy vector_policy,
struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
u16 msix_vec;
int err;
if (!callback)
msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
(vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
!slow_path))
msix_vec = (*allocated_vectors)++;
else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
slow_path)
msix_vec = VP_MSIX_CONFIG_VECTOR;
else
msix_vec = VP_MSIX_VQ_VECTOR;
vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
p_info);
if (IS_ERR(vq))
return vq;
if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
msix_vec == VIRTIO_MSI_NO_VECTOR ||
vp_is_slow_path_vector(msix_vec))
return vq;
/* allocate per-vq irq if available and necessary */
snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
"%s-%s", dev_name(&vp_dev->vdev.dev), name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, 0,
vp_dev->msix_names[msix_vec], vq);
if (err) {
vp_del_vq(vq);
return ERR_PTR(err);
}
return vq;
}
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
struct virtqueue_info vqs_info[],
bool per_vq_vectors,
enum vp_vq_vector_policy vector_policy,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
struct virtqueue_info *vqi;
u16 msix_vec;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
struct virtqueue *vq;
bool per_vq_vectors;
u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
if (vp_dev->avq_index) {
err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
if (err)
goto error_find;
}
per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;
if (per_vq_vectors) {
/* Best option: one for change interrupt, one per vq. */
nvectors = 1;
@@ -307,13 +395,14 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
if (vqi->name && vqi->callback)
++nvectors;
}
if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
nvectors = 2;
}
err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
per_vq_vectors ? desc : NULL);
err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
if (err)
goto error_find;
@@ -325,37 +414,27 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
vqs[i] = NULL;
continue;
}
if (!vqi->callback)
msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (vp_dev->per_vq_vectors)
msix_vec = allocated_vectors++;
else
msix_vec = VP_MSIX_VQ_VECTOR;
vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
vqi->name, vqi->ctx, msix_vec);
vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
vqi->name, vqi->ctx, false,
&allocated_vectors, vector_policy,
&vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
}
if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
continue;
/* allocate per-vq irq if available and necessary */
snprintf(vp_dev->msix_names[msix_vec],
sizeof *vp_dev->msix_names,
"%s-%s",
dev_name(&vp_dev->vdev.dev), vqi->name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, 0,
vp_dev->msix_names[msix_vec],
vqs[i]);
if (err) {
vp_del_vq(vqs[i]);
goto error_find;
}
}
if (!avq_num)
return 0;
sprintf(avq->name, "avq.%u", avq->vq_index);
vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
avq->name, false, true, &allocated_vectors,
vector_policy, &vp_dev->admin_vq.info);
if (IS_ERR(vq)) {
err = PTR_ERR(vq);
goto error_find;
}
return 0;
error_find:
@@ -368,12 +447,21 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue_info vqs_info[])
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
int i, err, queue_idx = 0;
struct virtqueue *vq;
u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
if (vp_dev->avq_index) {
err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
if (err)
goto out_del_vqs;
}
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev);
if (err)
@@ -390,13 +478,24 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
vqi->name, vqi->ctx,
VIRTIO_MSI_NO_VECTOR);
VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto out_del_vqs;
}
}
if (!avq_num)
return 0;
sprintf(avq->name, "avq.%u", avq->vq_index);
vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
false, VIRTIO_MSI_NO_VECTOR,
&vp_dev->admin_vq.info);
if (IS_ERR(vq)) {
err = PTR_ERR(vq);
goto out_del_vqs;
}
return 0;
out_del_vqs:
vp_del_vqs(vdev);
@@ -411,11 +510,20 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
int err;
/* Try MSI-X with one vector per queue. */
err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, true, desc);
err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
VP_VQ_VECTOR_POLICY_EACH, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one shared vector for config and
* slow path queues, one vector per queue for the rest.
*/
err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, false, desc);
err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
VP_VQ_VECTOR_POLICY_SHARED, desc);
if (!err)
return 0;
/* Is there an interrupt? If not give up. */
@@ -466,7 +574,8 @@ const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!vp_dev->per_vq_vectors ||
vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
return NULL;
return pci_irq_get_affinity(vp_dev->pci_dev,
@@ -574,6 +683,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->pci_dev = pci_dev;
INIT_LIST_HEAD(&vp_dev->virtqueues);
INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
spin_lock_init(&vp_dev->lock);
/* enable the device */
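
A worked example of the new vector allocation fallback, using the nvectors arithmetic from vp_find_vqs_msix() above for a device with four I/O virtqueues (each with a callback) plus one admin virtqueue:

        VP_VQ_VECTOR_POLICY_EACH:        1 (config) + 4 (vqs) + 1 (admin vq) = 6 vectors
        VP_VQ_VECTOR_POLICY_SHARED_SLOW: 1 (config + admin vq) + 4 (vqs)     = 5 vectors
        VP_VQ_VECTOR_POLICY_SHARED:      1 (config + admin vq) + 1 (all vqs) = 2 vectors
        INTx fallback:                   no MSI-X, one shared legacy interrupt

Virtqueues that share the config vector are kept on the new slow_virtqueues list, which is why vp_config_changed() now also calls vp_vring_slow_path_interrupt().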

drivers/virtio/virtio_pci_common.h

@@ -35,7 +35,7 @@ struct virtio_pci_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
/* the list node for the virtqueues list */
/* the list node for the virtqueues or slow_virtqueues list */
struct list_head node;
/* MSI-X vector (or none) */
@@ -44,9 +44,9 @@ struct virtio_pci_vq_info {
struct virtio_pci_admin_vq {
/* Virtqueue info associated with this admin queue. */
struct virtio_pci_vq_info info;
/* serializing admin commands execution and virtqueue deletion */
struct mutex cmd_lock;
struct virtio_pci_vq_info *info;
/* Protects virtqueue access. */
spinlock_t lock;
u64 supported_cmds;
/* Name of the admin queue: avq.$vq_index. */
char name[10];
@@ -66,9 +66,12 @@ struct virtio_pci_device {
/* Where to read and clear interrupt */
u8 __iomem *isr;
/* a list of queues so we can dispatch IRQs */
/* Lists of queues and potentially slow path queues
* so we can dispatch IRQs.
*/
spinlock_t lock;
struct list_head virtqueues;
struct list_head slow_virtqueues;
/* Array of all virtqueues reported in the
* PCI common config num_queues field
@@ -102,7 +105,7 @@ struct virtio_pci_device {
void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
bool (*is_avq)(struct virtio_device *vdev, unsigned int index);
int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num);
};
/* Constants for MSI-X */
@@ -175,6 +178,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
#define VIRTIO_ADMIN_CMD_BITMAP 0
#endif
void vp_modern_avq_done(struct virtqueue *vq);
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
struct virtio_admin_cmd *cmd);

drivers/virtio/virtio_pci_modern.c

@@ -28,6 +28,21 @@ static u64 vp_get_features(struct virtio_device *vdev)
return vp_modern_get_features(&vp_dev->mdev);
}
static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
*num = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return 0;
*num = vp_modern_avq_num(&vp_dev->mdev);
if (!(*num))
return -EINVAL;
*index = vp_modern_avq_index(&vp_dev->mdev);
return 0;
}
static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -38,17 +53,35 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
return index == vp_dev->admin_vq.vq_index;
}
void vp_modern_avq_done(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
struct virtio_admin_cmd *cmd;
unsigned long flags;
unsigned int len;
spin_lock_irqsave(&admin_vq->lock, flags);
do {
virtqueue_disable_cb(vq);
while ((cmd = virtqueue_get_buf(vq, &len)))
complete(&cmd->completion);
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&admin_vq->lock, flags);
}
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
u16 opcode,
struct scatterlist **sgs,
unsigned int out_num,
unsigned int in_num,
void *data)
struct virtio_admin_cmd *cmd)
{
struct virtqueue *vq;
int ret, len;
unsigned long flags;
int ret;
vq = admin_vq->info.vq;
vq = admin_vq->info->vq;
if (!vq)
return -EIO;
@@ -57,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
!((1ULL << opcode) & admin_vq->supported_cmds))
return -EOPNOTSUPP;
ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
if (ret < 0)
return -EIO;
if (unlikely(!virtqueue_kick(vq)))
return -EIO;
while (!virtqueue_get_buf(vq, &len) &&
!virtqueue_is_broken(vq))
cpu_relax();
init_completion(&cmd->completion);
again:
if (virtqueue_is_broken(vq))
return -EIO;
return 0;
spin_lock_irqsave(&admin_vq->lock, flags);
ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
if (ret < 0) {
if (ret == -ENOSPC) {
spin_unlock_irqrestore(&admin_vq->lock, flags);
cpu_relax();
goto again;
}
goto unlock_err;
}
if (!virtqueue_kick(vq))
goto unlock_err;
spin_unlock_irqrestore(&admin_vq->lock, flags);
wait_for_completion(&cmd->completion);
return cmd->ret;
unlock_err:
spin_unlock_irqrestore(&admin_vq->lock, flags);
return -EIO;
}
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
@@ -122,12 +167,9 @@ int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
in_num++;
}
mutex_lock(&vp_dev->admin_vq.cmd_lock);
ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
le16_to_cpu(cmd->opcode),
sgs, out_num, in_num, sgs);
mutex_unlock(&vp_dev->admin_vq.cmd_lock);
sgs, out_num, in_num, cmd);
if (ret) {
dev_err(&vdev->dev,
"Failed to execute command on admin vq: %d\n.", ret);
@@ -188,25 +230,29 @@ end:
static void vp_modern_avq_activate(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
__virtqueue_unbreak(admin_vq->info.vq);
virtio_pci_admin_cmd_list_init(vdev);
}
static void vp_modern_avq_deactivate(struct virtio_device *vdev)
static void vp_modern_avq_cleanup(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
struct virtio_admin_cmd *cmd;
struct virtqueue *vq;
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
__virtqueue_break(admin_vq->info.vq);
vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
if (!vq)
return;
while ((cmd = virtqueue_detach_unused_buf(vq))) {
cmd->ret = -EIO;
complete(&cmd->completion);
}
}
static void vp_transport_features(struct virtio_device *vdev, u64 features)
@@ -403,7 +449,7 @@ static void vp_reset(struct virtio_device *vdev)
while (vp_modern_get_status(mdev))
msleep(1);
vp_modern_avq_deactivate(vdev);
vp_modern_avq_cleanup(vdev);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
@@ -552,8 +598,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
return ERR_PTR(-EINVAL);
num = is_avq ?
VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
num = vp_modern_get_queue_size(mdev, index);
/* Check if queue is either not available or already active. */
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
@@ -580,12 +625,6 @@
goto err;
}
if (is_avq) {
mutex_lock(&vp_dev->admin_vq.cmd_lock);
vp_dev->admin_vq.info.vq = vq;
mutex_unlock(&vp_dev->admin_vq.cmd_lock);
}
return vq;
err:
@@ -620,12 +659,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
if (vp_is_avq(&vp_dev->vdev, vq->index)) {
mutex_lock(&vp_dev->admin_vq.cmd_lock);
vp_dev->admin_vq.info.vq = NULL;
mutex_unlock(&vp_dev->admin_vq.cmd_lock);
}
if (vp_dev->msix_enabled)
vp_modern_queue_vector(mdev, vq->index,
VIRTIO_MSI_NO_VECTOR);
@@ -735,45 +768,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
return true;
}
static int vp_modern_create_avq(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_admin_vq *avq;
struct virtqueue *vq;
u16 admin_q_num;
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return 0;
admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
if (!admin_q_num)
return -EINVAL;
avq = &vp_dev->admin_vq;
avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
sprintf(avq->name, "avq.%u", avq->vq_index);
vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vq)) {
dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
PTR_ERR(vq));
return PTR_ERR(vq);
}
vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
return 0;
}
static void vp_modern_destroy_avq(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
vp_dev->del_vq(&vp_dev->admin_vq.info);
}
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
.set = NULL,
@@ -792,8 +786,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
.create_avq = vp_modern_create_avq,
.destroy_avq = vp_modern_destroy_avq,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -814,8 +806,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
.create_avq = vp_modern_create_avq,
.destroy_avq = vp_modern_destroy_avq,
};
/* the PCI probing function */
@@ -839,11 +829,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
vp_dev->is_avq = vp_is_avq;
vp_dev->avq_index = vp_avq_index;
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
mutex_init(&vp_dev->admin_vq.cmd_lock);
spin_lock_init(&vp_dev->admin_vq.lock);
return 0;
}
@@ -851,6 +841,5 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
mutex_destroy(&vp_dev->admin_vq.cmd_lock);
vp_modern_remove(mdev);
}
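
Note the locking change in this file: the old cmd_lock mutex serialized admin commands from submission to completion, while the new admin_vq.lock spinlock covers only the short add/kick and completion-harvest windows. Waiting happens outside the lock, so multiple admin commands can be in flight at once, and vp_modern_avq_cleanup() completes any still-queued commands with -EIO on reset so their waiters cannot hang.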

include/linux/virtio.h

@@ -10,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -109,6 +110,8 @@ struct virtio_admin_cmd {
__le64 group_member_id;
struct scatterlist *data_sg;
struct scatterlist *result_sg;
struct completion completion;
int ret;
};
/**
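
For illustration, a hypothetical caller of the completion-backed command structure, similar in spirit to the virtio_pci_admin_cmd_list_init() path seen in the diff above (the constants come from the virtio admin-command UAPI headers; vp_modern_admin_cmd_exec() initializes and waits on cmd.completion internally):

        __le64 *data;
        struct scatterlist result_sg;
        struct virtio_admin_cmd cmd = {
                .opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY),
                .group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV),
                .result_sg = &result_sg,
        };
        int err;

        data = kzalloc(sizeof(*data), GFP_KERNEL);   /* DMA-safe result buffer */
        if (!data)
                return -ENOMEM;
        sg_init_one(&result_sg, data, sizeof(*data));

        err = vp_modern_admin_cmd_exec(vdev, &cmd);  /* sleeps until this cmd completes */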

include/linux/virtio_config.h

@@ -104,8 +104,6 @@ struct virtqueue_info {
* Returns 0 on success or error status
* If disable_vq_and_reset is set, then enable_vq_after_reset must also be
* set.
* @create_avq: create admin virtqueue resource.
* @destroy_avq: destroy admin virtqueue resource.
*/
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -133,8 +131,6 @@ struct virtio_config_ops {
struct virtio_shm_region *region, u8 id);
int (*disable_vq_and_reset)(struct virtqueue *vq);
int (*enable_vq_after_reset)(struct virtqueue *vq);
int (*create_avq)(struct virtio_device *vdev);
void (*destroy_avq)(struct virtio_device *vdev);
};
/* If driver didn't advertise the feature, it will never appear. */