
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "Several new features here:

   - virtio-net is finally supported in vduse

   - virtio (balloon and mem) interaction with suspend is improved

   - vhost-scsi now handles signals better/faster

  And fixes, cleanups all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (48 commits)
  virtio-pci: Check if is_avq is NULL
  virtio: delete vq in vp_find_vqs_msix() when request_irq() fails
  MAINTAINERS: add Eugenio Pérez as reviewer
  vhost-vdpa: Remove usage of the deprecated ida_simple_xx() API
  vp_vdpa: don't allocate unused msix vectors
  sound: virtio: drop owner assignment
  fuse: virtio: drop owner assignment
  scsi: virtio: drop owner assignment
  rpmsg: virtio: drop owner assignment
  nvdimm: virtio_pmem: drop owner assignment
  wifi: mac80211_hwsim: drop owner assignment
  vsock/virtio: drop owner assignment
  net: 9p: virtio: drop owner assignment
  net: virtio: drop owner assignment
  net: caif: virtio: drop owner assignment
  misc: nsm: drop owner assignment
  iommu: virtio: drop owner assignment
  drm/virtio: drop owner assignment
  gpio: virtio: drop owner assignment
  firmware: arm_scmi: virtio: drop owner assignment
  ...
commit 2ef32ad224 by Linus Torvalds, 2024-05-23 12:04:36 -07:00
40 changed files with 355 additions and 177 deletions
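
Most of the churn in this pull is the long "drop owner assignment" run in
the shortlog: the virtio core now records the owning module when a driver
registers, so individual drivers can stop setting .driver.owner by hand.
A minimal sketch of the resulting driver declaration (the wrapper macro is
an assumption about how the core captures THIS_MODULE, in the style of
other *_driver_register() wrappers; the my_* names are illustrative):

	/* Assumed core-side helper: registration records the module. */
	#define register_virtio_driver(drv) \
		__register_virtio_driver(drv, THIS_MODULE)

	static struct virtio_driver my_virtio_driver = {
		.driver.name	= KBUILD_MODNAME,
		/* no .driver.owner: the core fills it in at registration */
		.id_table	= my_id_table,
		.probe		= my_probe,
		.remove		= my_remove,
	};
	module_virtio_driver(my_virtio_driver);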


@@ -10654,8 +10654,10 @@ F: include/net/nl802154.h
F: net/ieee802154/
F: net/mac802154/
IFCVF VIRTIO DATA PATH ACCELERATOR
R: Zhu Lingshan <lingshan.zhu@intel.com>
Intel VIRTIO DATA PATH ACCELERATOR
M: Zhu Lingshan <lingshan.zhu@intel.com>
L: virtualization@lists.linux.dev
S: Supported
F: drivers/vdpa/ifcvf/
IFE PROTOCOL
@@ -23746,6 +23748,7 @@ M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
R: Eugenio Pérez <eperezma@redhat.com>
L: virtualization@lists.linux.dev
S: Maintained
F: drivers/block/virtio_blk.c
@@ -23764,6 +23767,7 @@ VIRTIO CORE AND NET DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
R: Eugenio Pérez <eperezma@redhat.com>
L: virtualization@lists.linux.dev
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-vdpa
@@ -23805,6 +23809,7 @@ VIRTIO FILE SYSTEM
M: Vivek Goyal <vgoyal@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Miklos Szeredi <miklos@szeredi.hu>
R: Eugenio Pérez <eperezma@redhat.com>
L: virtualization@lists.linux.dev
L: linux-fsdevel@vger.kernel.org
S: Supported
@@ -23838,6 +23843,7 @@ F: include/uapi/linux/virtio_gpu.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Eugenio Pérez <eperezma@redhat.com>
L: kvm@vger.kernel.org
L: virtualization@lists.linux.dev
L: netdev@vger.kernel.org


@@ -752,7 +752,6 @@ MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver um_pci_virtio_driver = {
.driver.name = "virtio-pci",
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = um_pci_virtio_probe,
.remove = um_pci_virtio_remove,


@@ -1658,7 +1658,6 @@ static struct virtio_driver virtio_blk = {
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtblk_probe,
.remove = virtblk_remove,


@@ -415,7 +415,6 @@ static const unsigned int virtbt_features[] = {
static struct virtio_driver virtbt_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.feature_table = virtbt_features,
.feature_table_size = ARRAY_SIZE(virtbt_features),
.id_table = virtbt_table,


@@ -245,7 +245,6 @@ static const struct virtio_device_id id_table[] = {
static struct virtio_driver virtio_rng_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtrng_probe,
.remove = virtrng_remove,


@@ -2173,7 +2173,6 @@ static struct virtio_driver virtio_console = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtcons_probe,
.remove = virtcons_remove,
@@ -2188,7 +2187,6 @@ static struct virtio_driver virtio_rproc_serial = {
.feature_table = rproc_serial_features,
.feature_table_size = ARRAY_SIZE(rproc_serial_features),
.driver.name = "virtio_rproc_serial",
.driver.owner = THIS_MODULE,
.id_table = rproc_serial_id_table,
.probe = virtcons_probe,
.remove = virtcons_remove,


@@ -581,7 +581,6 @@ static const struct virtio_device_id id_table[] = {
static struct virtio_driver virtio_crypto_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,


@@ -908,7 +908,6 @@ static const struct virtio_device_id id_table[] = {
static struct virtio_driver virtio_scmi_driver = {
.driver.name = "scmi-virtio",
.driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,


@@ -653,7 +653,6 @@ static struct virtio_driver virtio_gpio_driver = {
.remove = virtio_gpio_remove,
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
module_virtio_driver(virtio_gpio_driver);


@@ -154,7 +154,6 @@ static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,


@@ -1251,7 +1251,6 @@ MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_iommu_drv = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),


@@ -494,7 +494,6 @@ static struct virtio_driver virtio_nsm_driver = {
.feature_table_legacy = 0,
.feature_table_size_legacy = 0,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = nsm_device_probe,
.remove = nsm_device_remove,


@@ -782,7 +782,6 @@ static struct virtio_driver caif_virtio_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = cfv_probe,
.remove = cfv_remove,


@@ -6039,7 +6039,6 @@ static struct virtio_driver virtio_net_driver = {
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.validate = virtnet_validate,
.probe = virtnet_probe,


@@ -6678,7 +6678,6 @@ MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_hwsim = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = hwsim_virtio_probe,
.remove = hwsim_virtio_remove,


@@ -151,7 +151,6 @@ static struct virtio_driver virtio_pmem_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.validate = virtio_pmem_validate,
.probe = virtio_pmem_probe,


@@ -1053,7 +1053,6 @@ static struct virtio_driver virtio_ipc_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = rpmsg_probe,
.remove = rpmsg_remove,


@@ -1052,7 +1052,6 @@ static struct virtio_driver virtio_scsi_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP


@@ -98,7 +98,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
len = sysfs_emit(buf, "%s\n", vdev->driver_override);
device_unlock(dev);
return len;
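
The snprintf()-to-sysfs_emit() change above is the standard sysfs
conversion: show() callbacks hand out exactly one page, and sysfs_emit()
both documents that and refuses to write past it. The general pattern,
with a made-up attribute and driver:

	static ssize_t value_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		struct my_dev *md = dev_get_drvdata(dev);	/* hypothetical */

		/* sysfs_emit() checks for a page-aligned, PAGE_SIZE buffer */
		return sysfs_emit(buf, "%d\n", md->value);
	}
	static DEVICE_ATTR_RO(value);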


@@ -8,6 +8,7 @@
*
*/
#include "linux/virtio_net.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
@@ -28,6 +29,7 @@
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_blk.h>
#include <uapi/linux/virtio_ring.h>
#include <linux/mod_devicetable.h>
#include "iova_domain.h"
@@ -141,6 +143,7 @@ static struct workqueue_struct *vduse_irq_bound_wq;
static u32 allowed_device_id[] = {
VIRTIO_ID_BLOCK,
VIRTIO_ID_NET,
};
static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
@@ -1705,13 +1708,21 @@ static bool device_is_allowed(u32 device_id)
return false;
}
static bool features_is_valid(u64 features)
static bool features_is_valid(struct vduse_dev_config *config)
{
if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
if (!(config->features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
return false;
/* Now we only support read-only configuration space */
if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
if ((config->device_id == VIRTIO_ID_BLOCK) &&
(config->features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE)))
return false;
else if ((config->device_id == VIRTIO_ID_NET) &&
(config->features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
return false;
if ((config->device_id == VIRTIO_ID_NET) &&
!(config->features & BIT_ULL(VIRTIO_F_VERSION_1)))
return false;
return true;
@@ -1738,7 +1749,7 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
if (!device_is_allowed(config->device_id))
return false;
if (!features_is_valid(config->features))
if (!features_is_valid(config))
return false;
return true;
@@ -1821,6 +1832,10 @@ static int vduse_create_dev(struct vduse_dev_config *config,
int ret;
struct vduse_dev *dev;
ret = -EPERM;
if ((config->device_id == VIRTIO_ID_NET) && !capable(CAP_NET_ADMIN))
goto err;
ret = -EEXIST;
if (vduse_find_dev(config->name))
goto err;
@@ -2064,6 +2079,7 @@ static const struct vdpa_mgmtdev_ops vdpa_dev_mgmtdev_ops = {
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
{ 0 },
};
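
Taken together, these hunks are what "virtio-net is finally supported in
vduse" means: VIRTIO_ID_NET joins the allowed-device list, net devices may
only be created with CAP_NET_ADMIN, and their features must include
VIRTIO_F_ACCESS_PLATFORM and VIRTIO_F_VERSION_1 while leaving out
VIRTIO_NET_F_CTRL_VQ (control-queue support is not wired up at this
stage). A rough userspace sketch of creating such a device, assuming the
existing VDUSE uapi in <linux/vduse.h> and eliding the config-space
payload a real device would also supply:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/vduse.h>
	#include <linux/virtio_config.h>
	#include <linux/virtio_ids.h>

	static int create_vduse_net(void)
	{
		struct vduse_dev_config cfg = {
			.device_id = VIRTIO_ID_NET,
			.vq_num    = 2,		/* one rx/tx pair, no control vq */
			.vq_align  = 4096,
			/* must offer ACCESS_PLATFORM and VERSION_1, and must
			 * not offer VIRTIO_NET_F_CTRL_VQ, per the checks above */
			.features  = (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
				     (1ULL << VIRTIO_F_VERSION_1),
		};
		int ctrl = open("/dev/vduse/control", O_RDWR);

		if (ctrl < 0)
			return -1;
		strncpy(cfg.name, "vduse-net0", sizeof(cfg.name) - 1);
		/* VIRTIO_ID_NET now requires CAP_NET_ADMIN */
		return ioctl(ctrl, VDUSE_CREATE_DEV, &cfg);
	}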


@@ -160,7 +160,13 @@ static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
struct pci_dev *pdev = mdev->pci_dev;
int i, ret, irq;
int queues = vp_vdpa->queues;
int vectors = queues + 1;
int vectors = 1;
int msix_vec = 0;
for (i = 0; i < queues; i++) {
if (vp_vdpa->vring[i].cb.callback)
vectors++;
}
ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
if (ret != vectors) {
@@ -173,9 +179,12 @@ static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
vp_vdpa->vectors = vectors;
for (i = 0; i < queues; i++) {
if (!vp_vdpa->vring[i].cb.callback)
continue;
snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
"vp-vdpa[%s]-%d\n", pci_name(pdev), i);
irq = pci_irq_vector(pdev, i);
irq = pci_irq_vector(pdev, msix_vec);
ret = devm_request_irq(&pdev->dev, irq,
vp_vdpa_vq_handler,
0, vp_vdpa->vring[i].msix_name,
@@ -185,21 +194,22 @@ static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
"vp_vdpa: fail to request irq for vq %d\n", i);
goto err;
}
vp_modern_queue_vector(mdev, i, i);
vp_modern_queue_vector(mdev, i, msix_vec);
vp_vdpa->vring[i].irq = irq;
msix_vec++;
}
snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
pci_name(pdev));
irq = pci_irq_vector(pdev, queues);
irq = pci_irq_vector(pdev, msix_vec);
ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
vp_vdpa->msix_name, vp_vdpa);
if (ret) {
dev_err(&pdev->dev,
"vp_vdpa: fail to request irq for vq %d\n", i);
"vp_vdpa: fail to request irq for config: %d\n", ret);
goto err;
}
vp_modern_config_vector(mdev, queues);
vp_modern_config_vector(mdev, msix_vec);
vp_vdpa->config_irq = irq;
return 0;
@@ -216,7 +226,10 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
!(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
vp_vdpa_request_irq(vp_vdpa);
if (vp_vdpa_request_irq(vp_vdpa)) {
WARN_ON(1);
return;
}
}
vp_modern_set_status(mdev, status);


@@ -210,6 +210,7 @@ struct vhost_scsi {
struct vhost_scsi_tmf {
struct vhost_work vwork;
struct work_struct flush_work;
struct vhost_scsi *vhost;
struct vhost_scsi_virtqueue *svq;
@@ -358,14 +359,23 @@ static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
vhost_scsi_put_inflight(inflight);
}
static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq)
{
struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
llnode = llist_del_all(&svq->completion_list);
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list)
vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
}
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct vhost_scsi_tmf *tmf = container_of(se_cmd,
struct vhost_scsi_tmf, se_cmd);
struct vhost_virtqueue *vq = &tmf->svq->vq;
vhost_vq_work_queue(vq, &tmf->vwork);
schedule_work(&tmf->flush_work);
} else {
struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
@@ -373,7 +383,8 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
struct vhost_scsi_virtqueue, vq);
llist_add(&cmd->tvc_completion_list, &svq->completion_list);
vhost_vq_work_queue(&svq->vq, &svq->completion_work);
if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work))
vhost_scsi_drop_cmds(svq);
}
}
@@ -497,10 +508,8 @@ again:
vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}
static void vhost_scsi_evt_work(struct vhost_work *work)
static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_event_work);
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct vhost_scsi_evt *evt, *t;
struct llist_node *llnode;
@@ -508,12 +517,20 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
mutex_lock(&vq->mutex);
llnode = llist_del_all(&vs->vs_event_list);
llist_for_each_entry_safe(evt, t, llnode, list) {
vhost_scsi_do_evt_work(vs, evt);
if (!drop)
vhost_scsi_do_evt_work(vs, evt);
vhost_scsi_free_evt(vs, evt);
}
mutex_unlock(&vq->mutex);
}
static void vhost_scsi_evt_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_event_work);
vhost_scsi_complete_events(vs, false);
}
static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
{
struct iov_iter *iter = &cmd->saved_iter;
@@ -1270,33 +1287,32 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
vwork);
struct vhost_virtqueue *ctl_vq, *vq;
int resp_code, i;
if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) {
/*
* Flush IO vqs that don't share a worker with the ctl to make
* sure they have sent their responses before us.
*/
ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq;
for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) {
vq = &tmf->vhost->vqs[i].vq;
if (vhost_vq_is_setup(vq) &&
vq->worker != ctl_vq->worker)
vhost_vq_flush(vq);
}
int resp_code;
if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
} else {
else
resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
}
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
vhost_scsi_release_tmf_res(tmf);
}
static void vhost_scsi_tmf_flush_work(struct work_struct *work)
{
struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
flush_work);
struct vhost_virtqueue *vq = &tmf->svq->vq;
/*
* Make sure we have sent responses for other commands before we
* send our response.
*/
vhost_dev_flush(vq->dev);
if (!vhost_vq_work_queue(vq, &tmf->vwork))
vhost_scsi_release_tmf_res(tmf);
}
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
struct vhost_virtqueue *vq,
@@ -1320,6 +1336,7 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
if (!tmf)
goto send_reject;
INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work);
vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
tmf->vhost = vs;
tmf->svq = svq;
@@ -1509,7 +1526,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
}
llist_add(&evt->list, &vs->vs_event_list);
vhost_vq_work_queue(vq, &vs->vs_event_work);
if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
vhost_scsi_complete_events(vs, true);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)


@@ -1548,7 +1548,7 @@ static void vhost_vdpa_release_dev(struct device *device)
struct vhost_vdpa *v =
container_of(device, struct vhost_vdpa, dev);
ida_simple_remove(&vhost_vdpa_ida, v->minor);
ida_free(&vhost_vdpa_ida, v->minor);
kfree(v->vqs);
kfree(v);
}
@@ -1571,8 +1571,8 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
if (!v)
return -ENOMEM;
minor = ida_simple_get(&vhost_vdpa_ida, 0,
VHOST_VDPA_DEV_MAX, GFP_KERNEL);
minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1,
GFP_KERNEL);
if (minor < 0) {
kfree(v);
return minor;
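
The ida_simple_*() calls dropped here are the deprecated wrappers; the
replacement is mechanical, with one subtlety: ida_simple_get()'s upper
bound was exclusive while ida_alloc_max()'s is inclusive, hence the "- 1"
above. A self-contained sketch of the mapping (EXAMPLE_MAX is
illustrative):

	#include <linux/idr.h>

	#define EXAMPLE_MAX	64

	static DEFINE_IDA(example_ida);

	static int example_get_minor(void)
	{
		/* was: ida_simple_get(&example_ida, 0, EXAMPLE_MAX, GFP_KERNEL);
		 * the old bound was exclusive, ida_alloc_max()'s is inclusive */
		return ida_alloc_max(&example_ida, EXAMPLE_MAX - 1, GFP_KERNEL);
	}

	static void example_put_minor(int minor)
	{
		ida_free(&example_ida, minor);	/* was: ida_simple_remove() */
	}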


@@ -263,34 +263,37 @@ bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
}
EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
void vhost_vq_flush(struct vhost_virtqueue *vq)
{
struct vhost_flush_struct flush;
init_completion(&flush.wait_event);
vhost_work_init(&flush.work, vhost_flush_work);
if (vhost_vq_work_queue(vq, &flush.work))
wait_for_completion(&flush.wait_event);
}
EXPORT_SYMBOL_GPL(vhost_vq_flush);
/**
* vhost_worker_flush - flush a worker
* __vhost_worker_flush - flush a worker
* @worker: worker to flush
*
* This does not use RCU to protect the worker, so the device or worker
* mutex must be held.
* The worker's flush_mutex must be held.
*/
static void vhost_worker_flush(struct vhost_worker *worker)
static void __vhost_worker_flush(struct vhost_worker *worker)
{
struct vhost_flush_struct flush;
if (!worker->attachment_cnt || worker->killed)
return;
init_completion(&flush.wait_event);
vhost_work_init(&flush.work, vhost_flush_work);
vhost_worker_queue(worker, &flush.work);
/*
* Drop mutex in case our worker is killed and it needs to take the
* mutex to force cleanup.
*/
mutex_unlock(&worker->mutex);
wait_for_completion(&flush.wait_event);
mutex_lock(&worker->mutex);
}
static void vhost_worker_flush(struct vhost_worker *worker)
{
mutex_lock(&worker->mutex);
__vhost_worker_flush(worker);
mutex_unlock(&worker->mutex);
}
void vhost_dev_flush(struct vhost_dev *dev)
@@ -298,15 +301,8 @@ void vhost_dev_flush(struct vhost_dev *dev)
struct vhost_worker *worker;
unsigned long i;
xa_for_each(&dev->worker_xa, i, worker) {
mutex_lock(&worker->mutex);
if (!worker->attachment_cnt) {
mutex_unlock(&worker->mutex);
continue;
}
xa_for_each(&dev->worker_xa, i, worker)
vhost_worker_flush(worker);
mutex_unlock(&worker->mutex);
}
}
EXPORT_SYMBOL_GPL(vhost_dev_flush);
@@ -392,7 +388,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
__vhost_vq_meta_reset(vq);
}
static bool vhost_worker(void *data)
static bool vhost_run_work_list(void *data)
{
struct vhost_worker *worker = data;
struct vhost_work *work, *work_next;
@@ -417,6 +413,40 @@ static bool vhost_worker(void *data)
return !!node;
}
static void vhost_worker_killed(void *data)
{
struct vhost_worker *worker = data;
struct vhost_dev *dev = worker->dev;
struct vhost_virtqueue *vq;
int i, attach_cnt = 0;
mutex_lock(&worker->mutex);
worker->killed = true;
for (i = 0; i < dev->nvqs; i++) {
vq = dev->vqs[i];
mutex_lock(&vq->mutex);
if (worker ==
rcu_dereference_check(vq->worker,
lockdep_is_held(&vq->mutex))) {
rcu_assign_pointer(vq->worker, NULL);
attach_cnt++;
}
mutex_unlock(&vq->mutex);
}
worker->attachment_cnt -= attach_cnt;
if (attach_cnt)
synchronize_rcu();
/*
* Finish vhost_worker_flush calls and any other works that snuck in
* before the synchronize_rcu.
*/
vhost_run_work_list(worker);
mutex_unlock(&worker->mutex);
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->indirect);
@@ -631,9 +661,11 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
if (!worker)
return NULL;
worker->dev = dev;
snprintf(name, sizeof(name), "vhost-%d", current->pid);
vtsk = vhost_task_create(vhost_worker, worker, name);
vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
worker, name);
if (!vtsk)
goto free_worker;
@@ -664,22 +696,37 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
{
struct vhost_worker *old_worker;
old_worker = rcu_dereference_check(vq->worker,
lockdep_is_held(&vq->dev->mutex));
mutex_lock(&worker->mutex);
worker->attachment_cnt++;
mutex_unlock(&worker->mutex);
rcu_assign_pointer(vq->worker, worker);
if (!old_worker)
if (worker->killed) {
mutex_unlock(&worker->mutex);
return;
}
mutex_lock(&vq->mutex);
old_worker = rcu_dereference_check(vq->worker,
lockdep_is_held(&vq->mutex));
rcu_assign_pointer(vq->worker, worker);
worker->attachment_cnt++;
if (!old_worker) {
mutex_unlock(&vq->mutex);
mutex_unlock(&worker->mutex);
return;
}
mutex_unlock(&vq->mutex);
mutex_unlock(&worker->mutex);
/*
* Take the worker mutex to make sure we see the work queued from
* device wide flushes which doesn't use RCU for execution.
*/
mutex_lock(&old_worker->mutex);
old_worker->attachment_cnt--;
if (old_worker->killed) {
mutex_unlock(&old_worker->mutex);
return;
}
/*
* We don't want to call synchronize_rcu for every vq during setup
* because it will slow down VM startup. If we haven't done
@@ -690,6 +737,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
mutex_lock(&vq->mutex);
if (!vhost_vq_get_backend(vq) && !vq->kick) {
mutex_unlock(&vq->mutex);
old_worker->attachment_cnt--;
mutex_unlock(&old_worker->mutex);
/*
* vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
@@ -705,7 +754,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
/* Make sure new vq queue/flush/poll calls see the new worker */
synchronize_rcu();
/* Make sure whatever was queued gets run */
vhost_worker_flush(old_worker);
__vhost_worker_flush(old_worker);
old_worker->attachment_cnt--;
mutex_unlock(&old_worker->mutex);
}
@@ -754,10 +804,16 @@ static int vhost_free_worker(struct vhost_dev *dev,
return -ENODEV;
mutex_lock(&worker->mutex);
if (worker->attachment_cnt) {
if (worker->attachment_cnt || worker->killed) {
mutex_unlock(&worker->mutex);
return -EBUSY;
}
/*
* A flush might have raced and snuck in before attachment_cnt was set
* to zero. Make sure flushes are flushed from the queue before
* freeing.
*/
__vhost_worker_flush(worker);
mutex_unlock(&worker->mutex);
vhost_worker_destroy(dev, worker);


@@ -28,12 +28,14 @@ struct vhost_work {
struct vhost_worker {
struct vhost_task *vtsk;
struct vhost_dev *dev;
/* Used to serialize device wide flushing with worker swapping. */
struct mutex mutex;
struct llist_head work_list;
u64 kcov_handle;
u32 id;
int attachment_cnt;
bool killed;
};
/* Poll a file (eventfd or socket) */
@@ -205,7 +207,6 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
void vhost_vq_flush(struct vhost_virtqueue *vq);
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);


@@ -121,11 +121,14 @@ struct virtio_balloon {
struct page_reporting_dev_info pr_dev_info;
/* State for keeping the wakeup_source active while adjusting the balloon */
spinlock_t adjustment_lock;
bool adjustment_signal_pending;
bool adjustment_in_progress;
spinlock_t wakeup_lock;
bool processing_wakeup_event;
u32 wakeup_signal_mask;
};
#define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0)
#define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1)
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -140,6 +143,36 @@ static u32 page_to_balloon_pfn(struct page *page)
return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}
static void start_wakeup_event(struct virtio_balloon *vb, u32 mask)
{
unsigned long flags;
spin_lock_irqsave(&vb->wakeup_lock, flags);
vb->wakeup_signal_mask |= mask;
if (!vb->processing_wakeup_event) {
vb->processing_wakeup_event = true;
pm_stay_awake(&vb->vdev->dev);
}
spin_unlock_irqrestore(&vb->wakeup_lock, flags);
}
static void process_wakeup_event(struct virtio_balloon *vb, u32 mask)
{
spin_lock_irq(&vb->wakeup_lock);
vb->wakeup_signal_mask &= ~mask;
spin_unlock_irq(&vb->wakeup_lock);
}
static void finish_wakeup_event(struct virtio_balloon *vb)
{
spin_lock_irq(&vb->wakeup_lock);
if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) {
vb->processing_wakeup_event = false;
pm_relax(&vb->vdev->dev);
}
spin_unlock_irq(&vb->wakeup_lock);
}
static void balloon_ack(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
@@ -370,8 +403,10 @@ static void stats_request(struct virtqueue *vq)
struct virtio_balloon *vb = vq->vdev->priv;
spin_lock(&vb->stop_update_lock);
if (!vb->stop_update)
if (!vb->stop_update) {
start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
}
spin_unlock(&vb->stop_update_lock);
}
@@ -444,29 +479,10 @@ static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
static void start_update_balloon_size(struct virtio_balloon *vb)
{
unsigned long flags;
spin_lock_irqsave(&vb->adjustment_lock, flags);
vb->adjustment_signal_pending = true;
if (!vb->adjustment_in_progress) {
vb->adjustment_in_progress = true;
pm_stay_awake(vb->vdev->dev.parent);
}
spin_unlock_irqrestore(&vb->adjustment_lock, flags);
start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
queue_work(system_freezable_wq, &vb->update_balloon_size_work);
}
static void end_update_balloon_size(struct virtio_balloon *vb)
{
spin_lock_irq(&vb->adjustment_lock);
if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) {
vb->adjustment_in_progress = false;
pm_relax(vb->vdev->dev.parent);
}
spin_unlock_irq(&vb->adjustment_lock);
}
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -495,7 +511,10 @@ static void update_balloon_stats_func(struct work_struct *work)
vb = container_of(work, struct virtio_balloon,
update_balloon_stats_work);
process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
stats_handle_request(vb);
finish_wakeup_event(vb);
}
static void update_balloon_size_func(struct work_struct *work)
@@ -506,9 +525,7 @@ static void update_balloon_size_func(struct work_struct *work)
vb = container_of(work, struct virtio_balloon,
update_balloon_size_work);
spin_lock_irq(&vb->adjustment_lock);
vb->adjustment_signal_pending = false;
spin_unlock_irq(&vb->adjustment_lock);
process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
diff = towards_target(vb);
@@ -523,7 +540,7 @@ static void update_balloon_size_func(struct work_struct *work)
if (diff)
queue_work(system_freezable_wq, work);
else
end_update_balloon_size(vb);
finish_wakeup_event(vb);
}
static int init_vqs(struct virtio_balloon *vb)
@@ -1027,7 +1044,16 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_unregister_oom;
}
spin_lock_init(&vb->adjustment_lock);
spin_lock_init(&vb->wakeup_lock);
/*
* The virtio balloon itself can't wake up the device, but it is
* responsible for processing wakeup events passed up from the transport
* layer. Wakeup sources don't support nesting/chaining calls, so we use
* our own wakeup source to ensure wakeup events are properly handled
* without trampling on the transport layer's wakeup source.
*/
device_set_wakeup_capable(&vb->vdev->dev, true);
virtio_device_ready(vdev);
@@ -1155,7 +1181,6 @@ static struct virtio_driver virtio_balloon_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.validate = virtballoon_validate,
.probe = virtballoon_probe,


@@ -394,7 +394,6 @@ static const struct virtio_device_id id_table[] = {
static struct virtio_driver virtio_input_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,


@@ -22,6 +22,7 @@
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/vmalloc.h>
#include <linux/suspend.h>
#include <acpi/acpi_numa.h>
@@ -253,6 +254,9 @@ struct virtio_mem {
/* Memory notifier (online/offline events). */
struct notifier_block memory_notifier;
/* Notifier to block hibernation image storing/reloading. */
struct notifier_block pm_notifier;
#ifdef CONFIG_PROC_VMCORE
/* vmcore callback for /proc/vmcore handling in kdump mode */
struct vmcore_cb vmcore_cb;
@@ -1112,6 +1116,25 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
return rc;
}
static int virtio_mem_pm_notifier_cb(struct notifier_block *nb,
unsigned long action, void *arg)
{
struct virtio_mem *vm = container_of(nb, struct virtio_mem,
pm_notifier);
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_RESTORE_PREPARE:
/*
* When restarting the VM, all memory is unplugged. Don't
* allow to hibernate and restore from an image.
*/
dev_err(&vm->vdev->dev, "hibernation is not supported.\n");
return NOTIFY_BAD;
default:
return NOTIFY_OK;
}
}
/*
* Set a range of pages PG_offline. Remember pages that were never onlined
* (via generic_online_page()) using PageDirty().
@@ -2616,11 +2639,19 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
rc = register_memory_notifier(&vm->memory_notifier);
if (rc)
goto out_unreg_group;
rc = register_virtio_mem_device(vm);
/* Block hibernation as early as possible. */
vm->pm_notifier.priority = INT_MAX;
vm->pm_notifier.notifier_call = virtio_mem_pm_notifier_cb;
rc = register_pm_notifier(&vm->pm_notifier);
if (rc)
goto out_unreg_mem;
rc = register_virtio_mem_device(vm);
if (rc)
goto out_unreg_pm;
return 0;
out_unreg_pm:
unregister_pm_notifier(&vm->pm_notifier);
out_unreg_mem:
unregister_memory_notifier(&vm->memory_notifier);
out_unreg_group:
@@ -2898,6 +2929,7 @@ static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
/* unregister callbacks */
unregister_virtio_mem_device(vm);
unregister_pm_notifier(&vm->pm_notifier);
unregister_memory_notifier(&vm->memory_notifier);
/*
@@ -2961,17 +2993,40 @@ static void virtio_mem_config_changed(struct virtio_device *vdev)
#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
struct virtio_mem *vm = vdev->priv;
/*
* When restarting the VM, all memory is usually unplugged. Don't
* allow to suspend/hibernate.
* We block hibernation using the PM notifier completely. The workqueue
* is already frozen by the PM core at this point, so we simply
* reset the device and cleanup the queues.
*/
dev_err(&vdev->dev, "save/restore not supported.\n");
return -EPERM;
if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE &&
vm->plugged_size &&
!virtio_has_feature(vm->vdev, VIRTIO_MEM_F_PERSISTENT_SUSPEND)) {
dev_err(&vm->vdev->dev,
"suspending with plugged memory is not supported\n");
return -EPERM;
}
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
vm->vq = NULL;
return 0;
}
static int virtio_mem_restore(struct virtio_device *vdev)
{
return -EPERM;
struct virtio_mem *vm = vdev->priv;
int ret;
ret = virtio_mem_init_vq(vm);
if (ret)
return ret;
virtio_device_ready(vdev);
/* Let's check if anything changed. */
virtio_mem_config_changed(vdev);
return 0;
}
#endif
@@ -2980,6 +3035,7 @@ static unsigned int virtio_mem_features[] = {
VIRTIO_MEM_F_ACPI_PXM,
#endif
VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
VIRTIO_MEM_F_PERSISTENT_SUSPEND,
};
static const struct virtio_device_id virtio_mem_id_table[] = {
@@ -2991,7 +3047,6 @@ static struct virtio_driver virtio_mem_driver = {
.feature_table = virtio_mem_features,
.feature_table_size = ARRAY_SIZE(virtio_mem_features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = virtio_mem_id_table,
.probe = virtio_mem_probe,
.remove = virtio_mem_remove,


@@ -696,12 +696,10 @@ free_vm_dev:
return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
static void virtio_mmio_remove(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
unregister_virtio_device(&vm_dev->vdev);
return 0;
}
@@ -847,7 +845,7 @@ MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
static struct platform_driver virtio_mmio_driver = {
.probe = virtio_mmio_probe,
.remove = virtio_mmio_remove,
.remove_new = virtio_mmio_remove,
.driver = {
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
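
The virtio-mmio hunks are part of the tree-wide platform-driver
conversion to the void-returning remove callback: a remove routine has
nothing useful to report to the driver core, so the int-returning
.remove() gives way to .remove_new(), the transitional name for the void
variant. The shape of the conversion, with made-up names:

	static int my_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static void my_remove(struct platform_device *pdev)
	{
		/* tear down; no error code to hand back to the core */
	}

	static struct platform_driver my_driver = {
		.probe		= my_probe,
		.remove_new	= my_remove,	/* was: .remove, returning int */
		.driver		= {
			.name = "my-driver",
		},
	};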


@@ -236,7 +236,7 @@ void vp_del_vqs(struct virtio_device *vdev)
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
if (vp_dev->is_avq(vdev, vq->index))
if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index))
continue;
if (vp_dev->per_vq_vectors) {
@@ -348,8 +348,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
vring_interrupt, 0,
vp_dev->msix_names[msix_vec],
vqs[i]);
if (err)
if (err) {
vp_del_vq(vqs[i]);
goto error_find;
}
}
return 0;


@@ -376,9 +376,7 @@ static int zap_process(struct task_struct *start, int exit_code)
if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
/* The vhost_worker does not particpate in coredumps */
if ((t->flags & (PF_USER_WORKER | PF_IO_WORKER)) != PF_USER_WORKER)
nr++;
nr++;
}
}


@@ -1078,7 +1078,6 @@ static const unsigned int feature_table[] = {};
static struct virtio_driver virtio_fs_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.feature_table = feature_table,
.feature_table_size = ARRAY_SIZE(feature_table),


@@ -4,7 +4,8 @@
struct vhost_task;
struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
struct vhost_task *vhost_task_create(bool (*fn)(void *),
void (*handle_kill)(void *), void *arg,
const char *name);
void vhost_task_start(struct vhost_task *vtsk);
void vhost_task_stop(struct vhost_task *vtsk);


@@ -90,6 +90,8 @@
#define VIRTIO_MEM_F_ACPI_PXM 0
/* unplugged memory must not be accessed */
#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
/* plugged memory will remain plugged when suspending+resuming */
#define VIRTIO_MEM_F_PERSISTENT_SUSPEND 2
/* --- virtio-mem: guest -> host requests --- */
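
On the driver side the new bit is an ordinary feature check; the
virtio_mem_freeze() hunk earlier only refuses suspend when memory is
plugged, the target state is not s2idle, and the device did not offer the
bit. Roughly:

	/* sketch, mirroring the virtio_mem_freeze() gate above */
	if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE &&
	    vm->plugged_size &&
	    !virtio_has_feature(vm->vdev, VIRTIO_MEM_F_PERSISTENT_SUSPEND))
		return -EPERM;	/* plugged memory would not survive suspend */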


@@ -413,10 +413,7 @@ static void coredump_task_exit(struct task_struct *tsk)
tsk->flags |= PF_POSTCOREDUMP;
core_state = tsk->signal->core_state;
spin_unlock_irq(&tsk->sighand->siglock);
/* The vhost_worker does not particpate in coredumps */
if (core_state &&
((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
if (core_state) {
struct core_thread self;
self.task = current;


@@ -1375,9 +1375,7 @@ int zap_other_threads(struct task_struct *p)
for_other_threads(p, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
/* Don't require de_thread to wait for the vhost_worker */
if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
count++;
count++;
/* Don't bother with already dead threads */
if (t->exit_state)


@@ -10,38 +10,32 @@
enum vhost_task_flags {
VHOST_TASK_FLAGS_STOP,
VHOST_TASK_FLAGS_KILLED,
};
struct vhost_task {
bool (*fn)(void *data);
void (*handle_sigkill)(void *data);
void *data;
struct completion exited;
unsigned long flags;
struct task_struct *task;
/* serialize SIGKILL and vhost_task_stop calls */
struct mutex exit_mutex;
};
static int vhost_task_fn(void *data)
{
struct vhost_task *vtsk = data;
bool dead = false;
for (;;) {
bool did_work;
if (!dead && signal_pending(current)) {
if (signal_pending(current)) {
struct ksignal ksig;
/*
* Calling get_signal will block in SIGSTOP,
* or clear fatal_signal_pending, but remember
* what was set.
*
* This thread won't actually exit until all
* of the file descriptors are closed, and
* the release function is called.
*/
dead = get_signal(&ksig);
if (dead)
clear_thread_flag(TIF_SIGPENDING);
if (get_signal(&ksig))
break;
}
/* mb paired w/ vhost_task_stop */
@@ -57,7 +51,19 @@ static int vhost_task_fn(void *data)
schedule();
}
mutex_lock(&vtsk->exit_mutex);
/*
* If a vhost_task_stop and SIGKILL race, we can ignore the SIGKILL.
* When the vhost layer has called vhost_task_stop it's already stopped
* new work and flushed.
*/
if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags);
vtsk->handle_sigkill(vtsk->data);
}
mutex_unlock(&vtsk->exit_mutex);
complete(&vtsk->exited);
do_exit(0);
}
@@ -78,12 +84,17 @@ EXPORT_SYMBOL_GPL(vhost_task_wake);
* @vtsk: vhost_task to stop
*
* vhost_task_fn ensures the worker thread exits after
* VHOST_TASK_FLAGS_SOP becomes true.
* VHOST_TASK_FLAGS_STOP becomes true.
*/
void vhost_task_stop(struct vhost_task *vtsk)
{
set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
vhost_task_wake(vtsk);
mutex_lock(&vtsk->exit_mutex);
if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) {
set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
vhost_task_wake(vtsk);
}
mutex_unlock(&vtsk->exit_mutex);
/*
* Make sure vhost_task_fn is no longer accessing the vhost_task before
* freeing it below.
@@ -96,14 +107,16 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
/**
* vhost_task_create - create a copy of a task to be used by the kernel
* @fn: vhost worker function
* @arg: data to be passed to fn
* @handle_sigkill: vhost function to handle when we are killed
* @arg: data to be passed to fn and handled_kill
* @name: the thread's name
*
* This returns a specialized task for use by the vhost layer or NULL on
* failure. The returned task is inactive, and the caller must fire it up
* through vhost_task_start().
*/
struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
struct vhost_task *vhost_task_create(bool (*fn)(void *),
void (*handle_sigkill)(void *), void *arg,
const char *name)
{
struct kernel_clone_args args = {
@@ -122,8 +135,10 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
if (!vtsk)
return NULL;
init_completion(&vtsk->exited);
mutex_init(&vtsk->exit_mutex);
vtsk->data = arg;
vtsk->fn = fn;
vtsk->handle_sigkill = handle_sigkill;
args.fn_arg = vtsk;


@@ -781,7 +781,6 @@ static struct virtio_driver p9_virtio_drv = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = p9_virtio_probe,
.remove = p9_virtio_remove,


@@ -859,7 +859,6 @@ static struct virtio_driver virtio_vsock_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_vsock_probe,
.remove = virtio_vsock_remove,


@@ -438,7 +438,6 @@ static unsigned int features[] = {
static struct virtio_driver virtsnd_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),