vhost,virtio,vdpa: bugfixes
Misc fixes all over the place.
Revert of virtio used length validation series: the approach taken does
not seem to work, breaking too many guests in the process. We'll need to
do length validation using some other approach.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-----BEGIN PGP SIGNATURE-----
iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmGe0sEPHG1zdEByZWRo
YXQuY29tAAoJECgfDbjSjVRp8WEH/imDIq1iduDeAuvFnmrm5eEO9w3wzXCT4NiG
8Pla241FzQ1pEFEAne16KP0+SlLhj7P0oc5FR8vkYvxxuyneDbCzcS2M1kYMOpA1
ry28PuObAnekzE/WXxvC031ozB5Zb/FL54gmw+/1EdAOdMGL0CdQ1aJxREBHRTBo
p4ZHr83GA2D2C/IyKCsgQ8cB9ZrMqImTQQ4vRD89HoFBp+GH2u2Di1iyXEWuOqdI
n1+7M9jjbyW8A+N1bkOicpShS/6UcyJQOOcg8kvUQOV6srVkYhfaiWC/CbOP2g73
8PKK+/K2Htf92s6RdvDUPSKmvqGR/4KPZWPtWThXBYXGgWul0uI=
=q6tO
-----END PGP SIGNATURE-----
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull vhost,virtio,vdpa bugfixes from Michael Tsirkin:
"Misc fixes all over the place.
Revert of virtio used length validation series: the approach taken
does not seem to work, breaking too many guests in the process. We'll
need to do length validation using some other approach"
[ This merge also ends up reverting commit f7a36b03a7 ("vsock/virtio:
  suppress used length validation"), which came in through the
  networking tree in the meantime, and was part of that whole used
  length validation series  - Linus ]
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
vdpa_sim: avoid putting an uninitialized iova_domain
vhost-vdpa: clean irqs before reseting vdpa device
virtio-blk: modify the value type of num in virtio_queue_rq()
vhost/vsock: cleanup removing `len` variable
vhost/vsock: fix incorrect used length reported to the guest
Revert "virtio_ring: validate used buffer length"
Revert "virtio-net: don't let virtio core to validate used length"
Revert "virtio-blk: don't let virtio core to validate used length"
Revert "virtio-scsi: don't let virtio core to validate used buffer length"
commit d06c942efe
@@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request *req = bd->rq;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
 	unsigned long flags;
-	unsigned int num;
+	int num;
 	int qid = hctx->queue_num;
 	bool notify = false;
 	blk_status_t status;
@@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = {
 	.feature_table_size = ARRAY_SIZE(features),
 	.feature_table_legacy = features_legacy,
 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
-	.suppress_used_validation = true,
 	.driver.name = KBUILD_MODNAME,
 	.driver.owner = THIS_MODULE,
 	.id_table = id_table,
@@ -3423,7 +3423,6 @@ static struct virtio_driver virtio_net_driver = {
 	.feature_table_size = ARRAY_SIZE(features),
 	.feature_table_legacy = features_legacy,
 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
-	.suppress_used_validation = true,
 	.driver.name = KBUILD_MODNAME,
 	.driver.owner = THIS_MODULE,
 	.id_table = id_table,
@@ -977,7 +977,6 @@ static unsigned int features[] = {
 static struct virtio_driver virtio_scsi_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
-	.suppress_used_validation = true,
 	.driver.name = KBUILD_MODNAME,
 	.driver.owner = THIS_MODULE,
 	.id_table = id_table,
@@ -591,8 +591,11 @@ static void vdpasim_free(struct vdpa_device *vdpa)
 		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
 	}
 
-	put_iova_domain(&vdpasim->iova);
-	iova_cache_put();
+	if (vdpa_get_dma_dev(vdpa)) {
+		put_iova_domain(&vdpasim->iova);
+		iova_cache_put();
+	}
+
 	kvfree(vdpasim->buffer);
 	if (vdpasim->iommu)
 		vhost_iotlb_free(vdpasim->iommu);
@@ -1014,12 +1014,12 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 
 	mutex_lock(&d->mutex);
 	filep->private_data = NULL;
+	vhost_vdpa_clean_irq(v);
 	vhost_vdpa_reset(v);
 	vhost_dev_stop(&v->vdev);
 	vhost_vdpa_iotlb_free(v);
 	vhost_vdpa_free_domain(v);
 	vhost_vdpa_config_put(v);
-	vhost_vdpa_clean_irq(v);
 	vhost_dev_cleanup(&v->vdev);
 	kfree(v->vdev.vqs);
 	mutex_unlock(&d->mutex);
@@ -511,8 +511,6 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 
 	vhost_disable_notify(&vsock->dev, vq);
 	do {
-		u32 len;
-
 		if (!vhost_vsock_more_replies(vsock)) {
 			/* Stop tx until the device processes already
 			 * pending replies. Leave tx virtqueue
@@ -540,7 +538,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 			continue;
 		}
 
-		len = pkt->len;
+		total_len += sizeof(pkt->hdr) + pkt->len;
 
 		/* Deliver to monitoring devices all received packets */
 		virtio_transport_deliver_tap_pkt(pkt);
@@ -553,9 +551,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 		else
 			virtio_transport_free_pkt(pkt);
 
-		len += sizeof(pkt->hdr);
-		vhost_add_used(vq, head, len);
-		total_len += len;
+		vhost_add_used(vq, head, 0);
 		added = true;
 	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 
@@ -14,9 +14,6 @@
 #include <linux/spinlock.h>
 #include <xen/xen.h>
 
-static bool force_used_validation = false;
-module_param(force_used_validation, bool, 0444);
-
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...) \
@@ -185,9 +182,6 @@ struct vring_virtqueue {
 		} packed;
 	};
 
-	/* Per-descriptor in buffer length */
-	u32 *buflen;
-
 	/* How to notify other side. FIXME: commonalize hcalls! */
 	bool (*notify)(struct virtqueue *vq);
 
@@ -496,7 +490,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	unsigned int i, n, avail, descs_used, prev, err_idx;
 	int head;
 	bool indirect;
-	u32 buflen = 0;
 
 	START_USE(vq);
 
@@ -578,7 +571,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 						     VRING_DESC_F_NEXT |
 						     VRING_DESC_F_WRITE,
 						     indirect);
-			buflen += sg->length;
 		}
 	}
 	/* Last one doesn't continue. */
@@ -618,10 +610,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	else
 		vq->split.desc_state[head].indir_desc = ctx;
 
-	/* Store in buffer length if necessary */
-	if (vq->buflen)
-		vq->buflen[head] = buflen;
-
 	/* Put entry in available array (but don't update avail->idx until they
 	 * do sync). */
 	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
@@ -796,11 +784,6 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
 		BAD_RING(vq, "id %u is not a head!\n", i);
 		return NULL;
 	}
-	if (vq->buflen && unlikely(*len > vq->buflen[i])) {
-		BAD_RING(vq, "used len %d is larger than in buflen %u\n",
-			*len, vq->buflen[i]);
-		return NULL;
-	}
 
 	/* detach_buf_split clears data, so grab it now. */
 	ret = vq->split.desc_state[i].data;
@@ -1079,7 +1062,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	unsigned int i, n, err_idx;
 	u16 head, id;
 	dma_addr_t addr;
-	u32 buflen = 0;
 
 	head = vq->packed.next_avail_idx;
 	desc = alloc_indirect_packed(total_sg, gfp);
@@ -1109,8 +1091,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 			desc[i].addr = cpu_to_le64(addr);
 			desc[i].len = cpu_to_le32(sg->length);
 			i++;
-			if (n >= out_sgs)
-				buflen += sg->length;
 		}
 	}
 
@@ -1164,10 +1144,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	vq->packed.desc_state[id].indir_desc = desc;
 	vq->packed.desc_state[id].last = id;
 
-	/* Store in buffer length if necessary */
-	if (vq->buflen)
-		vq->buflen[id] = buflen;
-
 	vq->num_added += 1;
 
 	pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -1203,7 +1179,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	__le16 head_flags, flags;
 	u16 head, id, prev, curr, avail_used_flags;
 	int err;
-	u32 buflen = 0;
 
 	START_USE(vq);
 
@@ -1283,8 +1258,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 					1 << VRING_PACKED_DESC_F_AVAIL |
 					1 << VRING_PACKED_DESC_F_USED;
 			}
-			if (n >= out_sgs)
-				buflen += sg->length;
 		}
 	}
 
@@ -1304,10 +1277,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	vq->packed.desc_state[id].indir_desc = ctx;
 	vq->packed.desc_state[id].last = prev;
 
-	/* Store in buffer length if necessary */
-	if (vq->buflen)
-		vq->buflen[id] = buflen;
-
 	/*
 	 * A driver MUST NOT make the first descriptor in the list
 	 * available before all subsequent descriptors comprising
@@ -1494,11 +1463,6 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
 		BAD_RING(vq, "id %u is not a head!\n", id);
 		return NULL;
 	}
-	if (vq->buflen && unlikely(*len > vq->buflen[id])) {
-		BAD_RING(vq, "used len %d is larger than in buflen %u\n",
-			*len, vq->buflen[id]);
-		return NULL;
-	}
 
 	/* detach_buf_packed clears data, so grab it now. */
 	ret = vq->packed.desc_state[id].data;
@@ -1704,7 +1668,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
 	struct vring_virtqueue *vq;
 	struct vring_packed_desc *ring;
 	struct vring_packed_desc_event *driver, *device;
-	struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
 	size_t ring_size_in_bytes, event_size_in_bytes;
 
@@ -1794,15 +1757,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
 	if (!vq->packed.desc_extra)
 		goto err_desc_extra;
 
-	if (!drv->suppress_used_validation || force_used_validation) {
-		vq->buflen = kmalloc_array(num, sizeof(*vq->buflen),
-					   GFP_KERNEL);
-		if (!vq->buflen)
-			goto err_buflen;
-	} else {
-		vq->buflen = NULL;
-	}
-
 	/* No callback? Tell other side not to bother us. */
 	if (!callback) {
 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
@@ -1815,8 +1769,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
 	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;
 
-err_buflen:
-	kfree(vq->packed.desc_extra);
 err_desc_extra:
 	kfree(vq->packed.desc_state);
 err_desc_state:
@@ -2224,7 +2176,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
 					void (*callback)(struct virtqueue *),
 					const char *name)
 {
-	struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
 	struct vring_virtqueue *vq;
 
 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
@@ -2284,15 +2235,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	if (!vq->split.desc_extra)
 		goto err_extra;
 
-	if (!drv->suppress_used_validation || force_used_validation) {
-		vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen),
-					   GFP_KERNEL);
-		if (!vq->buflen)
-			goto err_buflen;
-	} else {
-		vq->buflen = NULL;
-	}
-
 	/* Put everything in free lists. */
 	vq->free_head = 0;
 	memset(vq->split.desc_state, 0, vring.num *
@@ -2303,8 +2245,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;
 
-err_buflen:
-	kfree(vq->split.desc_extra);
 err_extra:
 	kfree(vq->split.desc_state);
 err_state:
@@ -152,7 +152,6 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
 * @feature_table_size: number of entries in the feature table array.
 * @feature_table_legacy: same as feature_table but when working in legacy mode.
 * @feature_table_size_legacy: number of entries in feature table legacy array.
- * @suppress_used_validation: set to not have core validate used length
 * @probe: the function to call when a device is found. Returns 0 or -errno.
 * @scan: optional function to call after successful probe; intended
 *    for virtio-scsi to invoke a scan.
@@ -169,7 +168,6 @@ struct virtio_driver {
 	unsigned int feature_table_size;
 	const unsigned int *feature_table_legacy;
 	unsigned int feature_table_size_legacy;
-	bool suppress_used_validation;
 	int (*validate)(struct virtio_device *dev);
 	int (*probe)(struct virtio_device *dev);
 	void (*scan)(struct virtio_device *dev);
@@ -731,7 +731,6 @@ static unsigned int features[] = {
 static struct virtio_driver virtio_vsock_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
-	.suppress_used_validation = true,
 	.driver.name = KBUILD_MODNAME,
 	.driver.owner = THIS_MODULE,
 	.id_table = id_table,