mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
virtio: harden vring IRQ
This is a rework on the previous IRQ hardening that is done for
virtio-pci where several drawbacks were found and were reverted:
1) try to use IRQF_NO_AUTOEN which is not friendly to affinity managed IRQs
that are used by some devices such as virtio-blk
2) done only for PCI transport
The vq->broken is re-used in this patch for implementing the IRQ
hardening. The vq->broken is set to true during both initialization
and reset. And the vq->broken is set to false in
virtio_device_ready(). Then vring_interrupt() can check and return
when vq->broken is true. In this case, switch to returning IRQ_NONE
to make the interrupt core aware of such invalid interrupts and to
prevent an IRQ storm.
The reason for using a per-queue variable instead of a per-device one
is that we may need it for per queue reset hardening in the future.
Note that the hardening is only done for vring interrupt since the
config interrupt hardening is already done in commit 22b7050a02
("virtio: defer config changed notifications"). But the method that is
used by config interrupt can't be reused by the vring interrupt
handler because it uses spinlock to do the synchronization which is
expensive.
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Halil Pasic <pasic@linux.ibm.com>
Cc: Cornelia Huck <cohuck@redhat.com>
Cc: Vineeth Vijayan <vneethv@linux.ibm.com>
Cc: Peter Oberparleiter <oberpar@linux.ibm.com>
Cc: linux-s390@vger.kernel.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20220527060120.20964-9-jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
This commit is contained in:
parent
be83f04d25
commit
8b4ec69d7e
@ -971,6 +971,10 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
|
||||
ccw->flags = 0;
|
||||
ccw->count = sizeof(status);
|
||||
ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
|
||||
/* We use ssch for setting the status which is a serializing
|
||||
* instruction that guarantees the memory writes have
|
||||
* completed before ssch.
|
||||
*/
|
||||
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
|
||||
/* Write failed? We assume status is unchanged. */
|
||||
if (ret)
|
||||
|
@ -220,6 +220,15 @@ static int virtio_features_ok(struct virtio_device *dev)
|
||||
* */
|
||||
void virtio_reset_device(struct virtio_device *dev)
|
||||
{
|
||||
/*
|
||||
* The below virtio_synchronize_cbs() guarantees that any
|
||||
* interrupt for this line arriving after
|
||||
* virtio_synchronize_vqs() has completed is guaranteed to see
|
||||
* vq->broken as true.
|
||||
*/
|
||||
virtio_break_device(dev);
|
||||
virtio_synchronize_cbs(dev);
|
||||
|
||||
dev->config->reset(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtio_reset_device);
|
||||
@ -428,6 +437,9 @@ int register_virtio_device(struct virtio_device *dev)
|
||||
dev->config_enabled = false;
|
||||
dev->config_change_pending = false;
|
||||
|
||||
INIT_LIST_HEAD(&dev->vqs);
|
||||
spin_lock_init(&dev->vqs_list_lock);
|
||||
|
||||
/* We always start by resetting the device, in case a previous
|
||||
* driver messed it up. This also tests that code path a little. */
|
||||
virtio_reset_device(dev);
|
||||
@ -435,9 +447,6 @@ int register_virtio_device(struct virtio_device *dev)
|
||||
/* Acknowledge that we've seen the device. */
|
||||
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
|
||||
|
||||
INIT_LIST_HEAD(&dev->vqs);
|
||||
spin_lock_init(&dev->vqs_list_lock);
|
||||
|
||||
/*
|
||||
* device_add() causes the bus infrastructure to look for a matching
|
||||
* driver.
|
||||
|
@ -253,6 +253,11 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
|
||||
/* We should never be setting status to 0. */
|
||||
BUG_ON(status == 0);
|
||||
|
||||
/*
|
||||
* Per memory-barriers.txt, wmb() is not needed to guarantee
|
||||
* that the cache coherent memory writes have completed
|
||||
* before writing to the MMIO region.
|
||||
*/
|
||||
writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
|
||||
}
|
||||
|
||||
|
@ -467,6 +467,11 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
|
||||
{
|
||||
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
|
||||
|
||||
/*
|
||||
* Per memory-barriers.txt, wmb() is not needed to guarantee
|
||||
* that the cache coherent memory writes have completed
|
||||
* before writing to the MMIO region.
|
||||
*/
|
||||
vp_iowrite8(status, &cfg->device_status);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vp_modern_set_status);
|
||||
|
@ -1688,7 +1688,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
|
||||
vq->we_own_ring = true;
|
||||
vq->notify = notify;
|
||||
vq->weak_barriers = weak_barriers;
|
||||
vq->broken = false;
|
||||
vq->broken = true;
|
||||
vq->last_used_idx = 0;
|
||||
vq->event_triggered = false;
|
||||
vq->num_added = 0;
|
||||
@ -2134,8 +2134,11 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
if (unlikely(vq->broken))
|
||||
return IRQ_HANDLED;
|
||||
if (unlikely(vq->broken)) {
|
||||
dev_warn_once(&vq->vq.vdev->dev,
|
||||
"virtio vring IRQ raised before DRIVER_OK");
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
/* Just a hint for performance: so it's ok that this can be racy! */
|
||||
if (vq->event)
|
||||
@ -2177,7 +2180,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
|
||||
vq->we_own_ring = false;
|
||||
vq->notify = notify;
|
||||
vq->weak_barriers = weak_barriers;
|
||||
vq->broken = false;
|
||||
vq->broken = true;
|
||||
vq->last_used_idx = 0;
|
||||
vq->event_triggered = false;
|
||||
vq->num_added = 0;
|
||||
|
@ -256,6 +256,26 @@ void virtio_device_ready(struct virtio_device *dev)
|
||||
unsigned status = dev->config->get_status(dev);
|
||||
|
||||
BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
|
||||
|
||||
/*
|
||||
* The virtio_synchronize_cbs() makes sure vring_interrupt()
|
||||
* will see the driver specific setup if it sees vq->broken
|
||||
* as false (even if the notifications come before DRIVER_OK).
|
||||
*/
|
||||
virtio_synchronize_cbs(dev);
|
||||
__virtio_unbreak_device(dev);
|
||||
/*
|
||||
* The transport should ensure the visibility of vq->broken
|
||||
* before setting DRIVER_OK. See the comments for the transport
|
||||
* specific set_status() method.
|
||||
*
|
||||
* A well behaved device will only notify a virtqueue after
|
||||
* DRIVER_OK, this means the device should "see" the coherent
|
||||
* memory write that set vq->broken as false which is done by
|
||||
* the driver when it sees DRIVER_OK, then the following
|
||||
* driver's vring_interrupt() will see vq->broken as false so
|
||||
* we won't lose any notification.
|
||||
*/
|
||||
dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user