mirror of
https://github.com/torvalds/linux.git
synced 2024-11-24 21:21:41 +00:00
KVM: Clean up coalesced MMIO ring full check
Fold coalesced_mmio_has_room() into its sole caller, coalesced_mmio_write(), as it's really just a single line of code, has a goofy return value, and is unnecessarily brittle. E.g. if coalesced_mmio_has_room() were to check ring->last directly, or the caller failed to use READ_ONCE(), KVM would be susceptible to TOCTOU attacks from userspace. Opportunistically add a comment explaining why on earth KVM leaves one entry free, which may not be obvious to readers that aren't familiar with ring buffers. No functional change intended. Reviewed-by: Ilias Stamatis <ilstam@amazon.com> Cc: Paul Durrant <paul@xen.org> Link: https://lore.kernel.org/r/20240828181446.652474-3-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
parent
215b3cb7a8
commit
e027ba1b83
@@ -40,25 +40,6 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
|
|||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * Check whether one more entry can be appended to the coalesced MMIO ring
 * without clobbering data userspace has not yet consumed.
 *
 * @dev:  coalesced MMIO device whose ring buffer is examined
 * @last: snapshot of ring->last, i.e. the index of the next free slot
 *
 * Returns 1 if there is room, 0 if the ring is full.  One slot is always
 * left unused so that an empty ring (first == last) can be distinguished
 * from a full one.
 */
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	/* Full when advancing 'last' would collide with 'first'. */
	if ((last + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first))
		return 0;

	return 1;
}
|
|
||||||
|
|
||||||
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
|
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
|
||||||
struct kvm_io_device *this, gpa_t addr,
|
struct kvm_io_device *this, gpa_t addr,
|
||||||
int len, const void *val)
|
int len, const void *val)
|
||||||
@@ -72,9 +53,15 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
|
|||||||
|
|
||||||
spin_lock(&dev->kvm->ring_lock);
|
spin_lock(&dev->kvm->ring_lock);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* last is the index of the entry to fill. Verify userspace hasn't
|
||||||
|
* set last to be out of range, and that there is room in the ring.
|
||||||
|
* Leave one entry free in the ring so that userspace can differentiate
|
||||||
|
* between an empty ring and a full ring.
|
||||||
|
*/
|
||||||
insert = READ_ONCE(ring->last);
|
insert = READ_ONCE(ring->last);
|
||||||
if (!coalesced_mmio_has_room(dev, insert) ||
|
if (insert >= KVM_COALESCED_MMIO_MAX ||
|
||||||
insert >= KVM_COALESCED_MMIO_MAX) {
|
(insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
|
||||||
spin_unlock(&dev->kvm->ring_lock);
|
spin_unlock(&dev->kvm->ring_lock);
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user