Drivers: hv: vmbus: Introduce functions for estimating room in the ring buffer
Introduce separate functions for estimating how much can be read from
and written to the ring buffer.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit a6341f0000
parent a389fcfd2c
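The new helpers compute how much of the ring is readable (used space) and writable (free space) purely from the read and write indices and the data-area size. As background, the same arithmetic can be sketched as a small standalone C program; the demo_ring struct and demo_* functions below are hypothetical stand-ins for illustration, not the kernel's hv_ring_buffer_info or the helpers added in this commit.

/*
 * Standalone illustration of the ring-buffer availability arithmetic.
 * The struct and function names are simplified stand-ins for this
 * sketch only; they are not the kernel's types or helpers.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint32_t read_index;	/* next offset the reader will consume */
	uint32_t write_index;	/* next offset the writer will fill */
	uint32_t datasize;	/* size of the data area in bytes */
};

/* Bytes currently queued for the reader (used space). */
static uint32_t demo_bytes_to_read(const struct demo_ring *r)
{
	return r->write_index >= r->read_index ?
		r->write_index - r->read_index :
		(r->datasize - r->read_index) + r->write_index;
}

/* Bytes currently available to the writer (free space). */
static uint32_t demo_bytes_to_write(const struct demo_ring *r)
{
	return r->write_index >= r->read_index ?
		r->datasize - (r->write_index - r->read_index) :
		r->read_index - r->write_index;
}

int main(void)
{
	/* Writer has wrapped past the end of a 4096-byte data area. */
	struct demo_ring r = {
		.read_index = 4000,
		.write_index = 100,
		.datasize = 4096,
	};

	printf("bytes to read:  %u\n", (unsigned)demo_bytes_to_read(&r));	/* 196 */
	printf("bytes to write: %u\n", (unsigned)demo_bytes_to_write(&r));	/* 3900 */
	return 0;
}

In this scheme the two results always sum to the data-area size, which is why the write path below treats "only exactly enough room" as a full ring: otherwise the next write would leave read_index == write_index and the buffer would look empty.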
@@ -38,8 +38,6 @@ void hv_begin_read(struct hv_ring_buffer_info *rbi)
 
 u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 {
-	u32 read;
-	u32 write;
 
 	rbi->ring_buffer->interrupt_mask = 0;
 	mb();
@@ -49,9 +47,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 	 * If it is not, we raced and we need to process new
 	 * incoming messages.
 	 */
-	hv_get_ringbuffer_availbytes(rbi, &read, &write);
-
-	return read;
+	return hv_get_bytes_to_read(rbi);
 }
 
 /*
@@ -106,9 +102,6 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
 static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 {
 	u32 cur_write_sz;
-	u32 r_size;
-	u32 write_loc;
-	u32 read_loc = rbi->ring_buffer->read_index;
 	u32 pending_sz;
 
 	/*
@@ -125,14 +118,11 @@ static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 	mb();
 
 	pending_sz = rbi->ring_buffer->pending_send_sz;
-	write_loc = rbi->ring_buffer->write_index;
 	/* If the other end is not blocked on write don't bother. */
 	if (pending_sz == 0)
 		return false;
 
-	r_size = rbi->ring_datasize;
-	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
-			read_loc - write_loc;
+	cur_write_sz = hv_get_bytes_to_write(rbi);
 
 	if (cur_write_sz >= pending_sz)
 		return true;
@@ -332,7 +322,6 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
-	u32 bytes_avail_toread;
 	u32 totalbytes_towrite = 0;
 
 	u32 next_write_location;
@@ -348,9 +337,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	if (lock)
 		spin_lock_irqsave(&outring_info->ring_lock, flags);
 
-	hv_get_ringbuffer_availbytes(outring_info,
-				&bytes_avail_toread,
-				&bytes_avail_towrite);
+	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
 	/*
 	 * If there is only room for the packet, assume it is full.
@@ -401,7 +388,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
 		       u64 *requestid, bool *signal, bool raw)
 {
-	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
@@ -417,10 +403,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	*buffer_actual_len = 0;
 	*requestid = 0;
 
-	hv_get_ringbuffer_availbytes(inring_info,
-				&bytes_avail_toread,
-				&bytes_avail_towrite);
+	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
 
 	/* Make sure there is something to read */
 	if (bytes_avail_toread < sizeof(desc)) {
 		/*
@@ -151,6 +151,33 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
 	*read = dsize - *write;
 }
 
+static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
+{
+	u32 read_loc, write_loc, dsize, read;
+
+	dsize = rbi->ring_datasize;
+	read_loc = rbi->ring_buffer->read_index;
+	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+
+	read = write_loc >= read_loc ? (write_loc - read_loc) :
+		(dsize - read_loc) + write_loc;
+
+	return read;
+}
+
+static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
+{
+	u32 read_loc, write_loc, dsize, write;
+
+	dsize = rbi->ring_datasize;
+	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
+	write_loc = rbi->ring_buffer->write_index;
+
+	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+		read_loc - write_loc;
+	return write;
+}
+
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.