ibmvnic: rework to ensure SCRQ entry reads are properly ordered

Move the dma_rmb() that callers issue between pending_scrq() and
ibmvnic_next_scrq() into the end of pending_scrq() itself. This avoids
duplicating the same barrier, which would otherwise be needed in three
places.

Signed-off-by: Lijun Pan <ljp@linux.ibm.com>
Acked-by: Thomas Falcon <tlfalcon@linux.ibm.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit 665ab1eb18 (parent 1a2b60f6f1)
Author:    Lijun Pan <ljp@linux.ibm.com>
Date:      2021-01-29 19:19:04 -06:00
Committer: Jakub Kicinski <kuba@kernel.org>

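For background (not part of the patch): the device fills an SCRQ entry and only then sets its valid bit, so the driver must observe the flag before it reads the entry's contents, which is the load ordering dma_rmb() enforces. The standalone C11 userspace analogue below is only a sketch of that pattern; the names (struct desc, pending(), producer()) are made up for illustration, an acquire fence stands in for dma_rmb(), and an atomic int stands in for the IBMVNIC_CRQ_CMD_RSP flag.

/* Analogue of the flag-then-contents ordering.
 * Build with: cc -std=c11 -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct desc {
	int payload;			/* stands in for the SCRQ entry contents */
};

static struct desc slot;
static atomic_int valid;		/* stands in for the CMD_RSP valid flag */

static void *producer(void *arg)	/* stands in for the device/DMA side */
{
	(void)arg;
	slot.payload = 42;		/* write the entry contents first ... */
	atomic_store_explicit(&valid, 1, memory_order_release);
					/* ... then publish the valid flag */
	return NULL;
}

/* Analogue of the reworked pending_scrq(): load the flag, then order every
 * later read after that load, so callers can read the entry directly.
 */
static int pending(void)
{
	int rc = !!atomic_load_explicit(&valid, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* plays the role of dma_rmb() */
	return rc;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!pending())
		;				/* poll, like the rx/tx completion loops */
	printf("payload = %d\n", slot.payload);	/* ordered after the flag load */
	pthread_join(t, NULL);
	return 0;
}

(dma_rmb() orders reads from device/DMA memory rather than acting as a full C11 acquire, so this illustrates the pattern only, not a drop-in equivalent.)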
@@ -2444,12 +2444,6 @@ restart_poll:
 
 		if (!pending_scrq(adapter, rx_scrq))
			break;
-		/* The queue entry at the current index is peeked at above
-		 * to determine that there is a valid descriptor awaiting
-		 * processing. We want to be sure that the current slot
-		 * holds a valid descriptor before reading its contents.
-		 */
-		dma_rmb();
 		next = ibmvnic_next_scrq(adapter, rx_scrq);
 		rx_buff =
			(struct ibmvnic_rx_buff *)be64_to_cpu(next->
@@ -3189,13 +3183,6 @@ restart_loop:
 		int total_bytes = 0;
 		int num_packets = 0;
 
-		/* The queue entry at the current index is peeked at above
-		 * to determine that there is a valid descriptor awaiting
-		 * processing. We want to be sure that the current slot
-		 * holds a valid descriptor before reading its contents.
-		 */
-		dma_rmb();
-
 		next = ibmvnic_next_scrq(adapter, scrq);
 		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i])
@@ -3569,11 +3556,16 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
 {
 	union sub_crq *entry = &scrq->msgs[scrq->cur];
+	int rc;
 
-	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
-		return 1;
-	else
-		return 0;
+	rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
+
+	/* Ensure that the SCRQ valid flag is loaded prior to loading the
+	 * contents of the SCRQ descriptor
+	 */
+	dma_rmb();
+
+	return rc;
 }
 
 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
@@ -3592,8 +3584,8 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
 	}
 	spin_unlock_irqrestore(&scrq->lock, flags);
 
-	/* Ensure that the entire buffer descriptor has been
-	 * loaded before reading its contents
+	/* Ensure that the SCRQ valid flag is loaded prior to loading the
+	 * contents of the SCRQ descriptor
 	 */
 	dma_rmb();
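
Design note (my reading of the diff, not the author's commentary): with the barrier folded into pending_scrq(), the rx poll loop and the tx completion loop above no longer carry their own dma_rmb() between the pending_scrq() check and ibmvnic_next_scrq(), while ibmvnic_next_scrq() keeps its existing dma_rmb() for paths that dequeue entries without a preceding pending_scrq() call; only the comment wording is updated there to match.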