ibmvnic: clean pending indirect buffs during reset

We batch subordinate command response queue (scrq) descriptors that we
need to send to the VIOS using an "indirect" buffer. If, after we queue
one or more descriptors in the indirect buffer, we encounter an error
(say, a failure to allocate an skb), we leave the queued descriptors in
the indirect buffer until the next call to ibmvnic_xmit().
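
(For context, here is a condensed sketch of the flush path referred to
above, modeled on ibmvnic_tx_scrq_flush() in
drivers/net/ethernet/ibm/ibmvnic.c; it is an illustration, not the
verbatim driver code.)

/* Condensed sketch, not verbatim driver code: everything queued in the
 * indirect buffer is handed to the VIOS in a single
 * H_SEND_SUB_CRQ_INDIRECT hcall via send_subcrq_indirect().
 */
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp = &tx_scrq->ind_buf;
	u64 entries = (u64)ind_bufp->index;
	int rc;

	if (!entries)
		return 0;	/* nothing batched, nothing to send */

	rc = send_subcrq_indirect(adapter, tx_scrq->handle,
				  (u64)ind_bufp->indir_dma, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;	/* descriptors now owned by the VIOS */
	return rc;
}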

On the next call to ibmvnic_xmit(), the adapter may be going through a
reset, and the long term buffers may already have been unmapped on the
VIOS side. If we proceed to flush (send) the packets that are in the
indirect buffer, we will end up using the old map ids, which can cause
the VIOS to trigger an unnecessary FATAL error reset.

Instead of flushing the packets remaining in the indirect buffer,
discard (clean) them.

Fixes: 0d97338818 ("ibmvnic: Introduce xmit_more support using batched subCRQ hcalls")
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/ibm/ibmvnic.c

@@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *);
 static int __ibmvnic_set_mac(struct net_device *, u8 *);
 static int init_crq_queue(struct ibmvnic_adapter *adapter);
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+					 struct ibmvnic_sub_crq_queue *tx_scrq);
 
 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -668,6 +670,7 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 
 	tx_scrqs = adapter->num_active_tx_pools;
 	for (i = 0; i < tx_scrqs; i++) {
+		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
 		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
 		if (rc)
 			return rc;
@@ -1618,7 +1621,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 	ind_bufp->index = 0;
 	if (atomic_sub_return(entries, &tx_scrq->used) <=
 	    (adapter->req_tx_entries_per_subcrq / 2) &&
-	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
+	    !test_bit(0, &adapter->resetting)) {
 		netif_wake_subqueue(adapter->netdev, queue_num);
 		netdev_dbg(adapter->netdev, "Started queue %d\n",
 			   queue_num);
@@ -1711,7 +1715,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_send_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
-		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 		goto out;
 	}
@@ -3175,6 +3178,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
 				   i);
+			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
 			if (adapter->tx_scrq[i]->irq) {
 				free_irq(adapter->tx_scrq[i]->irq,
 					 adapter->tx_scrq[i]);
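
(For readers following the hunks above: ibmvnic_tx_scrq_clean_buffer(),
which the new call sites invoke, recycles the still-queued descriptors
locally instead of sending them. Below is a simplified sketch of its
core loop, assuming the field names from ibmvnic.c; it omits the
statistics adjustments and the queue-wake tail visible in the hunk at
line 1621, and is not the verbatim driver code.)

/* Simplified sketch: walk the queued tx descriptors in reverse, return
 * each buffer's index to its pool's free map, and drop the attached skb
 * rather than handing stale map ids to the VIOS.
 */
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp = &tx_scrq->ind_buf;
	int queue_num = tx_scrq->pool_index;
	int entries = (int)ind_bufp->index;
	int i;

	for (i = entries - 1; i >= 0; i--) {
		union sub_crq *entry = &ind_bufp->indir_arr[i];
		struct ibmvnic_tx_pool *tx_pool;
		struct ibmvnic_tx_buff *tx_buff;
		int index;

		if (entry->v1.type != IBMVNIC_TX_DESC)
			continue;

		index = be32_to_cpu(entry->v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}

		/* Put the buffer back on the free map and free the skb. */
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
			tx_pool->num_buffers - 1 : tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
	}
	ind_bufp->index = 0;	/* indirect buffer is now empty */
}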