nfp: cleanup tx ring flush and rename to reset

Since flush was never used without freeing the ring afterwards,
the functionality of the two operations has become mixed.
Rename flush to ring reset and move into it everything that has
to be done after the FW ring state is cleared. While at it, do
some clean-ups.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jakub Kicinski 2016-04-07 19:39:39 +01:00 committed by David S. Miller
parent 73725d9dfd
commit 827deea9bc

@@ -867,61 +867,59 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 }
 
 /**
- * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring
- * @tx_ring:     TX ring structure
+ * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @nn:          NFP Net device
+ * @tx_ring:     TX ring structure
  *
  * Assumes that the device is stopped
  */
-static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
 {
-	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-	struct nfp_net *nn = r_vec->nfp_net;
-	struct pci_dev *pdev = nn->pdev;
 	const struct skb_frag_struct *frag;
 	struct netdev_queue *nd_q;
-	struct sk_buff *skb;
-	int nr_frags;
-	int fidx;
-	int idx;
+	struct pci_dev *pdev = nn->pdev;
 
 	while (tx_ring->rd_p != tx_ring->wr_p) {
+		int nr_frags, fidx, idx;
+		struct sk_buff *skb;
+
 		idx = tx_ring->rd_p % tx_ring->cnt;
-
 		skb = tx_ring->txbufs[idx].skb;
-		if (skb) {
-			nr_frags = skb_shinfo(skb)->nr_frags;
-			fidx = tx_ring->txbufs[idx].fidx;
-
-			if (fidx == -1) {
-				/* unmap head */
-				dma_unmap_single(&pdev->dev,
-						 tx_ring->txbufs[idx].dma_addr,
-						 skb_headlen(skb),
-						 DMA_TO_DEVICE);
-			} else {
-				/* unmap fragment */
-				frag = &skb_shinfo(skb)->frags[fidx];
-				dma_unmap_page(&pdev->dev,
-					       tx_ring->txbufs[idx].dma_addr,
-					       skb_frag_size(frag),
-					       DMA_TO_DEVICE);
-			}
-
-			/* check for last gather fragment */
-			if (fidx == nr_frags - 1)
-				dev_kfree_skb_any(skb);
-
-			tx_ring->txbufs[idx].dma_addr = 0;
-			tx_ring->txbufs[idx].skb = NULL;
-			tx_ring->txbufs[idx].fidx = -2;
-		}
-
-		memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx]));
+		nr_frags = skb_shinfo(skb)->nr_frags;
+		fidx = tx_ring->txbufs[idx].fidx;
+
+		if (fidx == -1) {
+			/* unmap head */
+			dma_unmap_single(&pdev->dev,
+					 tx_ring->txbufs[idx].dma_addr,
+					 skb_headlen(skb), DMA_TO_DEVICE);
+		} else {
+			/* unmap fragment */
+			frag = &skb_shinfo(skb)->frags[fidx];
+			dma_unmap_page(&pdev->dev,
+				       tx_ring->txbufs[idx].dma_addr,
+				       skb_frag_size(frag), DMA_TO_DEVICE);
+		}
+
+		/* check for last gather fragment */
+		if (fidx == nr_frags - 1)
+			dev_kfree_skb_any(skb);
+
+		tx_ring->txbufs[idx].dma_addr = 0;
+		tx_ring->txbufs[idx].skb = NULL;
+		tx_ring->txbufs[idx].fidx = -2;
 
 		tx_ring->qcp_rd_p++;
 		tx_ring->rd_p++;
 	}
 
+	memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+	tx_ring->wr_p = 0;
+	tx_ring->rd_p = 0;
+	tx_ring->qcp_rd_p = 0;
+	tx_ring->wr_ptr_add = 0;
+
 	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
 	netdev_tx_reset_queue(nd_q);
 }
@@ -1362,11 +1360,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
 			  tx_ring->txds, tx_ring->dma);
 
 	tx_ring->cnt = 0;
-	tx_ring->wr_p = 0;
-	tx_ring->rd_p = 0;
-	tx_ring->qcp_rd_p = 0;
-	tx_ring->wr_ptr_add = 0;
-
 	tx_ring->txbufs = NULL;
 	tx_ring->txds = NULL;
 	tx_ring->dma = 0;
@@ -1859,7 +1852,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
 	 */
 	for (r = 0; r < nn->num_r_vecs; r++) {
 		nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
-		nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
+		nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
 		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
 		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
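
For illustration only, a minimal sketch of the reset/free split this patch introduces, using hypothetical simplified types (toy_tx_ring and its helpers are not part of the driver): reset reclaims anything still queued on the ring and rewinds the pointers so the ring can be reused, while free only releases the ring's storage. It mirrors the ordering in the close path above: reset first, while the ring memory is still valid, then free.

/* Illustration only -- hypothetical, simplified types; not driver code. */
#include <stdlib.h>

struct toy_tx_ring {
	void **bufs;		/* per-descriptor buffer pointers */
	unsigned int cnt;	/* ring size (number of descriptors) */
	unsigned int rd_p;	/* read (completion) pointer */
	unsigned int wr_p;	/* write (producer) pointer */
};

/* Reset: reclaim every buffer still queued and rewind the pointers,
 * leaving the ring empty but allocated and ready for reuse.
 */
static void toy_tx_ring_reset(struct toy_tx_ring *ring)
{
	while (ring->rd_p != ring->wr_p) {
		unsigned int idx = ring->rd_p % ring->cnt;

		free(ring->bufs[idx]);	/* stands in for DMA unmap + kfree_skb */
		ring->bufs[idx] = NULL;
		ring->rd_p++;
	}

	ring->rd_p = 0;
	ring->wr_p = 0;
}

/* Free: release the backing storage; reset() already emptied the ring. */
static void toy_tx_ring_free(struct toy_tx_ring *ring)
{
	free(ring->bufs);
	ring->bufs = NULL;
	ring->cnt = 0;
}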