bnxt_en: do not map packet buffers twice

Remove the double-mapping of DMA buffers, as it can prevent page pool
entries from being freed.  The mapping is now managed by the page pool
infrastructure; it was previously managed by the driver in
__bnxt_alloc_rx_page() before the page pool infrastructure was allowed
to take it over.

Fixes: 578fcfd26e ("bnxt_en: Let the page pool manage the DMA mapping")
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Reviewed-by: David Wei <dw@davidwei.uk>
Link: https://lore.kernel.org/r/20231214213138.98095-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 	for (i = 0; i < num_frags ; i++) {
 		skb_frag_t *frag = &sinfo->frags[i];
 		struct bnxt_sw_tx_bd *frag_tx_buf;
-		struct pci_dev *pdev = bp->pdev;
 		dma_addr_t frag_mapping;
 		int frag_len;
 
@@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 
 		frag_len = skb_frag_size(frag);
-		frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
-						frag_len, DMA_TO_DEVICE);
-
-		if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
-			return NULL;
-
-		dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
-
 		flags = frag_len << TX_BD_LEN_SHIFT;
 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+		frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
+			       skb_frag_off(frag);
 		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
 
 		len = frag_len;
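
For reference, a minimal sketch of the pattern the fix relies on (the
helper name below is hypothetical, not part of the driver): when the
buffers come from a page pool created with PP_FLAG_DMA_MAP, the pool
already holds a valid DMA address for each page, so the transmit path
can derive a fragment's bus address with page_pool_get_dma_addr()
instead of mapping the page a second time with skb_frag_dma_map().

#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>	/* header path as of the 6.6+ page pool split */

/* Illustrative only: compute a fragment's DMA address from its
 * page-pool-backed page.  Assumes the pool was created with
 * PP_FLAG_DMA_MAP, as arranged by the commit this one fixes.
 */
static dma_addr_t frag_pp_dma_addr(const skb_frag_t *frag)
{
	/* The pool stored the page's DMA address at allocation time;
	 * add the fragment's offset within the page to reach the data.
	 */
	return page_pool_get_dma_addr(skb_frag_page(frag)) +
	       skb_frag_off(frag);
}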