From b16fe6d82b71fa0dd5c957bc22d66a694976d6eb Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Wed, 27 Jul 2022 23:20:50 +0200
Subject: [PATCH 1/3] net: ethernet: mtk_eth_soc: introduce mtk_xdp_frame_map utility routine

This is a preliminary patch to add xdp multi-frag support to the
mtk_eth_soc driver.

Signed-off-by: Lorenzo Bianconi
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 68 +++++++++++++--------
 1 file changed, 42 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c370d6589596..8450604d22ff 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1523,6 +1523,41 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
                 skb_free_frag(data);
 }
 
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+                             struct mtk_tx_dma_desc_info *txd_info,
+                             struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+                             void *data, u16 headroom, int index, bool dma_map)
+{
+        struct mtk_tx_ring *ring = &eth->tx_ring;
+        struct mtk_mac *mac = netdev_priv(dev);
+        struct mtk_tx_dma *txd_pdma;
+
+        if (dma_map) {  /* ndo_xdp_xmit */
+                txd_info->addr = dma_map_single(eth->dma_dev, data,
+                                                txd_info->size, DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+                        return -ENOMEM;
+
+                tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+        } else {
+                struct page *page = virt_to_head_page(data);
+
+                txd_info->addr = page_pool_get_dma_addr(page) +
+                                 sizeof(struct xdp_frame) + headroom;
+                dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+                                           txd_info->size, DMA_BIDIRECTIONAL);
+        }
+        mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+        tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+
+        txd_pdma = qdma_to_pdma(ring, txd);
+        setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+                     index);
+
+        return 0;
+}
+
 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
                                 struct net_device *dev, bool dma_map)
 {
@@ -1533,9 +1568,8 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
                 .first  = true,
                 .last   = true,
         };
-        struct mtk_mac *mac = netdev_priv(dev);
-        struct mtk_tx_dma *txd, *txd_pdma;
         int err = 0, index = 0, n_desc = 1;
+        struct mtk_tx_dma *txd, *txd_pdma;
         struct mtk_tx_buf *tx_buf;
 
         if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
@@ -1555,36 +1589,18 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
         tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
         memset(tx_buf, 0, sizeof(*tx_buf));
 
-        if (dma_map) {  /* ndo_xdp_xmit */
-                txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
-                                               txd_info.size, DMA_TO_DEVICE);
-                if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
-                        err = -ENOMEM;
-                        goto out;
-                }
-                tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-        } else {
-                struct page *page = virt_to_head_page(xdpf->data);
-
-                txd_info.addr = page_pool_get_dma_addr(page) +
-                                sizeof(*xdpf) + xdpf->headroom;
-                dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
-                                           txd_info.size,
-                                           DMA_BIDIRECTIONAL);
-        }
-        mtk_tx_set_dma_desc(dev, txd, &txd_info);
-
-        tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
-
-        txd_pdma = qdma_to_pdma(ring, txd);
-        setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
-                     index++);
+        err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+                                xdpf->data, xdpf->headroom, index,
+                                dma_map);
+        if (err < 0)
+                goto out;
 
         /* store xdpf for cleanup */
         tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
         tx_buf->data = xdpf;
 
         if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+                txd_pdma = qdma_to_pdma(ring, txd);
                 if (index & 1)
                         txd_pdma->txd2 |= TX_DMA_LS0;
                 else
                         txd_pdma->txd2 |= TX_DMA_LS1;
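
The split that mtk_xdp_frame_map() captures is worth spelling out. Frames reaching the driver through ndo_xdp_xmit live in memory the driver does not own, so the helper creates a fresh dma_map_single() mapping and records MTK_TX_FLAGS_SINGLE0 so the completion path knows to unmap it. XDP_TX frames live in the driver's own page-pool pages, which are already mapped DMA_BIDIRECTIONAL, so the helper only derives the payload's device address (the page's base DMA address, plus the struct xdp_frame kept at the start of the headroom, plus the remaining headroom) and issues a dma_sync_single_for_device(). Below is a standalone userspace sketch of that decision, not driver code: toy_txd, XDP_FRAME_SZ and the integer "DMA addresses" are invented stand-ins.

/* Standalone sketch of the two mapping strategies inside
 * mtk_xdp_frame_map(). Everything here is an invented stand-in for the
 * demo: toy_txd is not the hardware descriptor, the uint64_t values are
 * not real DMA addresses, and XDP_FRAME_SZ only approximates
 * sizeof(struct xdp_frame).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XDP_FRAME_SZ 40         /* illustrative only */

struct toy_txd {
        uint64_t addr;          /* device address of the payload */
        uint16_t size;
        bool single_mapped;     /* plays the role of MTK_TX_FLAGS_SINGLE0 */
};

/* XDP_TX: page-pool buffer, already mapped bidirectionally. The payload
 * address is derived from the page's base DMA address, skipping the
 * xdp_frame struct stored at the start of the headroom. Only a sync is
 * needed; there is nothing to unmap on completion. */
static void map_xdp_tx(struct toy_txd *txd, uint64_t page_dma,
                       uint16_t headroom, uint16_t len)
{
        txd->addr = page_dma + XDP_FRAME_SZ + headroom;
        txd->size = len;
        txd->single_mapped = false;
}

/* ndo_xdp_xmit: foreign memory, so create a fresh single mapping that
 * completion must tear down (the real code calls dma_map_single() and
 * checks dma_mapping_error()). */
static void map_ndo_xmit(struct toy_txd *txd, uint64_t cpu_addr, uint16_t len)
{
        txd->addr = cpu_addr;
        txd->size = len;
        txd->single_mapped = true;
}

int main(void)
{
        struct toy_txd txd;

        map_xdp_tx(&txd, 0x10000, 256, 64);
        printf("XDP_TX: addr=0x%llx unmap-on-completion=%d\n",
               (unsigned long long)txd.addr, txd.single_mapped);

        map_ndo_xmit(&txd, 0x20000, 64);
        printf("NDO:    addr=0x%llx unmap-on-completion=%d\n",
               (unsigned long long)txd.addr, txd.single_mapped);
        return 0;
}

The single_mapped flag is the toy equivalent of MTK_TX_FLAGS_SINGLE0: it is the only state the completion side needs in order to pick the correct teardown path.
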
From 155738a4f319538a09f734ce1f5a2eac3ada1de2 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Wed, 27 Jul 2022 23:20:51 +0200
Subject: [PATCH 2/3] net: ethernet: mtk_eth_soc: introduce xdp multi-frag support

Add the capability to map non-linear xdp frames in the XDP_TX path and
the ndo_xdp_xmit callback.

Signed-off-by: Lorenzo Bianconi
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 125 +++++++++++++-------
 1 file changed, 82 insertions(+), 43 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 8450604d22ff..24235f8f0a8f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1031,23 +1031,22 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
                 }
         }
 
-        if (tx_buf->type == MTK_TYPE_SKB) {
-                if (tx_buf->data &&
-                    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+        if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                if (tx_buf->type == MTK_TYPE_SKB) {
                         struct sk_buff *skb = tx_buf->data;
 
                         if (napi)
                                 napi_consume_skb(skb, napi);
                         else
                                 dev_kfree_skb_any(skb);
-                }
-        } else if (tx_buf->data) {
-                struct xdp_frame *xdpf = tx_buf->data;
+                } else {
+                        struct xdp_frame *xdpf = tx_buf->data;
 
-                if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
-                        xdp_return_frame_rx_napi(xdpf);
-                else
-                        xdp_return_frame(xdpf);
+                        if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+                                xdp_return_frame_rx_napi(xdpf);
+                        else
+                                xdp_return_frame(xdpf);
+                }
         }
         tx_buf->flags = 0;
         tx_buf->data = NULL;
@@ -1550,6 +1549,8 @@ static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
         mtk_tx_set_dma_desc(dev, txd, txd_info);
 
         tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+        tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+        tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
 
         txd_pdma = qdma_to_pdma(ring, txd);
         setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
@@ -1561,43 +1562,69 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
                                 struct net_device *dev, bool dma_map)
 {
+        struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
         const struct mtk_soc_data *soc = eth->soc;
         struct mtk_tx_ring *ring = &eth->tx_ring;
         struct mtk_tx_dma_desc_info txd_info = {
                 .size   = xdpf->len,
                 .first  = true,
-                .last   = true,
+                .last   = !xdp_frame_has_frags(xdpf),
         };
-        int err = 0, index = 0, n_desc = 1;
-        struct mtk_tx_dma *txd, *txd_pdma;
-        struct mtk_tx_buf *tx_buf;
+        int err, index = 0, n_desc = 1, nr_frags;
+        struct mtk_tx_dma *htxd, *txd, *txd_pdma;
+        struct mtk_tx_buf *htx_buf, *tx_buf;
+        void *data = xdpf->data;
 
         if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
                 return -EBUSY;
 
-        if (unlikely(atomic_read(&ring->free_count) <= 1))
+        nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+        if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
                 return -EBUSY;
 
         spin_lock(&eth->page_lock);
 
         txd = ring->next_free;
         if (txd == ring->last_free) {
-                err = -ENOMEM;
-                goto out;
+                spin_unlock(&eth->page_lock);
+                return -ENOMEM;
         }
+        htxd = txd;
 
         tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
         memset(tx_buf, 0, sizeof(*tx_buf));
+        htx_buf = tx_buf;
 
-        err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
-                                xdpf->data, xdpf->headroom, index,
-                                dma_map);
-        if (err < 0)
-                goto out;
+        for (;;) {
+                err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+                                        data, xdpf->headroom, index, dma_map);
+                if (err < 0)
+                        goto unmap;
+
+                if (txd_info.last)
+                        break;
+
+                if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+                        txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+                        txd_pdma = qdma_to_pdma(ring, txd);
+                        if (txd == ring->last_free)
+                                goto unmap;
+
+                        tx_buf = mtk_desc_to_tx_buf(ring, txd,
+                                                    soc->txrx.txd_size);
+                        memset(tx_buf, 0, sizeof(*tx_buf));
+                        n_desc++;
+                }
+
+                memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+                txd_info.size = skb_frag_size(&sinfo->frags[index]);
+                txd_info.last = index + 1 == nr_frags;
+                data = skb_frag_address(&sinfo->frags[index]);
+
+                index++;
+        }
 
         /* store xdpf for cleanup */
-        tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
-        tx_buf->data = xdpf;
+        htx_buf->data = xdpf;
 
         if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
                 txd_pdma = qdma_to_pdma(ring, txd);
@@ -1624,7 +1651,24 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
                 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
                         MT7628_TX_CTX_IDX0);
         }
-out:
+
+        spin_unlock(&eth->page_lock);
+
+        return 0;
+
+unmap:
+        while (htxd != txd) {
+                txd_pdma = qdma_to_pdma(ring, htxd);
+                tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+                mtk_tx_unmap(eth, tx_buf, false);
+
+                htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+                if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+                        txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+                htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+        }
+
         spin_unlock(&eth->page_lock);
 
         return err;
@@ -1953,18 +1997,15 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
                 if (!tx_buf->data)
                         break;
 
-                if (tx_buf->type == MTK_TYPE_SKB &&
-                    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
-                        struct sk_buff *skb = tx_buf->data;
+                if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                        if (tx_buf->type == MTK_TYPE_SKB) {
+                                struct sk_buff *skb = tx_buf->data;
 
-                        bytes[mac] += skb->len;
-                        done[mac]++;
-                        budget--;
-                } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
-                           tx_buf->type == MTK_TYPE_XDP_NDO) {
+                                bytes[mac] += skb->len;
+                                done[mac]++;
+                        }
                         budget--;
                 }
-
                 mtk_tx_unmap(eth, tx_buf, true);
 
                 ring->last_free = desc;
@@ -1995,17 +2036,15 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
                 if (!tx_buf->data)
                         break;
 
-                if (tx_buf->type == MTK_TYPE_SKB &&
-                    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
-                        struct sk_buff *skb = tx_buf->data;
-                        bytes[0] += skb->len;
-                        done[0]++;
-                        budget--;
-                } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
-                           tx_buf->type == MTK_TYPE_XDP_NDO) {
+                if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                        if (tx_buf->type == MTK_TYPE_SKB) {
+                                struct sk_buff *skb = tx_buf->data;
+
+                                bytes[0] += skb->len;
+                                done[0]++;
+                        }
                         budget--;
                 }
-
                 mtk_tx_unmap(eth, tx_buf, true);
 
                 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
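
Taken together, the submit path now has a reserve/build/rollback shape: check up front that the ring can hold 1 + nr_frags descriptors, remember the head descriptor (htxd/htx_buf), build one descriptor per segment, and on a mapping failure walk over everything already built, unmapping and resetting it. The following self-contained userspace model shows that same shape with an injected failure; the ring type, the sizes and the failure hook are all made up for the demo.

/* Userspace model of the reserve/build/rollback loop added to
 * mtk_xdp_submit_frame(): one descriptor for the linear area plus one
 * per fragment, undone from the head descriptor if any step fails.
 */
#include <stdio.h>

#define RING_SIZE 8

struct toy_ring {
        int len[RING_SIZE];     /* 0 = slot free */
        int free_count;
        int next;
};

static int build_desc(struct toy_ring *r, int size, int seq, int fail_at)
{
        if (seq == fail_at)     /* injected "DMA mapping" failure */
                return -1;
        r->len[r->next] = size;
        r->next = (r->next + 1) % RING_SIZE;
        r->free_count--;
        return 0;
}

static int xdp_submit(struct toy_ring *r, int linear,
                      const int *frags, int nr_frags, int fail_at)
{
        int head = r->next;     /* remembered like htxd in the driver */
        int i;

        /* mirrors the "free_count <= 1 + nr_frags" early bail-out */
        if (r->free_count <= 1 + nr_frags)
                return -1;

        if (build_desc(r, linear, 0, fail_at) < 0)
                goto unmap;
        for (i = 0; i < nr_frags; i++)
                if (build_desc(r, frags[i], i + 1, fail_at) < 0)
                        goto unmap;
        return 1 + nr_frags;

unmap:
        /* undo every descriptor built since the head */
        while (r->next != head) {
                r->next = (r->next + RING_SIZE - 1) % RING_SIZE;
                r->len[r->next] = 0;
                r->free_count++;
        }
        return -1;
}

int main(void)
{
        struct toy_ring r = { .free_count = RING_SIZE };
        int frags[2] = { 512, 256 };
        int ret;

        ret = xdp_submit(&r, 1024, frags, 2, -1);
        printf("ok:   used %d descriptors, free=%d\n", ret, r.free_count);

        ret = xdp_submit(&r, 1024, frags, 2, 2);        /* fail on 2nd frag */
        printf("fail: ret=%d, ring restored, free=%d\n", ret, r.free_count);
        return 0;
}

Doing the free_count check once up front means the only mid-frame failure left to unwind is a mapping error, which keeps the rollback loop short.
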
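The other half of the patch is bookkeeping. mtk_xdp_frame_map() now stamps every descriptor's tx_buf with the XDP type and the MTK_DMA_DUMMY_DESC placeholder, and only afterwards is the head tx_buf repointed at the real xdp_frame. That lets mtk_poll_tx_qdma() and mtk_poll_tx_pdma() unmap every descriptor while consuming budget and releasing the frame exactly once, on the head. A small model of that head-versus-fragment distinction follows; all names and types are invented.

/* Toy model of the completion bookkeeping after this patch: every
 * descriptor of a frame is unmapped, but budget is charged and the
 * frame released only once, on the head descriptor. Fragment
 * descriptors carry a dummy marker (MTK_DMA_DUMMY_DESC in the driver).
 */
#include <stdio.h>

#define DUMMY ((void *)1)

struct toy_tx_buf {
        void *data;     /* frame pointer on the head, DUMMY on fragments */
};

/* returns how many frames (not descriptors) were completed */
static int complete(struct toy_tx_buf *bufs, int n)
{
        int frames = 0;

        for (int i = 0; i < n; i++) {
                if (!bufs[i].data)
                        break;
                if (bufs[i].data != DUMMY)
                        frames++;       /* head: charge budget, free frame */
                /* head or fragment: tear down the DMA mapping here */
                bufs[i].data = NULL;
        }
        return frames;
}

int main(void)
{
        int frame;      /* stands in for a struct xdp_frame */
        struct toy_tx_buf ring[3] = {
                { &frame },     /* head descriptor */
                { DUMMY },      /* first fragment */
                { DUMMY },      /* second fragment */
        };

        printf("frames completed: %d\n", complete(ring, 3));    /* 1, not 3 */
        return 0;
}
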
From 853246dbf5e806e2775b584a0726be281ab7b0a0 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Wed, 27 Jul 2022 23:20:52 +0200
Subject: [PATCH 3/3] net: ethernet: mtk_eth_soc: add xdp tx return bulking support

Convert the mtk_eth_soc driver to the xdp_return_frame_bulk API.

Signed-off-by: Lorenzo Bianconi
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 24235f8f0a8f..d9426b01f462 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1001,7 +1001,7 @@ static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
 }
 
 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
-                         bool napi)
+                         struct xdp_frame_bulk *bq, bool napi)
 {
         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
                 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
@@ -1044,6 +1044,8 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
 
                         if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
                                 xdp_return_frame_rx_napi(xdpf);
+                        else if (bq)
+                                xdp_return_frame_bulk(xdpf, bq);
                         else
                                 xdp_return_frame(xdpf);
                 }
@@ -1296,7 +1298,7 @@ err_dma:
                 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 
                 /* unmap dma */
-                mtk_tx_unmap(eth, tx_buf, false);
+                mtk_tx_unmap(eth, tx_buf, NULL, false);
 
                 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
                 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
@@ -1660,7 +1662,7 @@ unmap:
         while (htxd != txd) {
                 txd_pdma = qdma_to_pdma(ring, htxd);
                 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
-                mtk_tx_unmap(eth, tx_buf, false);
+                mtk_tx_unmap(eth, tx_buf, NULL, false);
 
                 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
                 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
@@ -1973,6 +1975,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
         struct mtk_tx_ring *ring = &eth->tx_ring;
         struct mtk_tx_buf *tx_buf;
+        struct xdp_frame_bulk bq;
         struct mtk_tx_dma *desc;
         u32 cpu, dma;
 
@@ -1980,6 +1983,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
         dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
 
         desc = mtk_qdma_phys_to_virt(ring, cpu);
+        xdp_frame_bulk_init(&bq);
 
         while ((cpu != dma) && budget) {
                 u32 next_cpu = desc->txd2;
@@ -2006,13 +2010,14 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
                         }
                         budget--;
                 }
-                mtk_tx_unmap(eth, tx_buf, true);
+                mtk_tx_unmap(eth, tx_buf, &bq, true);
 
                 ring->last_free = desc;
                 atomic_inc(&ring->free_count);
 
                 cpu = next_cpu;
         }
+        xdp_flush_frame_bulk(&bq);
 
         ring->last_free_ptr = cpu;
         mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
@@ -2025,11 +2030,13 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
 {
         struct mtk_tx_ring *ring = &eth->tx_ring;
         struct mtk_tx_buf *tx_buf;
+        struct xdp_frame_bulk bq;
         struct mtk_tx_dma *desc;
         u32 cpu, dma;
 
         cpu = ring->cpu_idx;
         dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+        xdp_frame_bulk_init(&bq);
 
         while ((cpu != dma) && budget) {
                 tx_buf = &ring->buf[cpu];
@@ -2045,7 +2052,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
                         }
                         budget--;
                 }
-                mtk_tx_unmap(eth, tx_buf, true);
+                mtk_tx_unmap(eth, tx_buf, &bq, true);
 
                 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
                 ring->last_free = desc;
@@ -2053,6 +2060,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
 
                 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
         }
+        xdp_flush_frame_bulk(&bq);
 
         ring->cpu_idx = cpu;
 
@@ -2262,7 +2270,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
         if (ring->buf) {
                 for (i = 0; i < MTK_DMA_SIZE; i++)
-                        mtk_tx_unmap(eth, &ring->buf[i], false);
+                        mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
                 kfree(ring->buf);
                 ring->buf = NULL;
         }
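
xdp_return_frame_bulk() defers the expensive part of freeing a frame: instead of returning each buffer to the page pool individually, the completion path queues up to 16 frames in the on-stack struct xdp_frame_bulk and hands them back in one operation, either when the queue fills or at the final xdp_flush_frame_bulk() after the loop; that is why an init/flush pair brackets both tx polling loops above. Below is a minimal userspace model of the same init/add/flush pattern, with malloc()/free() standing in for frame allocation and release, and BULK_SIZE mirroring XDP_BULK_QUEUE_SIZE.

/* Userspace model of the bulk-return pattern: accumulate objects in a
 * small on-stack array and release them in one batch, rather than
 * paying the release cost per object. free_batch() stands in for the
 * page pool's bulk release.
 */
#include <stdio.h>
#include <stdlib.h>

#define BULK_SIZE 16

struct toy_bulk {
        int count;
        void *q[BULK_SIZE];
};

static void bulk_init(struct toy_bulk *bq)
{
        bq->count = 0;
}

static void free_batch(void **q, int n)
{
        /* one "expensive" release operation for the whole batch */
        printf("releasing %d buffers in one call\n", n);
        for (int i = 0; i < n; i++)
                free(q[i]);
}

static void bulk_flush(struct toy_bulk *bq)
{
        if (bq->count) {
                free_batch(bq->q, bq->count);
                bq->count = 0;
        }
}

static void bulk_add(struct toy_bulk *bq, void *obj)
{
        if (bq->count == BULK_SIZE)
                bulk_flush(bq); /* auto-flush when full, like the real API */
        bq->q[bq->count++] = obj;
}

int main(void)
{
        struct toy_bulk bq;

        bulk_init(&bq);

        /* mimic a tx completion loop: init once, add per frame, flush once */
        for (int i = 0; i < 40; i++)
                bulk_add(&bq, malloc(32));
        bulk_flush(&bq);
        return 0;
}

The real helper also flushes early when consecutive frames belong to different memory allocators; the toy skips that detail.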