staging: rtl8192e: rtl_core: Remove pci-dma-compat wrapper APIs.
The legacy API wrappers in include/linux/pci-dma-compat.h should go away, as they create an unnecessary mid-layer over the include/linux/dma-mapping.h APIs; use the dma-mapping.h APIs directly instead.

The patch has been generated with the coccinelle script below and compile-tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression E1, E2, E3;
@@
-    pci_alloc_consistent(E1, E2, E3)
+    dma_alloc_coherent(&E1->dev, E2, E3, GFP_ATOMIC)

@@
expression E1, E2, E3;
@@
-    pci_zalloc_consistent(E1, E2, E3)
+    dma_alloc_coherent(&E1->dev, E2, E3, GFP_ATOMIC)

@@
expression E1, E2, E3, E4;
@@
-    pci_free_consistent(E1, E2, E3, E4)
+    dma_free_coherent(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_map_single(E1, E2, E3, E4)
+    dma_map_single(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_unmap_single(E1, E2, E3, E4)
+    dma_unmap_single(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4, E5;
@@
-    pci_map_page(E1, E2, E3, E4, E5)
+    dma_map_page(&E1->dev, E2, E3, E4, E5)

@@
expression E1, E2, E3, E4;
@@
-    pci_unmap_page(E1, E2, E3, E4)
+    dma_unmap_page(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_map_sg(E1, E2, E3, E4)
+    dma_map_sg(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_unmap_sg(E1, E2, E3, E4)
+    dma_unmap_sg(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_dma_sync_single_for_cpu(E1, E2, E3, E4)
+    dma_sync_single_for_cpu(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_dma_sync_single_for_device(E1, E2, E3, E4)
+    dma_sync_single_for_device(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_dma_sync_sg_for_cpu(E1, E2, E3, E4)
+    dma_sync_sg_for_cpu(&E1->dev, E2, E3, E4)

@@
expression E1, E2, E3, E4;
@@
-    pci_dma_sync_sg_for_device(E1, E2, E3, E4)
+    dma_sync_sg_for_device(&E1->dev, E2, E3, E4)

@@
expression E1, E2;
@@
-    pci_dma_mapping_error(E1, E2)
+    dma_mapping_error(&E1->dev, E2)

@@
expression E1, E2;
@@
-    pci_set_consistent_dma_mask(E1, E2)
+    dma_set_coherent_mask(&E1->dev, E2)

@@
expression E1, E2;
@@
-    pci_set_dma_mask(E1, E2)
+    dma_set_mask(&E1->dev, E2)

Signed-off-by: Suraj Upadhyay <usuraj35@gmail.com>
Link: https://lore.kernel.org/r/20200711132349.GA21618@blackclown
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 8aaeac5beb
parent e4c9b73bab
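The conversion pattern is the same in every hunk below: each pci_* wrapper call on the struct pci_dev is replaced by the corresponding dma_* call on its embedded &pdev->dev, and the coherent allocations gain an explicit GFP_ATOMIC. A minimal sketch of the before/after pattern follows; the helper, buffer size and field names are hypothetical and not taken from this driver.

/*
 * Illustrative sketch only -- not part of this patch. It shows the general
 * pci-dma-compat -> dma-mapping conversion: the wrapper took a
 * struct pci_dev *, the direct API takes its embedded &pdev->dev.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical helper; skb and bufsize come from the caller. */
static int example_map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
				 unsigned int bufsize, dma_addr_t *mapping)
{
	/*
	 * Old style, via pci-dma-compat.h:
	 *   *mapping = pci_map_single(pdev, skb->data, bufsize,
	 *                             PCI_DMA_FROMDEVICE);
	 *   if (pci_dma_mapping_error(pdev, *mapping))
	 *           return -ENOMEM;
	 */

	/* New style: call dma-mapping.h directly on &pdev->dev. */
	*mapping = dma_map_single(&pdev->dev, skb->data, bufsize,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;

	return 0;
}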
@@ -1558,17 +1558,16 @@ static void _rtl92e_free_rx_ring(struct net_device *dev)
 			if (!skb)
 				continue;
 
-			pci_unmap_single(priv->pdev,
-					 *((dma_addr_t *)skb->cb),
-					 priv->rxbuffersize, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&priv->pdev->dev,
+					 *((dma_addr_t *)skb->cb),
+					 priv->rxbuffersize, DMA_FROM_DEVICE);
 			kfree_skb(skb);
 		}
 
-		pci_free_consistent(priv->pdev,
-				    sizeof(*priv->rx_ring[rx_queue_idx]) *
-				    priv->rxringcount,
-				    priv->rx_ring[rx_queue_idx],
-				    priv->rx_ring_dma[rx_queue_idx]);
+		dma_free_coherent(&priv->pdev->dev,
+				  sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
+				  priv->rx_ring[rx_queue_idx],
+				  priv->rx_ring_dma[rx_queue_idx]);
 		priv->rx_ring[rx_queue_idx] = NULL;
 	}
 }
@@ -1582,14 +1581,15 @@ static void _rtl92e_free_tx_ring(struct net_device *dev, unsigned int prio)
 		struct tx_desc *entry = &ring->desc[ring->idx];
 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
 
-		pci_unmap_single(priv->pdev, entry->TxBuffAddr,
-				 skb->len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&priv->pdev->dev, entry->TxBuffAddr,
+				 skb->len, DMA_TO_DEVICE);
 		kfree_skb(skb);
 		ring->idx = (ring->idx + 1) % ring->entries;
 	}
 
-	pci_free_consistent(priv->pdev, sizeof(*ring->desc) * ring->entries,
-			    ring->desc, ring->dma);
+	dma_free_coherent(&priv->pdev->dev,
+			  sizeof(*ring->desc) * ring->entries, ring->desc,
+			  ring->dma);
 	ring->desc = NULL;
 }
 
@@ -1676,8 +1676,8 @@ static void _rtl92e_tx_isr(struct net_device *dev, int prio)
 		}
 
 		skb = __skb_dequeue(&ring->queue);
-		pci_unmap_single(priv->pdev, entry->TxBuffAddr,
-				 skb->len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&priv->pdev->dev, entry->TxBuffAddr,
+				 skb->len, DMA_TO_DEVICE);
 
 		kfree_skb(skb);
 	}
@@ -1782,9 +1782,10 @@ static short _rtl92e_alloc_rx_ring(struct net_device *dev)
 	int i, rx_queue_idx;
 
 	for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
-		priv->rx_ring[rx_queue_idx] = pci_zalloc_consistent(priv->pdev,
-			sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
-			&priv->rx_ring_dma[rx_queue_idx]);
+		priv->rx_ring[rx_queue_idx] = dma_alloc_coherent(&priv->pdev->dev,
+			sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
+			&priv->rx_ring_dma[rx_queue_idx],
+			GFP_ATOMIC);
 		if (!priv->rx_ring[rx_queue_idx] ||
 		    (unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) {
 			netdev_warn(dev, "Cannot allocate RX ring\n");
@@ -1803,11 +1804,10 @@ static short _rtl92e_alloc_rx_ring(struct net_device *dev)
 			skb->dev = dev;
 			priv->rx_buf[rx_queue_idx][i] = skb;
 			mapping = (dma_addr_t *)skb->cb;
-			*mapping = pci_map_single(priv->pdev,
+			*mapping = dma_map_single(&priv->pdev->dev,
 						  skb_tail_pointer_rsl(skb),
-						  priv->rxbuffersize,
-						  PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+						  priv->rxbuffersize, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&priv->pdev->dev, *mapping)) {
 				dev_kfree_skb_any(skb);
 				return -1;
 			}
@@ -1831,7 +1831,8 @@ static int _rtl92e_alloc_tx_ring(struct net_device *dev, unsigned int prio,
 	dma_addr_t dma;
 	int i;
 
-	ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
+	ring = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ring) * entries,
+				  &dma, GFP_ATOMIC);
 	if (!ring || (unsigned long)ring & 0xFF) {
 		netdev_warn(dev, "Cannot allocate TX ring (prio = %d)\n", prio);
 		return -ENOMEM;
@@ -1905,9 +1906,9 @@ void rtl92e_reset_desc_ring(struct net_device *dev)
 				struct sk_buff *skb =
 					__skb_dequeue(&ring->queue);
 
-				pci_unmap_single(priv->pdev,
-						 entry->TxBuffAddr,
-						 skb->len, PCI_DMA_TODEVICE);
+				dma_unmap_single(&priv->pdev->dev,
+						 entry->TxBuffAddr, skb->len,
+						 DMA_TO_DEVICE);
 				kfree_skb(skb);
 				ring->idx = (ring->idx + 1) % ring->entries;
 			}
@@ -2028,10 +2029,8 @@ static void _rtl92e_rx_normal(struct net_device *dev)
 			if (unlikely(!new_skb))
 				goto done;
 
-			pci_unmap_single(priv->pdev,
-					 *((dma_addr_t *)skb->cb),
-					 priv->rxbuffersize,
-					 PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&priv->pdev->dev, *((dma_addr_t *)skb->cb),
+					 priv->rxbuffersize, DMA_FROM_DEVICE);
 
 			skb_put(skb, pdesc->Length);
 			skb_reserve(skb, stats.RxDrvInfoSize +
@@ -2074,12 +2073,10 @@ static void _rtl92e_rx_normal(struct net_device *dev)
 
 			priv->rx_buf[rx_queue_idx][priv->rx_idx[rx_queue_idx]] =
 				skb;
-			*((dma_addr_t *)skb->cb) = pci_map_single(priv->pdev,
-						skb_tail_pointer_rsl(skb),
-						priv->rxbuffersize,
-						PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(priv->pdev,
-						  *((dma_addr_t *)skb->cb))) {
+			*((dma_addr_t *)skb->cb) = dma_map_single(&priv->pdev->dev,
+						skb_tail_pointer_rsl(skb),
+						priv->rxbuffersize, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&priv->pdev->dev, *((dma_addr_t *)skb->cb))) {
 				dev_kfree_skb_any(skb);
 				return;
 			}
@@ -2417,8 +2414,8 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 			dev_info(&pdev->dev,
 				 "Unable to obtain 32bit DMA for consistent allocations\n");
 			goto err_pci_disable;