sfc: Remove redundant efx_xmit() function
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9bc183d7f9
commit 497f5ba323
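efx_xmit() had become a trivial pass-through: it ignored its efx argument and simply called efx_enqueue_skb(). The diff below exports efx_enqueue_skb() (dropping static) and points both callers, efx_hard_start_xmit() and the loopback selftest, at it directly. As a minimal, self-contained sketch of that pattern, assuming simplified stand-in types rather than the real driver structures:

/*
 * Sketch only: the struct and function names mirror the driver, but the
 * types are stand-ins; the real efx_tx_queue/sk_buff are far richer.
 */
#include <stdio.h>

struct sk_buff      { int len; };              /* stand-in for the kernel skb */
struct efx_nic      { const char *name; };     /* stand-in for the NIC object */
struct efx_tx_queue { struct efx_nic *efx; };  /* queue keeps a back-pointer  */

/* The real worker: maps fragments and posts them to the hardware ring. */
static int efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        printf("enqueue %d bytes on %s\n", skb->len, tx_queue->efx->name);
        return 0; /* NETDEV_TX_OK */
}

/* Before this commit: a wrapper that only forwarded its arguments.
 * The efx argument was unused, so every call site carried an extra
 * parameter and an extra level of indirection for no benefit. */
static inline int efx_xmit(struct efx_nic *efx,
                           struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        (void)efx;
        return efx_enqueue_skb(tx_queue, skb);
}

int main(void)
{
        struct efx_nic nic = { "sfc0" };
        struct efx_tx_queue txq = { &nic };
        struct sk_buff skb = { 64 };

        efx_xmit(&nic, &txq, &skb);      /* old call sites went through the wrapper */
        efx_enqueue_skb(&txq, &skb);     /* after the commit they call the worker directly */
        return 0;
}

Since the wrapper only forwarded its arguments, removing it changes no behaviour; it just drops the unused efx parameter from both call sites.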
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -23,9 +23,8 @@
 #define EFX_MEM_BAR 2
 
 /* TX */
-extern netdev_tx_t efx_xmit(struct efx_nic *efx,
-                            struct efx_tx_queue *tx_queue,
-                            struct sk_buff *skb);
+extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
+                                   struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_stop_queue(struct efx_nic *efx);
 extern void efx_wake_queue(struct efx_nic *efx);
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -425,7 +425,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
 
 		if (efx_dev_registered(efx))
 			netif_tx_lock_bh(efx->net_dev);
-		rc = efx_xmit(efx, tx_queue, skb);
+		rc = efx_enqueue_skb(tx_queue, skb);
 		if (efx_dev_registered(efx))
 			netif_tx_unlock_bh(efx->net_dev);
 
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -152,11 +152,13 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
  * If any DMA mapping fails, any mapped fragments will be unmapped,
  * the queue's insert pointer will be restored to its original value.
  *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
  * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
  * You must hold netif_tx_lock() to call this function.
  */
-static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
-                                   struct sk_buff *skb)
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	struct pci_dev *pci_dev = efx->pci_dev;
@@ -352,24 +354,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 	}
 }
 
-/* Initiate a packet transmission on the specified TX queue.
- * Note that returning anything other than NETDEV_TX_OK will cause the
- * OS to free the skb.
- *
- * This function is split out from efx_hard_start_xmit to allow the
- * loopback test to direct packets via specific TX queues. It is
- * therefore a non-static inline, so as not to penalise performance
- * for non-loopback transmissions.
- *
- * Context: netif_tx_lock held
- */
-inline netdev_tx_t efx_xmit(struct efx_nic *efx,
-                            struct efx_tx_queue *tx_queue, struct sk_buff *skb)
-{
-	/* Map fragments for DMA and add to TX queue */
-	return efx_enqueue_skb(tx_queue, skb);
-}
-
 /* Initiate a packet transmission. We use one channel per CPU
  * (sharing when we have more CPUs than channels). On Falcon, the TX
  * completion events will be directed back to the CPU that transmitted
@@ -393,7 +377,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	else
 		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
 
-	return efx_xmit(efx, tx_queue, skb);
+	return efx_enqueue_skb(tx_queue, skb);
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)