amd-xgbe: Add support for the skb->xmit_more flag

Add support to delay telling the hardware about data that is ready to
be transmitted if the skb->xmit_more flag is set.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Lendacky, Thomas
Date:      2014-11-20 11:04:08 -06:00
Committer: David S. Miller
Parent:    eb79e640fa
Commit:    16958a2b05
4 changed files with 78 additions and 22 deletions
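
For context, the pattern the patch implements is: write the Tx descriptors for each packet as usual, but only ring the doorbell (the DMA_CH_TDTR_LO tail-pointer write) once the stack signals the end of a burst or the queue has been stopped. Below is a minimal, self-contained C sketch of that idea; it is not part of the patch, and the "toy_" types and functions are illustrative stand-ins, not the driver's real structures.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the ring state and the tail-pointer register write */
struct toy_tx_ring {
    unsigned int cur;          /* next free descriptor index */
    bool doorbell_pending;     /* corresponds to ring->tx.xmit_more */
};

static void toy_ring_doorbell(struct toy_tx_ring *ring)
{
    printf("doorbell: tail pointer = %u\n", ring->cur);
    ring->doorbell_pending = false;
}

/* One call per packet from the transmit path */
static void toy_xmit(struct toy_tx_ring *ring, bool xmit_more, bool stopped)
{
    ring->cur++;    /* descriptors for this packet written here */

    /* Defer the doorbell while the stack promises more packets and the
     * queue is still running; otherwise notify the hardware now.
     */
    if (!xmit_more || stopped)
        toy_ring_doorbell(ring);
    else
        ring->doorbell_pending = true;
}

int main(void)
{
    struct toy_tx_ring ring = { 0 };

    toy_xmit(&ring, true, false);   /* batched: no doorbell yet */
    toy_xmit(&ring, true, false);   /* batched: no doorbell yet */
    toy_xmit(&ring, false, false);  /* end of burst: single doorbell */
    return 0;
}

Batching the tail-pointer write this way trades one MMIO write per packet for one per burst, which is the point of honoring xmit_more.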

drivers/net/ethernet/amd/xgbe/xgbe-desc.c

@@ -378,7 +378,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
ring->cur = 0;
ring->dirty = 0;
ring->tx.queue_stopped = 0;
memset(&ring->tx, 0, sizeof(ring->tx));
hw_if->tx_desc_init(channel);
}
@@ -422,8 +422,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
ring->cur = 0;
ring->dirty = 0;
ring->rx.realloc_index = 0;
ring->rx.realloc_threshold = 0;
memset(&ring->rx, 0, sizeof(ring->rx));
hw_if->rx_desc_init(channel);
}

drivers/net/ethernet/amd/xgbe/xgbe-dev.c

@@ -1325,6 +1325,29 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
xgbe_config_flow_control(pdata);
}
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring_data *rdata;
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
lower_32_bits(rdata->rdesc_dma));
/* Start the Tx coalescing timer */
if (pdata->tx_usecs && !channel->tx_timer_active) {
channel->tx_timer_active = 1;
hrtimer_start(&channel->tx_timer,
ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
HRTIMER_MODE_REL);
}
ring->tx.xmit_more = 0;
}
static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1528,20 +1551,13 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
/* Make sure ownership is written to the descriptor */
wmb();
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
ring->cur++;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
lower_32_bits(rdata->rdesc_dma));
/* Start the Tx coalescing timer */
if (pdata->tx_usecs && !channel->tx_timer_active) {
channel->tx_timer_active = 1;
hrtimer_start(&channel->tx_timer,
ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
HRTIMER_MODE_REL);
}
if (!packet->skb->xmit_more ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
xgbe_tx_start_xmit(channel, ring);
else
ring->tx.xmit_more = 1;
DBGPR(" %s: descriptors %u to %u written\n",
channel->name, start_index & (ring->rdesc_count - 1),
@@ -2802,6 +2818,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_desc_reset = xgbe_rx_desc_reset;
hw_if->is_last_desc = xgbe_is_last_desc;
hw_if->is_context_desc = xgbe_is_context_desc;
hw_if->tx_start_xmit = xgbe_tx_start_xmit;
/* For FLOW ctrl */
hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;

drivers/net/ethernet/amd/xgbe/xgbe-drv.c

@@ -225,6 +225,28 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
return (ring->rdesc_count - (ring->cur - ring->dirty));
}
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
struct xgbe_ring *ring, unsigned int count)
{
struct xgbe_prv_data *pdata = channel->pdata;
if (count > xgbe_tx_avail_desc(ring)) {
DBGPR(" Tx queue stopped, not enough descriptors available\n");
netif_stop_subqueue(pdata->netdev, channel->queue_index);
ring->tx.queue_stopped = 1;
/* If we haven't notified the hardware because of xmit_more
* support, tell it now
*/
if (ring->tx.xmit_more)
pdata->hw_if.tx_start_xmit(channel, ring);
return NETDEV_TX_BUSY;
}
return 0;
}
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
unsigned int rx_buf_size;
@@ -1199,6 +1221,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
unsigned int len;
unsigned int i;
packet->skb = skb;
context_desc = 0;
packet->rdesc_count = 0;
@@ -1447,13 +1471,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
xgbe_packet_info(pdata, ring, skb, packet);
/* Check that there are enough descriptors available */
if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
DBGPR(" Tx queue stopped, not enough descriptors available\n");
netif_stop_subqueue(netdev, channel->queue_index);
ring->tx.queue_stopped = 1;
ret = NETDEV_TX_BUSY;
ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
if (ret)
goto tx_netdev_return;
}
ret = xgbe_prep_tso(skb, packet);
if (ret) {
@@ -1480,6 +1500,11 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
xgbe_print_pkt(netdev, skb, true);
#endif
/* Stop the queue in advance if there may not be enough descriptors */
xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
ret = NETDEV_TX_OK;
tx_netdev_return:
spin_unlock_irqrestore(&ring->lock, flags);
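
One subtlety in the new xgbe_maybe_stop_tx_queue() helper: when it stops the queue, any doorbell that was deferred because of xmit_more must be flushed immediately, since the stack will stop calling the xmit routine and nothing else would hand the already-queued descriptors to the hardware. A simplified model of that interaction follows; the names are illustrative, not the driver's code.

#include <stdbool.h>
#include <stdio.h>

struct toy_ring {
    unsigned int free_descs;   /* descriptors currently available */
    bool doorbell_pending;     /* set when a doorbell was deferred */
    bool stopped;              /* queue stopped toward the stack */
};

static void toy_flush_doorbell(struct toy_ring *ring)
{
    printf("flushing deferred doorbell\n");
    ring->doorbell_pending = false;
}

/* Modeled on the maybe-stop logic: stop the queue when a worst-case packet
 * might not fit, and kick any deferred doorbell so the pending descriptors
 * are not stranded while the queue is stopped.
 */
static int toy_maybe_stop(struct toy_ring *ring, unsigned int needed)
{
    if (needed > ring->free_descs) {
        ring->stopped = true;
        if (ring->doorbell_pending)
            toy_flush_doorbell(ring);
        return 1;   /* caller would return NETDEV_TX_BUSY */
    }
    return 0;
}

int main(void)
{
    struct toy_ring ring = { .free_descs = 3, .doorbell_pending = true };

    if (toy_maybe_stop(&ring, 24))  /* worst case, cf. XGBE_TX_MAX_DESCS */
        printf("queue stopped, pending work handed to hardware\n");
    return 0;
}

The driver also calls the same helper at the end of xgbe_xmit() with the worst-case XGBE_TX_MAX_DESCS, so the queue is stopped ahead of time instead of failing the next packet.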

drivers/net/ethernet/amd/xgbe/xgbe.h

@@ -140,6 +140,17 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for an SKB:
* - Maximum number of SKB frags
* - Maximum descriptors for contiguous TSO/GSO packet
* - Possible context descriptor
* - Possible TSO header descriptor
*/
#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
@@ -225,6 +236,8 @@
struct xgbe_prv_data;
struct xgbe_packet_data {
struct sk_buff *skb;
unsigned int attributes;
unsigned int errors;
@@ -360,6 +373,7 @@ struct xgbe_ring {
union {
struct {
unsigned int queue_stopped;
unsigned int xmit_more;
unsigned short cur_mss;
unsigned short cur_vlan_ctag;
} tx;
@@ -523,6 +537,7 @@ struct xgbe_hw_if {
void (*tx_desc_reset)(struct xgbe_ring_data *);
int (*is_last_desc)(struct xgbe_ring_desc *);
int (*is_context_desc)(struct xgbe_ring_desc *);
void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
/* For FLOW ctrl */
int (*config_tx_flow_control)(struct xgbe_prv_data *);
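
As a rough sanity check of the XGBE_TX_MAX_DESCS budget added to xgbe.h: assuming 4 KiB pages (MAX_SKB_FRAGS of 17) and the 64 KiB GSO_MAX_SIZE of this kernel generation, the worst case works out to 17 + 5 + 2 = 24 descriptors. The assumed values below are build-dependent and are not taken from the patch.

#include <stdio.h>

/* Assumed, build-dependent values (not part of the patch) */
#define TOY_GSO_MAX_SIZE      65536U
#define TOY_MAX_SKB_FRAGS     17U

/* Mirror of the macros added to xgbe.h */
#define TOY_TX_MAX_BUF_SIZE   (0x3fff & ~(64 - 1))   /* 16320 */
#define TOY_TX_MAX_SPLIT      ((TOY_GSO_MAX_SIZE / TOY_TX_MAX_BUF_SIZE) + 1)
#define TOY_TX_MAX_DESCS      (TOY_MAX_SKB_FRAGS + TOY_TX_MAX_SPLIT + 2)

int main(void)
{
    /* 17 frags + 5 TSO splits + context descriptor + TSO header = 24 */
    printf("TX_MAX_SPLIT = %u\n", (unsigned int)TOY_TX_MAX_SPLIT);
    printf("TX_MAX_DESCS = %u\n", (unsigned int)TOY_TX_MAX_DESCS);
    return 0;
}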