ixgbe: Write gso_segs and bytecount to the ring sooner

This change makes it so that gso_segs and bytecount are written to the ring
sooner.  This simplifies the logic for the two fields, since the segmentation
offloads can now update them within their own functions.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Alexander Duyck authored 2012-02-08 07:51:01 +00:00, committed by Jeff Kirsher
parent fd0db0ed02
commit 091a624686
2 changed files with 21 additions and 25 deletions
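
For orientation, a minimal sketch of the accounting scheme this commit moves
into the offload paths.  The doing_segmentation flag and segs_for_offload()
helper are hypothetical, for illustration only; the real fields are
first->bytecount and first->gso_segs as seen in the diffs below.

	/* Seeded for every packet at submit time (ixgbe_xmit_frame_ring) */
	first->bytecount = skb->len;
	first->gso_segs  = 1;

	/* Patched only by the offload that segments the packet (TSO/FSO) */
	if (doing_segmentation) {		/* hypothetical flag */
		first->gso_segs = segs_for_offload(skb);	/* hypothetical helper */
		/* every extra segment repeats the hdr_len header bytes on the wire */
		first->bytecount += (first->gso_segs - 1) * hdr_len;
	}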

drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c

@@ -532,9 +532,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
 	*hdr_len = sizeof(struct fcoe_crc_eof);
 
 	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
-	if (skb_is_gso(skb))
-		*hdr_len += (skb_transport_offset(skb) +
-			     sizeof(struct fc_frame_header));
+	if (skb_is_gso(skb)) {
+		*hdr_len += skb_transport_offset(skb) +
+			    sizeof(struct fc_frame_header);
+		/* update gso_segs and bytecount */
+		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
+					       skb_shinfo(skb)->gso_size);
+		first->bytecount += (first->gso_segs - 1) * *hdr_len;
+	}
 
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
 	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -763,6 +763,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		/* clear next_to_watch to prevent false hangs */
 		tx_buffer->next_to_watch = NULL;
 
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
 		/* free the skb */
 		dev_kfree_skb_any(tx_buffer->skb);
@@ -771,13 +775,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		do {
 			ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
 
-			if (likely(tx_desc == eop_desc)) {
+			if (likely(tx_desc == eop_desc))
 				eop_desc = NULL;
-
-				total_bytes += tx_buffer->bytecount;
-				total_packets += tx_buffer->gso_segs;
-			}
 
 			tx_buffer++;
 			tx_desc++;
 			i++;
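
Because the first descriptor's buffer now carries the totals for the whole
packet, cleanup can account statistics at the point where the skb is freed
instead of special-casing the EOP descriptor inside the unmap loop.  A
minimal sketch of that pattern, with simplified types that are not the
driver's actual structures:

	struct tx_buf {
		unsigned int bytecount;		/* skb->len plus replicated headers */
		unsigned short gso_segs;	/* segments put on the wire */
	};

	static void account_packet(const struct tx_buf *first,
				   unsigned int *total_bytes,
				   unsigned int *total_packets)
	{
		*total_bytes += first->bytecount;
		*total_packets += first->gso_segs;
	}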
@@ -6593,9 +6593,14 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 				     0, IPPROTO_TCP, 0);
 	}
 
+	/* compute header lengths */
 	l4len = tcp_hdrlen(skb);
 	*hdr_len = skb_transport_offset(skb) + l4len;
 
+	/* update gso size and bytecount with header size */
+	first->gso_segs = skb_shinfo(skb)->gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
 	/* mss_l4len_id: use 1 as index for TSO */
 	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
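
A worked example with made-up numbers (not from the commit): for a TSO skb
with skb->len = 7266, *hdr_len = 66 and gso_size = 1448, the stack reports
gso_segs = 5, so bytecount becomes 7266 + (5 - 1) * 66 = 7530, which matches
the 5 * 66 header bytes plus 7200 payload bytes actually transmitted.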
@@ -6757,7 +6762,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	u32 offset = 0;
 	u32 paylen = skb->len - hdr_len;
 	u16 i = tx_ring->next_to_use;
-	u16 gso_segs;
 
 #ifdef IXGBE_FCOE
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6843,22 +6847,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	tx_ring->next_to_use = i;
 
-	if (tx_flags & IXGBE_TX_FLAGS_TSO)
-		gso_segs = skb_shinfo(skb)->gso_segs;
-#ifdef IXGBE_FCOE
-	/* adjust for FCoE Sequence Offload */
-	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
-		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
-					skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
-	else
-		gso_segs = 1;
-
-	/* multiply data chunks by size of headers */
-	tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
-	tx_buffer_info->gso_segs = gso_segs;
-
-	netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer_info->bytecount);
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 
 	/* set the timestamp */
 	first->time_stamp = jiffies;
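
netdev_tx_sent_queue() feeds Byte Queue Limits, and every byte reported at
transmit time must be reported again on completion; with bytecount finalized
before ixgbe_tx_map() runs, first->bytecount is the natural value to hand
over.  The matching call in the cleanup path looks roughly like this (a
sketch, assuming the total_bytes/total_packets accumulators shown in the
ixgbe_clean_tx_irq hunk above):

	netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes);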
@@ -7071,6 +7060,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
 
 	/* if we have a HW VLAN tag being added default to the HW one */
 	if (vlan_tx_tag_present(skb)) {