mirror of
https://github.com/torvalds/linux.git
synced 2024-11-24 05:02:12 +00:00
via-velocity: Re-enable transmit scatter-gather support
The velocity hardware can handle up to 7 memory segments. This can be
turned on and off via ethtool. The support was removed in commit
83c98a8cd0
but is re-enabled and cleaned up here. It's off by default.
Signed-off-by: Simon Kagstrom <simon.kagstrom@netinsight.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
2a5774f7d8
commit
c79992fdde
@ -9,7 +9,6 @@
|
|||||||
*
|
*
|
||||||
* TODO
|
* TODO
|
||||||
* rx_copybreak/alignment
|
* rx_copybreak/alignment
|
||||||
* Scatter gather
|
|
||||||
* More testing
|
* More testing
|
||||||
*
|
*
|
||||||
* The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
|
* The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
|
||||||
@ -1643,12 +1642,10 @@ out:
|
|||||||
*/
|
*/
|
||||||
static int velocity_init_td_ring(struct velocity_info *vptr)
|
static int velocity_init_td_ring(struct velocity_info *vptr)
|
||||||
{
|
{
|
||||||
dma_addr_t curr;
|
|
||||||
int j;
|
int j;
|
||||||
|
|
||||||
/* Init the TD ring entries */
|
/* Init the TD ring entries */
|
||||||
for (j = 0; j < vptr->tx.numq; j++) {
|
for (j = 0; j < vptr->tx.numq; j++) {
|
||||||
curr = vptr->tx.pool_dma[j];
|
|
||||||
|
|
||||||
vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
|
vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
|
||||||
sizeof(struct velocity_td_info),
|
sizeof(struct velocity_td_info),
|
||||||
@ -1714,21 +1711,27 @@ err_free_dma_rings_0:
|
|||||||
* Release a transmit buffer. If the buffer was preallocated then
|
* Release a transmit buffer. If the buffer was preallocated then
|
||||||
* recycle it, if not then unmap the buffer.
|
* recycle it, if not then unmap the buffer.
|
||||||
*/
|
*/
|
||||||
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
|
static void velocity_free_tx_buf(struct velocity_info *vptr,
|
||||||
|
struct velocity_td_info *tdinfo, struct tx_desc *td)
|
||||||
{
|
{
|
||||||
struct sk_buff *skb = tdinfo->skb;
|
struct sk_buff *skb = tdinfo->skb;
|
||||||
int i;
|
|
||||||
int pktlen;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Don't unmap the pre-allocated tx_bufs
|
* Don't unmap the pre-allocated tx_bufs
|
||||||
*/
|
*/
|
||||||
if (tdinfo->skb_dma) {
|
if (tdinfo->skb_dma) {
|
||||||
|
int i;
|
||||||
|
|
||||||
pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
|
|
||||||
for (i = 0; i < tdinfo->nskb_dma; i++) {
|
for (i = 0; i < tdinfo->nskb_dma; i++) {
|
||||||
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
|
size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
|
||||||
tdinfo->skb_dma[i] = 0;
|
|
||||||
|
/* For scatter-gather */
|
||||||
|
if (skb_shinfo(skb)->nr_frags > 0)
|
||||||
|
pktlen = max_t(size_t, pktlen,
|
||||||
|
td->td_buf[i].size & ~TD_QUEUE);
|
||||||
|
|
||||||
|
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
|
||||||
|
le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
dev_kfree_skb_irq(skb);
|
dev_kfree_skb_irq(skb);
|
||||||
@ -1930,7 +1933,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
|
|||||||
stats->tx_packets++;
|
stats->tx_packets++;
|
||||||
stats->tx_bytes += tdinfo->skb->len;
|
stats->tx_bytes += tdinfo->skb->len;
|
||||||
}
|
}
|
||||||
velocity_free_tx_buf(vptr, tdinfo);
|
velocity_free_tx_buf(vptr, tdinfo, td);
|
||||||
vptr->tx.used[qnum]--;
|
vptr->tx.used[qnum]--;
|
||||||
}
|
}
|
||||||
vptr->tx.tail[qnum] = idx;
|
vptr->tx.tail[qnum] = idx;
|
||||||
@ -2529,14 +2532,22 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
|
|||||||
struct velocity_td_info *tdinfo;
|
struct velocity_td_info *tdinfo;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int pktlen;
|
int pktlen;
|
||||||
__le16 len;
|
int index, prev;
|
||||||
int index;
|
int i = 0;
|
||||||
|
|
||||||
if (skb_padto(skb, ETH_ZLEN))
|
if (skb_padto(skb, ETH_ZLEN))
|
||||||
goto out;
|
goto out;
|
||||||
pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
|
|
||||||
|
|
||||||
len = cpu_to_le16(pktlen);
|
/* The hardware can handle at most 7 memory segments, so merge
|
||||||
|
* the skb if there are more */
|
||||||
|
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
|
||||||
|
kfree_skb(skb);
|
||||||
|
return NETDEV_TX_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
pktlen = skb_shinfo(skb)->nr_frags == 0 ?
|
||||||
|
max_t(unsigned int, skb->len, ETH_ZLEN) :
|
||||||
|
skb_headlen(skb);
|
||||||
|
|
||||||
spin_lock_irqsave(&vptr->lock, flags);
|
spin_lock_irqsave(&vptr->lock, flags);
|
||||||
|
|
||||||
@ -2553,11 +2564,24 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
|
|||||||
*/
|
*/
|
||||||
tdinfo->skb = skb;
|
tdinfo->skb = skb;
|
||||||
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
|
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
|
||||||
td_ptr->tdesc0.len = len;
|
td_ptr->tdesc0.len = cpu_to_le16(pktlen);
|
||||||
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
|
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
|
||||||
td_ptr->td_buf[0].pa_high = 0;
|
td_ptr->td_buf[0].pa_high = 0;
|
||||||
td_ptr->td_buf[0].size = len;
|
td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
|
||||||
tdinfo->nskb_dma = 1;
|
|
||||||
|
/* Handle fragments */
|
||||||
|
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||||
|
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||||
|
|
||||||
|
tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
|
||||||
|
frag->page_offset, frag->size,
|
||||||
|
PCI_DMA_TODEVICE);
|
||||||
|
|
||||||
|
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
|
||||||
|
td_ptr->td_buf[i + 1].pa_high = 0;
|
||||||
|
td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
|
||||||
|
}
|
||||||
|
tdinfo->nskb_dma = i + 1;
|
||||||
|
|
||||||
td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
|
td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
|
||||||
|
|
||||||
@ -2578,10 +2602,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
|
|||||||
td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
|
td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
|
||||||
td_ptr->tdesc1.TCR |= TCR0_IPCK;
|
td_ptr->tdesc1.TCR |= TCR0_IPCK;
|
||||||
}
|
}
|
||||||
{
|
|
||||||
|
|
||||||
int prev = index - 1;
|
|
||||||
|
|
||||||
|
prev = index - 1;
|
||||||
if (prev < 0)
|
if (prev < 0)
|
||||||
prev = vptr->options.numtx - 1;
|
prev = vptr->options.numtx - 1;
|
||||||
td_ptr->tdesc0.len |= OWNED_BY_NIC;
|
td_ptr->tdesc0.len |= OWNED_BY_NIC;
|
||||||
@ -2594,7 +2616,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
|
|||||||
td_ptr = &(vptr->tx.rings[qnum][prev]);
|
td_ptr = &(vptr->tx.rings[qnum][prev]);
|
||||||
td_ptr->td_buf[0].size |= TD_QUEUE;
|
td_ptr->td_buf[0].size |= TD_QUEUE;
|
||||||
mac_tx_queue_wake(vptr->mac_regs, qnum);
|
mac_tx_queue_wake(vptr->mac_regs, qnum);
|
||||||
}
|
|
||||||
dev->trans_start = jiffies;
|
dev->trans_start = jiffies;
|
||||||
spin_unlock_irqrestore(&vptr->lock, flags);
|
spin_unlock_irqrestore(&vptr->lock, flags);
|
||||||
out:
|
out:
|
||||||
@ -3374,6 +3396,7 @@ static const struct ethtool_ops velocity_ethtool_ops = {
|
|||||||
.set_wol = velocity_ethtool_set_wol,
|
.set_wol = velocity_ethtool_set_wol,
|
||||||
.get_msglevel = velocity_get_msglevel,
|
.get_msglevel = velocity_get_msglevel,
|
||||||
.set_msglevel = velocity_set_msglevel,
|
.set_msglevel = velocity_set_msglevel,
|
||||||
|
.set_sg = ethtool_op_set_sg,
|
||||||
.get_link = velocity_get_link,
|
.get_link = velocity_get_link,
|
||||||
.get_coalesce = velocity_get_coalesce,
|
.get_coalesce = velocity_get_coalesce,
|
||||||
.set_coalesce = velocity_set_coalesce,
|
.set_coalesce = velocity_set_coalesce,
|
||||||
|
Loading…
Reference in New Issue
Block a user