forked from Minki/linux
0e80bdc9a7
This patch fills the first descriptor just before granting the DMA engine, i.e. at the end of the xmit routine. It takes care of the algorithm adopted to mitigate interrupts and fixes the last-segment handling when there are no fragments. Moreover, the new implementation no longer passes a "ter" field when preparing the descriptors, because it is not necessary. The patch also documents the memory barrier in the xmit path. As a final result, it guarantees the same performance while fixing the case where small datagrams are sent, since that kind of traffic is impacted when no coalescing is done.

Signed-off-by: Fabrice Gasnier <fabrice.gasnier@st.com>
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: Alexandre TORGUE <alexandre.torgue@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
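The ordering described above is the essential point, so here is a minimal, self-contained sketch of it in plain C. This is not driver code: the toy_* names are hypothetical placeholders and __sync_synchronize() merely stands in for the kernel's write barrier. The sketch only models the sequence the commit enforces in the xmit path: fragment descriptors prepared first, the first descriptor filled last, and a barrier before ownership is handed to the DMA engine.

/*
 * Editor's sketch (hypothetical toy_* names, not stmmac symbols):
 * models the descriptor ordering described in the commit message.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_desc {
        unsigned int len;
        bool last_segment;
        bool own;               /* descriptor is owned by the DMA engine */
};

static void toy_prepare(struct toy_desc *d, unsigned int len, bool last)
{
        d->len = len;
        d->last_segment = last;
}

static void toy_xmit(struct toy_desc *ring, unsigned int head_len,
                     const unsigned int *frag_len, int nfrags)
{
        int i;

        /* Descriptors for the fragments are prepared and owned first. */
        for (i = 0; i < nfrags; i++) {
                toy_prepare(&ring[i + 1], frag_len[i], i == nfrags - 1);
                ring[i + 1].own = true;
        }

        /* The first descriptor is filled only at the end of xmit; with
         * no fragments it is also the last segment.
         */
        toy_prepare(&ring[0], head_len, nfrags == 0);

        /* Barrier before granting the DMA engine the first descriptor
         * (the real driver uses a wmb()-style barrier here).
         */
        __sync_synchronize();
        ring[0].own = true;
}

int main(void)
{
        struct toy_desc ring[4] = { { 0 } };
        const unsigned int frags[2] = { 1000, 500 };

        toy_xmit(ring, 1400, frags, 2);
        printf("desc0: own=%d last_segment=%d\n",
               ring[0].own, ring[0].last_segment);
        return 0;
}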
/*******************************************************************************
  Specialised functions for managing Ring mode

  Copyright(C) 2011  STMicroelectronics Ltd

  It defines all the functions used to handle the normal/enhanced
  descriptors when the DMA is configured to work in chained or
  in ring mode.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "stmmac.h"

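/* Map the linear part of an oversized frame in ring mode.  Frames larger
 * than 8KiB are split across two descriptors, and DES3 is used as the
 * second buffer pointer (DES2 + 4KiB) of each descriptor.  Returns the
 * last tx entry used (also stored in priv->cur_tx), or -1 on a DMA
 * mapping error.
 */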
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)p;
        unsigned int entry = priv->cur_tx;
        struct dma_desc *desc;
        unsigned int nopaged_len = skb_headlen(skb);
        unsigned int bmax, len;

        if (priv->extend_desc)
                desc = (struct dma_desc *)(priv->dma_etx + entry);
        else
                desc = priv->dma_tx + entry;

        if (priv->plat->enh_desc)
                bmax = BUF_SIZE_8KiB;
        else
                bmax = BUF_SIZE_2KiB;

        len = nopaged_len - bmax;

        if (nopaged_len > BUF_SIZE_8KiB) {

                desc->des2 = dma_map_single(priv->device, skb->data,
                                            bmax, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->device, desc->des2))
                        return -1;

                priv->tx_skbuff_dma[entry].buf = desc->des2;
                priv->tx_skbuff_dma[entry].len = bmax;
                priv->tx_skbuff_dma[entry].is_jumbo = true;

                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
                                                STMMAC_RING_MODE, 0, false);
                priv->tx_skbuff[entry] = NULL;
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

                if (priv->extend_desc)
                        desc = (struct dma_desc *)(priv->dma_etx + entry);
                else
                        desc = priv->dma_tx + entry;

                desc->des2 = dma_map_single(priv->device, skb->data + bmax,
                                            len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->device, desc->des2))
                        return -1;
                priv->tx_skbuff_dma[entry].buf = desc->des2;
                priv->tx_skbuff_dma[entry].len = len;
                priv->tx_skbuff_dma[entry].is_jumbo = true;

                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                                STMMAC_RING_MODE, 1, true);
        } else {
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            nopaged_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->device, desc->des2))
                        return -1;
                priv->tx_skbuff_dma[entry].buf = desc->des2;
                priv->tx_skbuff_dma[entry].len = nopaged_len;
                priv->tx_skbuff_dma[entry].is_jumbo = true;
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
                                                STMMAC_RING_MODE, 0, true);
        }

        priv->cur_tx = entry;

        return entry;
}

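/* In ring mode a frame counts as jumbo as soon as its length reaches
 * 4KiB, i.e. it no longer fits in a single descriptor buffer.
 */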
static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
{
        unsigned int ret = 0;

        if (len >= BUF_SIZE_4KiB)
                ret = 1;

        return ret;
}

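/* Re-point DES3 at the second half of the buffer whenever large
 * (>= 8KiB) DMA buffers are in use, so the descriptor keeps addressing
 * both halves after a refill.
 */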
static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;

        /* Fill DES3 in case of RING mode */
        if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
                p->des3 = p->des2 + BUF_SIZE_8KiB;
}

/* In ring mode we need to fill the desc3 because it is used as buffer */
static void stmmac_init_desc3(struct dma_desc *p)
{
        p->des3 = p->des2 + BUF_SIZE_8KiB;
}

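/* Clear DES3 when a tx descriptor is released, but only in the cases
 * where it was actually used: jumbo frames, or timestamping on the last
 * segment with normal descriptors.
 */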
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
        unsigned int entry = priv->dirty_tx;

        /* des3 is only used for jumbo frames tx or time stamping */
        if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
                     (priv->tx_skbuff_dma[entry].last_segment &&
                      !priv->extend_desc && priv->hwts_tx_en)))
                p->des3 = 0;
}

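/* Request a 16KiB DMA buffer size when the MTU needs more than a single
 * 8KiB buffer; a return of 0 leaves the buffer size choice to the caller.
 */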
static int stmmac_set_16kib_bfsize(int mtu)
{
        int ret = 0;
        if (unlikely(mtu >= BUF_SIZE_8KiB))
                ret = BUF_SIZE_16KiB;
        return ret;
}

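/* Ring-mode callbacks wired into the core driver; the chain-mode
 * counterpart of this table is provided by chain_mode.c.
 */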
const struct stmmac_mode_ops ring_mode_ops = {
        .is_jumbo_frm = stmmac_is_jumbo_frm,
        .jumbo_frm = stmmac_jumbo_frm,
        .refill_desc3 = stmmac_refill_desc3,
        .init_desc3 = stmmac_init_desc3,
        .clean_desc3 = stmmac_clean_desc3,
        .set_16kib_bfsize = stmmac_set_16kib_bfsize,
};