@@ -63,8 +63,8 @@ static int phyaddr = -1;
 module_param(phyaddr, int, 0444);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
-#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
 
 static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
@@ -271,7 +271,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 	if (tx_q->dirty_tx > tx_q->cur_tx)
 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 	else
-		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
 
 	return avail;
 }
@@ -289,7 +289,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 	if (rx_q->dirty_rx <= rx_q->cur_rx)
 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
 	else
-		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
 
 	return dirty;
 }
@@ -1120,7 +1120,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 			head_rx = (void *)rx_q->dma_rx;
 
 		/* Display RX ring */
-		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
+		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
 	}
 }
 
@@ -1143,7 +1143,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 		else
 			head_tx = (void *)tx_q->dma_tx;
 
-		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
+		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
 	}
 }
 
@@ -1187,16 +1187,16 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
 	int i;
 
 	/* Clear the RX descriptors */
-	for (i = 0; i < DMA_RX_SIZE; i++)
+	for (i = 0; i < priv->dma_rx_size; i++)
 		if (priv->extend_desc)
 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
 					priv->use_riwt, priv->mode,
-					(i == DMA_RX_SIZE - 1),
+					(i == priv->dma_rx_size - 1),
 					priv->dma_buf_sz);
 		else
 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
 					priv->use_riwt, priv->mode,
-					(i == DMA_RX_SIZE - 1),
+					(i == priv->dma_rx_size - 1),
 					priv->dma_buf_sz);
 }
 
@@ -1213,8 +1213,8 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
 	int i;
 
 	/* Clear the TX descriptors */
-	for (i = 0; i < DMA_TX_SIZE; i++) {
-		int last = (i == (DMA_TX_SIZE - 1));
+	for (i = 0; i < priv->dma_tx_size; i++) {
+		int last = (i == (priv->dma_tx_size - 1));
 		struct dma_desc *p;
 
 		if (priv->extend_desc)
@@ -1368,7 +1368,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 
 		stmmac_clear_rx_descriptors(priv, queue);
 
-		for (i = 0; i < DMA_RX_SIZE; i++) {
+		for (i = 0; i < priv->dma_rx_size; i++) {
 			struct dma_desc *p;
 
 			if (priv->extend_desc)
@@ -1383,16 +1383,18 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 		}
 
 		rx_q->cur_rx = 0;
-		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
 
 		/* Setup the chained descriptor addresses */
 		if (priv->mode == STMMAC_CHAIN_MODE) {
 			if (priv->extend_desc)
 				stmmac_mode_init(priv, rx_q->dma_erx,
-						 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+						 rx_q->dma_rx_phy,
+						 priv->dma_rx_size, 1);
 			else
 				stmmac_mode_init(priv, rx_q->dma_rx,
-						 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+						 rx_q->dma_rx_phy,
+						 priv->dma_rx_size, 0);
 		}
 	}
 
@@ -1406,7 +1408,7 @@ err_init_rx_buffers:
 		if (queue == 0)
 			break;
 
-		i = DMA_RX_SIZE;
+		i = priv->dma_rx_size;
 		queue--;
 	}
 
@@ -1438,13 +1440,15 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 		if (priv->mode == STMMAC_CHAIN_MODE) {
 			if (priv->extend_desc)
 				stmmac_mode_init(priv, tx_q->dma_etx,
-						 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
+						 tx_q->dma_tx_phy,
+						 priv->dma_tx_size, 1);
 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
 				stmmac_mode_init(priv, tx_q->dma_tx,
-						 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
+						 tx_q->dma_tx_phy,
+						 priv->dma_tx_size, 0);
 		}
 
-		for (i = 0; i < DMA_TX_SIZE; i++) {
+		for (i = 0; i < priv->dma_tx_size; i++) {
 			struct dma_desc *p;
 			if (priv->extend_desc)
 				p = &((tx_q->dma_etx + i)->basic);
@@ -1508,7 +1512,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
 	int i;
 
-	for (i = 0; i < DMA_RX_SIZE; i++)
+	for (i = 0; i < priv->dma_rx_size; i++)
 		stmmac_free_rx_buffer(priv, queue, i);
 }
 
@@ -1521,7 +1525,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
 	int i;
 
-	for (i = 0; i < DMA_TX_SIZE; i++)
+	for (i = 0; i < priv->dma_tx_size; i++)
 		stmmac_free_tx_buffer(priv, queue, i);
 }
 
@@ -1543,11 +1547,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
 
 		/* Free DMA regions of consistent memory previously allocated */
 		if (!priv->extend_desc)
-			dma_free_coherent(priv->device,
-					  DMA_RX_SIZE * sizeof(struct dma_desc),
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
 					  rx_q->dma_rx, rx_q->dma_rx_phy);
 		else
-			dma_free_coherent(priv->device, DMA_RX_SIZE *
+			dma_free_coherent(priv->device, priv->dma_rx_size *
 					  sizeof(struct dma_extended_desc),
 					  rx_q->dma_erx, rx_q->dma_rx_phy);
 
@@ -1586,7 +1590,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
 			addr = tx_q->dma_tx;
 		}
 
-		size *= DMA_TX_SIZE;
+		size *= priv->dma_tx_size;
 
 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
 
@@ -1619,7 +1623,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 		rx_q->priv_data = priv;
 
 		pp_params.flags = PP_FLAG_DMA_MAP;
-		pp_params.pool_size = DMA_RX_SIZE;
+		pp_params.pool_size = priv->dma_rx_size;
 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
 		pp_params.order = ilog2(num_pages);
 		pp_params.nid = dev_to_node(priv->device);
@@ -1633,14 +1637,16 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 			goto err_dma;
 		}
 
-		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
+		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+					 sizeof(*rx_q->buf_pool),
 					 GFP_KERNEL);
 		if (!rx_q->buf_pool)
 			goto err_dma;
 
 		if (priv->extend_desc) {
 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
-							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+							   priv->dma_rx_size *
+							   sizeof(struct dma_extended_desc),
 							   &rx_q->dma_rx_phy,
 							   GFP_KERNEL);
 			if (!rx_q->dma_erx)
@@ -1648,7 +1654,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 
 		} else {
 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
-							  DMA_RX_SIZE * sizeof(struct dma_desc),
+							  priv->dma_rx_size *
+							  sizeof(struct dma_desc),
 							  &rx_q->dma_rx_phy,
 							  GFP_KERNEL);
 			if (!rx_q->dma_rx)
@@ -1687,13 +1694,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 		tx_q->queue_index = queue;
 		tx_q->priv_data = priv;
 
-		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
 					      sizeof(*tx_q->tx_skbuff_dma),
 					      GFP_KERNEL);
 		if (!tx_q->tx_skbuff_dma)
 			goto err_dma;
 
-		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
+		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
 					  sizeof(struct sk_buff *),
 					  GFP_KERNEL);
 		if (!tx_q->tx_skbuff)
@@ -1706,7 +1713,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 		else
 			size = sizeof(struct dma_desc);
 
-		size *= DMA_TX_SIZE;
+		size *= priv->dma_tx_size;
 
 		addr = dma_alloc_coherent(priv->device, size,
 					  &tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2016,7 +2023,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 
 		stmmac_release_tx_desc(priv, p, priv->mode);
 
-		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 	}
 	tx_q->dirty_tx = entry;
 
@@ -2025,7 +2032,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 
 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
 								queue))) &&
-	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
 
 		netif_dbg(priv, tx_done, priv->dev,
 			  "%s: restart transmit\n", __func__);
@@ -2298,7 +2305,8 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 				    rx_q->dma_rx_phy, chan);
 
 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-				     (DMA_RX_SIZE * sizeof(struct dma_desc));
+				     (priv->dma_rx_size *
+				      sizeof(struct dma_desc));
 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
 				       rx_q->rx_tail_addr, chan);
 	}
@@ -2382,12 +2390,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
 	/* set TX ring length */
 	for (chan = 0; chan < tx_channels_count; chan++)
 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
-				       (DMA_TX_SIZE - 1), chan);
+				       (priv->dma_tx_size - 1), chan);
 
 	/* set RX ring length */
 	for (chan = 0; chan < rx_channels_count; chan++)
 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
-				       (DMA_RX_SIZE - 1), chan);
+				       (priv->dma_rx_size - 1), chan);
 }
 
 /**
@@ -2767,6 +2775,11 @@ static int stmmac_open(struct net_device *dev)
 
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
 
+	if (!priv->dma_tx_size)
+		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+	if (!priv->dma_rx_size)
+		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
 	/* Earlier check for TBS */
 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2936,7 +2949,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
 		return false;
 
 	stmmac_set_tx_owner(priv, p);
-	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
 	return true;
 }
 
@@ -2964,7 +2977,8 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
 	while (tmp_len > 0) {
 		dma_addr_t curr_addr;
 
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+						priv->dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 
 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3071,7 +3085,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		stmmac_set_mss(priv, mss_desc, mss);
 		tx_q->mss = mss;
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+						priv->dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 	}
 
@@ -3178,7 +3193,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * ndo_start_xmit will fill this descriptor the next time it's
 	 * called and stmmac_tx_clean may clean up to this descriptor.
 	 */
-	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
 
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -3341,7 +3356,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		int len = skb_frag_size(frag);
 		bool last_segment = (i == (nfrags - 1));
 
-		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[entry]);
 
 		if (likely(priv->extend_desc))
@@ -3409,7 +3424,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * ndo_start_xmit will fill this descriptor the next time it's
 	 * called and stmmac_tx_clean may clean up to this descriptor.
 	 */
-	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 	tx_q->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {
@@ -3594,7 +3609,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 		dma_wmb();
 		stmmac_set_rx_owner(priv, p, use_rx_wd);
 
-		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
 	}
 	rx_q->dirty_rx = entry;
 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -3677,7 +3692,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		else
 			rx_head = (void *)rx_q->dma_rx;
 
-		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
+		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
 	}
 	while (count < limit) {
 		unsigned int buf1_len = 0, buf2_len = 0;
@@ -3719,7 +3734,8 @@ read_again:
 		if (unlikely(status & dma_own))
 			break;
 
-		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+						priv->dma_rx_size);
 		next_entry = rx_q->cur_rx;
 
 		if (priv->extend_desc)
@@ -3894,7 +3910,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
 
 	priv->xstats.napi_poll++;
 
-	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
 	work_done = min(work_done, budget);
 
 	if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -4287,11 +4303,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
 		if (priv->extend_desc) {
 			seq_printf(seq, "Extended descriptor ring:\n");
 			sysfs_display_ring((void *)rx_q->dma_erx,
-					   DMA_RX_SIZE, 1, seq);
+					   priv->dma_rx_size, 1, seq);
 		} else {
 			seq_printf(seq, "Descriptor ring:\n");
 			sysfs_display_ring((void *)rx_q->dma_rx,
-					   DMA_RX_SIZE, 0, seq);
+					   priv->dma_rx_size, 0, seq);
 		}
 	}
 
@@ -4303,11 +4319,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
 		if (priv->extend_desc) {
 			seq_printf(seq, "Extended descriptor ring:\n");
 			sysfs_display_ring((void *)tx_q->dma_etx,
-					   DMA_TX_SIZE, 1, seq);
+					   priv->dma_tx_size, 1, seq);
 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
 			seq_printf(seq, "Descriptor ring:\n");
 			sysfs_display_ring((void *)tx_q->dma_tx,
-					   DMA_TX_SIZE, 0, seq);
+					   priv->dma_tx_size, 0, seq);
 		}
 	}
 
@@ -4778,6 +4794,23 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 	return ret;
 }
 
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_running(dev))
+		stmmac_release(dev);
+
+	priv->dma_rx_size = rx_size;
+	priv->dma_tx_size = tx_size;
+
+	if (netif_running(dev))
+		ret = stmmac_open(dev);
+
+	return ret;
+}
+
 /**
  * stmmac_dvr_probe
  * @device: device pointer