/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>

#include "macb.h"
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */
#define RX_RING_SIZE		512	/* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128	/* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))

/*
 * Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
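
/*
 * Where 1230 comes from (a back-of-the-envelope check, not a datasheet
 * figure): a maximum-size 1518-byte frame takes 1518 * 8 / 10 Mbit/s,
 * roughly 1214 us, to drain at the slowest link speed, so
 * MACB_HALT_TIMEOUT comfortably covers one in-flight frame.
 */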

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
	return index & (TX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
{
	return &bp->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
{
	return &bp->tx_skb[macb_tx_ring_wrap(index)];
}

static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);

	return bp->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
	return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
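
/*
 * Note on the accessors above: TX_RING_SIZE and RX_RING_SIZE are powers
 * of two, so "index & (SIZE - 1)" is a cheap "index % SIZE". Head and
 * tail indices are kept free-running and only wrapped on access; the
 * usual one-slot-free circular-buffer convention of CIRC_CNT() and
 * CIRC_SPACE() from <linux/circ_buf.h> then tells a full ring apart
 * from an empty one without a separate element count.
 */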

void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
EXPORT_SYMBOL_GPL(macb_set_hwaddr);

void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register sets for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
EXPORT_SYMBOL_GPL(macb_get_hwaddr);

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
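
/*
 * The MAN register writes above assemble an IEEE 802.3 clause 22 MDIO
 * management frame: start-of-frame, read/write opcode, 5-bit PHY
 * address, 5-bit register address, the fixed turnaround code, and 16
 * data bits. NSR's IDLE bit is the shift-register-done flag, which is
 * why a cpu_relax() busy-wait is acceptable here: a 64-bit frame at a
 * typical 2.5 MHz MDC completes in roughly 26 us.
 */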

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed, used to pick the target frequency in Hz
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
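
/*
 * On the 50 ppm check above: ferr is rounded up in steps of
 * rate / 100000, i.e. units of 10 ppm, so "ferr > 5" fires once the
 * rounded clock is more than 50 ppm away from the ideal rate. At
 * SPEED_1000 (rate = 125 MHz) one step is 1250 Hz, so any rounded rate
 * further than 6250 Hz from 125 MHz triggers the warning.
 */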

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (!IS_ERR(bp->tx_clk))
		macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

	if (status_change) {
		if (phydev->link) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENXIO;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}

int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		/* try dt phy registration */
		err = of_mdiobus_register(bp->mii_bus, np);

		/* fall back to standard phy registration if no phy was
		 * found during dt phy registration
		 */
		if (!err && !phy_find_first(bp->mii_bus)) {
			for (i = 0; i < PHY_MAX_ADDR; i++) {
				struct phy_device *phydev;

				phydev = mdiobus_scan(bp->mii_bus, i);
				if (IS_ERR(phydev)) {
					err = PTR_ERR(phydev);
					break;
				}
			}

			if (err)
				goto err_out_unregister_bus;
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdio_irq;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(macb_mii_init);

static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}
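
/*
 * The pointer walk above relies on hw_stats.macb laying out the fields
 * from rx_pause_frames through tx_pause_frames contiguously and in
 * exactly the order of the hardware registers MACB_PFR..MACB_TPF; the
 * WARN_ON() asserts that the two walks stay in step. The driver
 * accumulates the hardware counters into software totals, hence "+="
 * rather than "=".
 */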

static int macb_halt_tx(struct macb *bp)
{
	unsigned long	halt_time, timeout;
	u32		status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
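
/*
 * Note: tx_skb->mapping == 0 doubles as the "nothing mapped" marker,
 * and mapped_as_page picks the unmap call that matches how the buffer
 * was mapped (dma_map_single() for linear data, skb_frag_dma_map() for
 * page fragments); mixing the two up trips CONFIG_DMA_API_DEBUG.
 */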

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb	*bp = container_of(work, struct macb, tx_error_task);
	struct macb_tx_skb	*tx_skb;
	struct sk_buff		*skb;
	unsigned int		tail;

	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
		    bp->tx_tail, bp->tx_head);

	/* Make sure nobody is trying to queue up new packets */
	netif_stop_queue(bp->dev);

	/*
	 * Stop transmission now
	 * (in case we have just queued new packets)
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* No need for the lock here as nobody will interrupt us anymore */

	/*
	 * Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(bp, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(bp, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(tail), skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}
		} else {
			/*
			 * "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about those.
			 * Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	macb_writel(bp, TBQP, bp->tx_ring_dma);
	/* Make TX ring reflect state of hardware */
	bp->tx_head = bp->tx_tail = 0;

	/* Now we are ready to start transmission again */
	netif_wake_queue(bp->dev);

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}

static void macb_tx_interrupt(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		macb_writel(bp, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(bp, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(bp, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(tail), skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	bp->tx_tail = tail;
	if (netif_queue_stopped(bp->dev)
			&& CIRC_CNT(bp->tx_head, bp->tx_tail,
				    TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}
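
/*
 * Wake-up hysteresis: CIRC_CNT(head, tail, size) is
 * (head - tail) & (size - 1), the number of descriptors still in
 * flight. The queue is only restarted once this drops to
 * MACB_TX_WAKEUP_THRESH (3/4 of the ring), so a nearly-full ring does
 * not bounce between stopped and woken on every completed frame.
 */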

static void gem_rx_refill(struct macb *bp)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
		entry = macb_rx_ring_wrap(bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;

		if (bp->rx_skbuff[entry] == NULL) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(skb == NULL)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size, DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == RX_RING_SIZE - 1)
				paddr |= MACB_BIT(RX_WRAP);
			bp->rx_ring[entry].addr = paddr;
			bp->rx_ring[entry].ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		   bp->rx_prepared_head, bp->rx_tail);
}
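
/*
 * The RX descriptor's addr word does double duty: its two low bits
 * carry the RX_USED and RX_WRAP flags, which is why RX_WRAP is simply
 * OR'ed into paddr above and why RX buffers must be at least
 * word-aligned. The hardware sets RX_USED when it fills a buffer;
 * writing a fresh address with the bit clear hands the slot back.
 */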

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/*
	 * When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb *bp, int budget)
{
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_tail);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = MACB_BFEXT(RX_FRMLEN, ctrl);

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->stats.rx_packets++;
		bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->mac_header, 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}
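
/*
 * The MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)) dance above strips
 * the RX_USED/RX_WRAP flag bits out of the descriptor word so that
 * dma_unmap_single() sees the same address dma_map_single() returned.
 * The checksum is trusted only when the hardware flagged it as checked
 * and the interface is not promiscuous: the driver deliberately
 * distrusts offload results for frames captured in promiscuous mode.
 */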

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		macb_rx_ring_wrap(first_frag),
		macb_rx_ring_wrap(last_frag), len);

	/*
	 * The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag), frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		   skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
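
/*
 * Design note: on plain MACB a received frame is scattered across
 * fixed 128-byte buffers (MACB_RX_BUFFER_SIZE), so macb_rx_frame()
 * must linearize it into a freshly allocated skb with one copy per
 * fragment. GEM avoids the copy entirely: gem_rx() maps one full-size
 * buffer per descriptor and hands the skb straight up the stack.
 */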

static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		   (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	status = macb_readl(bp, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			macb_writel(bp, IDR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&bp->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(bp);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/*
			 * TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(HRESP));
		}

		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
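
/*
 * MACB_CAPS_ISR_CLEAR_ON_WRITE: on some versions of this IP the
 * interrupt status register clears itself when read, on others a bit
 * stays set until software writes a 1 back to it. The repeated
 * "if (bp->caps & ...) macb_writel(bp, ISR, ...)" pattern above is the
 * write-one-to-clear half of that split.
 */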

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	macb_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
						     unsigned int len)
{
	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
}
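
/*
 * (len + max - 1) / max is the usual integer ceil(len / max). For
 * example, with the MACB per-descriptor limit of
 * (1 << MACB_TX_FRMLEN_SIZE) - 1 bytes (2047 with an 11-bit length
 * field), a 4000-byte fragment needs (4000 + 2046) / 2047 = 2
 * descriptors.
 */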

static unsigned int macb_tx_map(struct macb *bp,
				struct sk_buff *skb)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = bp->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1;
	u32 ctrl;

	/* First, map non-paged data */
	len = skb_headlen(skb);
	offset = 0;
	while (len) {
		size = min(len, bp->max_tx_length);
		entry = macb_tx_ring_wrap(tx_head);
		tx_skb = &bp->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(tx_head);
			tx_skb = &bp->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(tx_skb == NULL)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(i);
	ctrl = MACB_BIT(TX_USED);
	desc = &bp->tx_ring[entry];
	desc->ctrl = ctrl;

	do {
		i--;
		entry = macb_tx_ring_wrap(i);
		tx_skb = &bp->tx_skb[entry];
		desc = &bp->tx_ring[entry];

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (TX_RING_SIZE - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* Set TX buffer descriptor */
		desc->addr = tx_skb->mapping;
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != bp->tx_head);

	bp->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = bp->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(bp, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
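
/*
 * Why the descriptors are written back-to-front: the controller only
 * checks TX_USED on the first descriptor of a frame. By clearing that
 * bit last (with a wmb() between the address write and the control
 * write), the hardware can never start on a chain whose later
 * descriptors are still being filled in.
 */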

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned int count, nr_frags, frag_size, f;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		   "start_xmit: len %u head %p data %p tail %p end %p\n",
		   skb->len, skb->head, skb->data,
		   skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		count += macb_count_tx_descriptors(bp, frag_size);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   bp->tx_head, bp->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();

	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
		netif_stop_queue(dev);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
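
/*
 * Example of the rounding above: a caller passing 1522 bytes (a
 * 1500-byte MTU plus Ethernet overhead; the exact figure depends on
 * the caller) gets rx_buffer_size expanded to 1536, the next multiple
 * of RX_BUFFER_MULTIPLE (64), matching the buffer-size granularity the
 * GEM DMA configuration expects.
 */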

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	dma_addr_t		addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = bp->rx_skbuff[i];

		if (skb == NULL)
			continue;

		desc = &bp->rx_ring[i];
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}

static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  RX_RING_SIZE * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	if (bp->tx_skb) {
		kfree(bp->tx_skb);
		bp->tx_skb = NULL;
	}
	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}
	if (bp->tx_ring) {
		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
				  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
	}
}
|
|
|
|
|
|
|
|
static int gem_alloc_rx_buffers(struct macb *bp)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
|
|
|
|
size = RX_RING_SIZE * sizeof(struct sk_buff *);
|
|
|
|
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
|
|
|
|
if (!bp->rx_skbuff)
|
|
|
|
return -ENOMEM;
|
|
|
|
else
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated %d RX struct sk_buff entries at %p\n",
|
|
|
|
RX_RING_SIZE, bp->rx_skbuff);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_alloc_rx_buffers(struct macb *bp)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
|
|
|
|
size = RX_RING_SIZE * bp->rx_buffer_size;
|
|
|
|
bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&bp->rx_buffers_dma, GFP_KERNEL);
|
|
|
|
if (!bp->rx_buffers)
|
|
|
|
return -ENOMEM;
|
|
|
|
else
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
|
|
|
|
return 0;
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_alloc_consistent(struct macb *bp)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
|
2012-10-31 06:04:55 +00:00
|
|
|
size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
|
2006-11-09 13:51:17 +00:00
|
|
|
bp->tx_skb = kmalloc(size, GFP_KERNEL);
|
|
|
|
if (!bp->tx_skb)
|
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
size = RX_RING_BYTES;
|
|
|
|
bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&bp->rx_ring_dma, GFP_KERNEL);
|
|
|
|
if (!bp->rx_ring)
|
|
|
|
goto out_err;
|
2011-03-08 20:27:08 +00:00
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
size = TX_RING_BYTES;
|
|
|
|
bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&bp->tx_ring_dma, GFP_KERNEL);
|
|
|
|
if (!bp->tx_ring)
|
|
|
|
goto out_err;
|
2011-03-08 20:27:08 +00:00
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2013-06-04 21:57:12 +00:00
|
|
|
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
|
2006-11-09 13:51:17 +00:00
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_err:
|
|
|
|
macb_free_consistent(bp);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
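/*
 * Sizing sketch (assuming the two-u32, 8-byte struct macb_dma_desc
 * layout from macb.h):
 *
 *	RX_RING_BYTES = 512 descriptors * 8 bytes = 4096 bytes
 *	TX_RING_BYTES = 128 descriptors * 8 bytes = 1024 bytes
 *
 * plus TX_RING_SIZE * sizeof(struct macb_tx_skb) for bookkeeping and
 * whatever mog_alloc_rx_buffers() maps for the RX data buffers.
 */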
|
|
|
|
|
2013-06-04 21:57:12 +00:00
|
|
|
static void gem_init_rings(struct macb *bp)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < TX_RING_SIZE; i++) {
|
|
|
|
bp->tx_ring[i].addr = 0;
|
|
|
|
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
|
|
|
|
}
|
|
|
|
bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
|
|
|
|
|
|
|
|
bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
|
|
|
|
|
|
|
|
gem_rx_refill(bp);
|
|
|
|
}
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
static void macb_init_rings(struct macb *bp)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
dma_addr_t addr;
|
|
|
|
|
|
|
|
addr = bp->rx_buffers_dma;
|
|
|
|
for (i = 0; i < RX_RING_SIZE; i++) {
|
|
|
|
bp->rx_ring[i].addr = addr;
|
|
|
|
bp->rx_ring[i].ctrl = 0;
|
2013-06-04 21:57:11 +00:00
|
|
|
addr += bp->rx_buffer_size;
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
|
|
|
bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
|
|
|
|
|
|
|
|
for (i = 0; i < TX_RING_SIZE; i++) {
|
|
|
|
bp->tx_ring[i].addr = 0;
|
|
|
|
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
|
|
|
|
}
|
|
|
|
bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
|
|
|
|
|
|
|
|
bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
|
|
|
|
}
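/*
 * Editorial note: OR-ing MACB_BIT(RX_WRAP) into the last descriptor's
 * addr word above is safe because each RX buffer address is a multiple
 * of RX_BUFFER_MULTIPLE (64), leaving the two low address bits free
 * for the RX_USED and RX_WRAP flags.
 */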
|
|
|
|
|
|
|
|
static void macb_reset_hw(struct macb *bp)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Disable RX and TX (XXX: Should we halt the transmission
|
|
|
|
* more gracefully?)
|
|
|
|
*/
|
|
|
|
macb_writel(bp, NCR, 0);
|
|
|
|
|
|
|
|
/* Clear the stats registers (XXX: Update stats first?) */
|
|
|
|
macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
|
|
|
|
|
|
|
|
/* Clear all status flags */
|
2012-10-22 08:45:31 +00:00
|
|
|
macb_writel(bp, TSR, -1);
|
|
|
|
macb_writel(bp, RSR, -1);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
/* Disable all interrupts */
|
2012-10-22 08:45:31 +00:00
|
|
|
macb_writel(bp, IDR, -1);
|
2006-11-09 13:51:17 +00:00
|
|
|
macb_readl(bp, ISR);
|
|
|
|
}
|
|
|
|
|
2011-03-09 16:22:54 +00:00
|
|
|
static u32 gem_mdc_clk_div(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
unsigned long pclk_hz = clk_get_rate(bp->pclk);
|
|
|
|
|
|
|
|
if (pclk_hz <= 20000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV8);
|
|
|
|
else if (pclk_hz <= 40000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV16);
|
|
|
|
else if (pclk_hz <= 80000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV32);
|
|
|
|
else if (pclk_hz <= 120000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV48);
|
|
|
|
else if (pclk_hz <= 160000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV64);
|
|
|
|
else
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV96);
|
|
|
|
|
|
|
|
return config;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 macb_mdc_clk_div(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
unsigned long pclk_hz;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp))
|
|
|
|
return gem_mdc_clk_div(bp);
|
|
|
|
|
|
|
|
pclk_hz = clk_get_rate(bp->pclk);
|
|
|
|
if (pclk_hz <= 20000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV8);
|
|
|
|
else if (pclk_hz <= 40000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV16);
|
|
|
|
else if (pclk_hz <= 80000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV32);
|
|
|
|
else
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV64);
|
|
|
|
|
|
|
|
return config;
|
|
|
|
}
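/*
 * Worked example (editorial sketch, assuming the usual IEEE 802.3
 * limit of about 2.5 MHz on MDC): with pclk = 133 MHz on a GEM, the
 * "<= 160 MHz" bucket selects GEM_CLK_DIV64, giving
 *
 *	MDC = 133 MHz / 64 ~= 2.08 MHz
 *
 * which stays under the limit. The plain MACB has no divider beyond
 * 64, so its table effectively assumes pclk <= 160 MHz.
 */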
|
|
|
|
|
2011-03-09 16:29:59 +00:00
|
|
|
/*
|
|
|
|
* Get the DMA bus width field of the network configuration register that we
|
|
|
|
* should program. We find the width from decoding the design configuration
|
|
|
|
* register to find the maximum supported data bus width.
|
|
|
|
*/
|
|
|
|
static u32 macb_dbw(struct macb *bp)
|
|
|
|
{
|
|
|
|
if (!macb_is_gem(bp))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
|
|
|
|
case 4:
|
|
|
|
return GEM_BF(DBW, GEM_DBW128);
|
|
|
|
case 2:
|
|
|
|
return GEM_BF(DBW, GEM_DBW64);
|
|
|
|
case 1:
|
|
|
|
default:
|
|
|
|
return GEM_BF(DBW, GEM_DBW32);
|
|
|
|
}
|
|
|
|
}
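/*
 * Example: a GEM whose DCFG1 DBWDEF field reads 2 reports a 64-bit
 * data bus, so GEM_DBW64 is programmed into NCFGR; any unrecognised
 * value falls back to the 32-bit setting.
 */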
|
|
|
|
|
2011-03-14 17:38:30 +00:00
|
|
|
/*
|
2012-11-23 03:49:01 +00:00
|
|
|
* Configure the receive DMA engine
|
|
|
|
* - use the correct receive buffer size
|
2014-07-24 11:50:58 +00:00
|
|
|
* - set best burst length for DMA operations
|
2012-11-23 03:49:01 +00:00
|
|
|
* (if not supported by FIFO, it will fallback to default)
|
|
|
|
* - set both rx/tx packet buffers to full memory size
|
|
|
|
* These are configurable parameters for GEM.
|
2011-03-14 17:38:30 +00:00
|
|
|
*/
|
|
|
|
static void macb_configure_dma(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 dmacfg;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp)) {
|
|
|
|
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
|
2013-06-04 21:57:11 +00:00
|
|
|
dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
|
2014-07-24 11:50:58 +00:00
|
|
|
if (bp->dma_burst_length)
|
|
|
|
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
|
2012-11-23 03:49:01 +00:00
|
|
|
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
|
2013-03-27 23:07:06 +00:00
|
|
|
dmacfg &= ~GEM_BIT(ENDIA);
|
2014-07-24 11:51:00 +00:00
|
|
|
if (bp->dev->features & NETIF_F_HW_CSUM)
|
|
|
|
dmacfg |= GEM_BIT(TXCOEN);
|
|
|
|
else
|
|
|
|
dmacfg &= ~GEM_BIT(TXCOEN);
|
2014-07-24 11:50:58 +00:00
|
|
|
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
|
|
|
|
dmacfg);
|
2011-03-14 17:38:30 +00:00
|
|
|
gem_writel(bp, DMACFG, dmacfg);
|
|
|
|
}
|
|
|
|
}
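/*
 * Example: the pc302gem and sama5d3 configurations below request
 * dma_burst_length = 16 (16-beat bursts via FBLDO) while sama5d4
 * requests 4; a zero burst length leaves the controller's reset
 * default untouched.
 */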
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
static void macb_init_hw(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
|
|
|
|
macb_reset_hw(bp);
|
2012-11-07 08:14:52 +00:00
|
|
|
macb_set_hwaddr(bp);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2011-03-09 16:22:54 +00:00
|
|
|
config = macb_mdc_clk_div(bp);
|
2012-10-31 06:04:58 +00:00
|
|
|
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
|
2006-11-09 13:51:17 +00:00
|
|
|
config |= MACB_BIT(PAE); /* PAuse Enable */
|
|
|
|
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
|
2010-04-08 04:53:41 +00:00
|
|
|
config |= MACB_BIT(BIG); /* Receive oversized frames */
|
2006-11-09 13:51:17 +00:00
|
|
|
if (bp->dev->flags & IFF_PROMISC)
|
|
|
|
config |= MACB_BIT(CAF); /* Copy All Frames */
|
2014-07-24 11:51:01 +00:00
|
|
|
else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
|
|
|
|
config |= GEM_BIT(RXCOEN);
|
2006-11-09 13:51:17 +00:00
|
|
|
if (!(bp->dev->flags & IFF_BROADCAST))
|
|
|
|
config |= MACB_BIT(NBC); /* No BroadCast */
|
2011-03-09 16:29:59 +00:00
|
|
|
config |= macb_dbw(bp);
|
2006-11-09 13:51:17 +00:00
|
|
|
macb_writel(bp, NCFGR, config);
|
2012-11-02 07:09:24 +00:00
|
|
|
bp->speed = SPEED_10;
|
|
|
|
bp->duplex = DUPLEX_HALF;
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2011-03-14 17:38:30 +00:00
|
|
|
macb_configure_dma(bp);
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
/* Initialize TX and RX buffers */
|
|
|
|
macb_writel(bp, RBQP, bp->rx_ring_dma);
|
|
|
|
macb_writel(bp, TBQP, bp->tx_ring_dma);
|
|
|
|
|
|
|
|
/* Enable TX and RX */
|
2007-07-12 17:07:24 +00:00
|
|
|
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
/* Enable interrupts */
|
2012-10-31 06:04:57 +00:00
|
|
|
macb_writel(bp, IER, (MACB_RX_INT_FLAGS
|
|
|
|
| MACB_TX_INT_FLAGS
|
2006-11-09 13:51:17 +00:00
|
|
|
| MACB_BIT(HRESP)));
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2007-07-12 17:07:25 +00:00
|
|
|
/*
|
|
|
|
* The hash address register is 64 bits long and takes up two
|
|
|
|
* locations in the memory map. The least significant bits are stored
|
|
|
|
* in EMAC_HSL and the most significant bits in EMAC_HSH.
|
|
|
|
*
|
|
|
|
* The unicast hash enable and the multicast hash enable bits in the
|
|
|
|
* network configuration register enable the reception of hash matched
|
|
|
|
* frames. The destination address is reduced to a 6 bit index into
|
|
|
|
* the 64 bit hash register using the following hash function. The
|
|
|
|
* hash function is an exclusive or of every sixth bit of the
|
|
|
|
* destination address.
|
|
|
|
*
|
|
|
|
* hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
|
|
|
|
* hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
|
|
|
|
* hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
|
|
|
|
* hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
|
|
|
|
* hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
|
|
|
|
* hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
|
|
|
|
*
|
|
|
|
* da[0] represents the least significant bit of the first byte
|
|
|
|
* received, that is, the multicast/unicast indicator, and da[47]
|
|
|
|
* represents the most significant bit of the last byte received. If
|
|
|
|
* the hash index, hi[n], points to a bit that is set in the hash
|
|
|
|
* register then the frame will be matched according to whether the
|
|
|
|
* frame is multicast or unicast. A multicast match will be signalled
|
|
|
|
* if the multicast hash enable bit is set, da[0] is 1 and the hash
|
|
|
|
* index points to a bit set in the hash register. A unicast match
|
|
|
|
* will be signalled if the unicast hash enable bit is set, da[0] is 0
|
|
|
|
* and the hash index points to a bit set in the hash register. To
|
|
|
|
* receive all multicast frames, the hash register should be set with
|
|
|
|
* all ones and the multicast hash enable bit should be set in the
|
|
|
|
* network configuration register.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static inline int hash_bit_value(int bitnr, __u8 *addr)
|
|
|
|
{
|
|
|
|
if (addr[bitnr / 8] & (1 << (bitnr % 8)))
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return the hash index value for the specified address.
|
|
|
|
*/
|
|
|
|
static int hash_get_index(__u8 *addr)
|
|
|
|
{
|
|
|
|
int i, j, bitval;
|
|
|
|
int hash_index = 0;
|
|
|
|
|
|
|
|
for (j = 0; j < 6; j++) {
|
|
|
|
for (i = 0, bitval = 0; i < 8; i++)
|
|
|
|
bitval ^= hash_bit_value(i * 6 + j, addr);
|
|
|
|
|
|
|
|
hash_index |= (bitval << j);
|
|
|
|
}
|
|
|
|
|
|
|
|
return hash_index;
|
|
|
|
}
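/*
 * Worked example (editorial, computed from the definition above): for
 * the multicast address 01:00:5e:00:00:01 (the IPv4 all-hosts group),
 * hash_bit_value() reads bits LSB-first within each byte and the six
 * XOR columns evaluate to
 *
 *	hi[5..0] = 1 0 0 1 1 0
 *
 * so hash_get_index() returns 0b100110 == 38.
 */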
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add multicast addresses to the internal multicast-hash table.
|
|
|
|
*/
|
|
|
|
static void macb_sethashtable(struct net_device *dev)
|
|
|
|
{
|
2010-04-01 21:22:57 +00:00
|
|
|
struct netdev_hw_addr *ha;
|
2007-07-12 17:07:25 +00:00
|
|
|
unsigned long mc_filter[2];
|
2010-02-23 09:19:49 +00:00
|
|
|
unsigned int bitnr;
|
2007-07-12 17:07:25 +00:00
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
|
|
|
mc_filter[0] = mc_filter[1] = 0;
|
|
|
|
|
2010-04-01 21:22:57 +00:00
|
|
|
netdev_for_each_mc_addr(ha, dev) {
|
|
|
|
bitnr = hash_get_index(ha->addr);
|
2007-07-12 17:07:25 +00:00
|
|
|
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
|
|
|
|
}
|
|
|
|
|
2011-11-08 10:12:32 +00:00
|
|
|
macb_or_gem_writel(bp, HRB, mc_filter[0]);
|
|
|
|
macb_or_gem_writel(bp, HRT, mc_filter[1]);
|
2007-07-12 17:07:25 +00:00
|
|
|
}
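/*
 * Continuing the worked example: index 38 gives bitnr >> 5 == 1 and
 * bitnr & 31 == 6, so bit 6 of mc_filter[1] is set and lands in the
 * upper hash register (HRT).
 */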
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable/Disable promiscuous and multicast modes.
|
|
|
|
*/
|
2012-10-18 11:01:15 +00:00
|
|
|
void macb_set_rx_mode(struct net_device *dev)
|
2007-07-12 17:07:25 +00:00
|
|
|
{
|
|
|
|
unsigned long cfg;
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
|
|
|
cfg = macb_readl(bp, NCFGR);
|
|
|
|
|
2014-07-24 11:51:01 +00:00
|
|
|
if (dev->flags & IFF_PROMISC) {
|
2007-07-12 17:07:25 +00:00
|
|
|
/* Enable promiscuous mode */
|
|
|
|
cfg |= MACB_BIT(CAF);
|
2014-07-24 11:51:01 +00:00
|
|
|
|
|
|
|
/* Disable RX checksum offload */
|
|
|
|
if (macb_is_gem(bp))
|
|
|
|
cfg &= ~GEM_BIT(RXCOEN);
|
|
|
|
} else {
|
|
|
|
/* Disable promiscuous mode */
|
2007-07-12 17:07:25 +00:00
|
|
|
cfg &= ~MACB_BIT(CAF);
|
|
|
|
|
2014-07-24 11:51:01 +00:00
|
|
|
/* Enable RX checksum offload only if requested */
|
|
|
|
if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
|
|
|
|
cfg |= GEM_BIT(RXCOEN);
|
|
|
|
}
|
|
|
|
|
2007-07-12 17:07:25 +00:00
|
|
|
if (dev->flags & IFF_ALLMULTI) {
|
|
|
|
/* Enable all multicast mode */
|
2011-11-08 10:12:32 +00:00
|
|
|
macb_or_gem_writel(bp, HRB, -1);
|
|
|
|
macb_or_gem_writel(bp, HRT, -1);
|
2007-07-12 17:07:25 +00:00
|
|
|
cfg |= MACB_BIT(NCFGR_MTI);
|
2010-02-08 04:30:35 +00:00
|
|
|
} else if (!netdev_mc_empty(dev)) {
|
2007-07-12 17:07:25 +00:00
|
|
|
/* Enable specific multicasts */
|
|
|
|
macb_sethashtable(dev);
|
|
|
|
cfg |= MACB_BIT(NCFGR_MTI);
|
|
|
|
} else if (dev->flags & (~IFF_ALLMULTI)) {
|
|
|
|
/* Disable all multicast mode */
|
2011-11-08 10:12:32 +00:00
|
|
|
macb_or_gem_writel(bp, HRB, 0);
|
|
|
|
macb_or_gem_writel(bp, HRT, 0);
|
2007-07-12 17:07:25 +00:00
|
|
|
cfg &= ~MACB_BIT(NCFGR_MTI);
|
|
|
|
}
|
|
|
|
|
|
|
|
macb_writel(bp, NCFGR, cfg);
|
|
|
|
}
|
2012-10-18 11:01:15 +00:00
|
|
|
EXPORT_SYMBOL_GPL(macb_set_rx_mode);
|
2007-07-12 17:07:25 +00:00
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
static int macb_open(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2013-06-04 21:57:12 +00:00
|
|
|
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
|
2006-11-09 13:51:17 +00:00
|
|
|
int err;
|
|
|
|
|
2011-03-08 20:27:08 +00:00
|
|
|
netdev_dbg(bp->dev, "open\n");
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2012-07-03 23:14:13 +00:00
|
|
|
/* carrier starts down */
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
/* if the phy is not yet registered, retry later */
|
|
|
|
if (!bp->phy_dev)
|
|
|
|
return -EAGAIN;
|
2013-06-04 21:57:11 +00:00
|
|
|
|
|
|
|
/* RX buffers initialization */
|
2013-06-04 21:57:12 +00:00
|
|
|
macb_init_rx_buffer_size(bp, bufsz);
|
2007-07-12 17:07:24 +00:00
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
err = macb_alloc_consistent(bp);
|
|
|
|
if (err) {
|
2011-03-08 20:27:08 +00:00
|
|
|
netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
|
|
|
|
err);
|
2006-11-09 13:51:17 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-10-03 23:41:36 +00:00
|
|
|
napi_enable(&bp->napi);
|
|
|
|
|
2013-06-04 21:57:12 +00:00
|
|
|
bp->macbgem_ops.mog_init_rings(bp);
|
2006-11-09 13:51:17 +00:00
|
|
|
macb_init_hw(bp);
|
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
/* schedule a link state check */
|
|
|
|
phy_start(bp->phy_dev);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
netif_start_queue(dev);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_close(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
netif_stop_queue(dev);
|
2007-10-03 23:41:36 +00:00
|
|
|
napi_disable(&bp->napi);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
if (bp->phy_dev)
|
|
|
|
phy_stop(bp->phy_dev);
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
spin_lock_irqsave(&bp->lock, flags);
|
|
|
|
macb_reset_hw(bp);
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
|
|
|
|
|
|
|
macb_free_consistent(bp);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-03-09 16:26:35 +00:00
|
|
|
static void gem_update_stats(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 __iomem *reg = bp->regs + GEM_OTX;
|
|
|
|
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
|
|
|
|
u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
|
|
|
|
|
|
|
|
for (; p < end; p++, reg++)
|
|
|
|
*p += __raw_readl(reg);
|
|
|
|
}
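/*
 * Editorial note: this single pointer sweep works only because
 * struct gem_stats in macb.h lays out its u32 counters in exactly
 * the same order as the hardware statistics registers starting at
 * GEM_OTX, ending with rx_udp_checksum_errors.
 */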
|
|
|
|
|
|
|
|
static struct net_device_stats *gem_get_stats(struct macb *bp)
|
|
|
|
{
|
|
|
|
struct gem_stats *hwstat = &bp->hw_stats.gem;
|
|
|
|
struct net_device_stats *nstat = &bp->stats;
|
|
|
|
|
|
|
|
gem_update_stats(bp);
|
|
|
|
|
|
|
|
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
|
|
|
|
hwstat->rx_alignment_errors +
|
|
|
|
hwstat->rx_resource_errors +
|
|
|
|
hwstat->rx_overruns +
|
|
|
|
hwstat->rx_oversize_frames +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersized_frames +
|
|
|
|
hwstat->rx_length_field_frame_errors);
|
|
|
|
nstat->tx_errors = (hwstat->tx_late_collisions +
|
|
|
|
hwstat->tx_excessive_collisions +
|
|
|
|
hwstat->tx_underrun +
|
|
|
|
hwstat->tx_carrier_sense_errors);
|
|
|
|
nstat->multicast = hwstat->rx_multicast_frames;
|
|
|
|
nstat->collisions = (hwstat->tx_single_collision_frames +
|
|
|
|
hwstat->tx_multiple_collision_frames +
|
|
|
|
hwstat->tx_excessive_collisions);
|
|
|
|
nstat->rx_length_errors = (hwstat->rx_oversize_frames +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersized_frames +
|
|
|
|
hwstat->rx_length_field_frame_errors);
|
|
|
|
nstat->rx_over_errors = hwstat->rx_resource_errors;
|
|
|
|
nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
|
|
|
|
nstat->rx_frame_errors = hwstat->rx_alignment_errors;
|
|
|
|
nstat->rx_fifo_errors = hwstat->rx_overruns;
|
|
|
|
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
|
|
|
|
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
|
|
|
|
nstat->tx_fifo_errors = hwstat->tx_underrun;
|
|
|
|
|
|
|
|
return nstat;
|
|
|
|
}
|
|
|
|
|
2012-11-07 08:14:54 +00:00
|
|
|
struct net_device_stats *macb_get_stats(struct net_device *dev)
|
2006-11-09 13:51:17 +00:00
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
struct net_device_stats *nstat = &bp->stats;
|
2011-03-09 16:26:35 +00:00
|
|
|
struct macb_stats *hwstat = &bp->hw_stats.macb;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp))
|
|
|
|
return gem_get_stats(bp);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
/* read stats from hardware */
|
|
|
|
macb_update_stats(bp);
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
/* Convert HW stats into netdevice stats */
|
|
|
|
nstat->rx_errors = (hwstat->rx_fcs_errors +
|
|
|
|
hwstat->rx_align_errors +
|
|
|
|
hwstat->rx_resource_errors +
|
|
|
|
hwstat->rx_overruns +
|
|
|
|
hwstat->rx_oversize_pkts +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersize_pkts +
|
|
|
|
hwstat->sqe_test_errors +
|
|
|
|
hwstat->rx_length_mismatch);
|
|
|
|
nstat->tx_errors = (hwstat->tx_late_cols +
|
|
|
|
hwstat->tx_excessive_cols +
|
|
|
|
hwstat->tx_underruns +
|
|
|
|
hwstat->tx_carrier_errors);
|
|
|
|
nstat->collisions = (hwstat->tx_single_cols +
|
|
|
|
hwstat->tx_multiple_cols +
|
|
|
|
hwstat->tx_excessive_cols);
|
|
|
|
nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
|
|
|
|
hwstat->rx_jabbers +
|
|
|
|
hwstat->rx_undersize_pkts +
|
|
|
|
hwstat->rx_length_mismatch);
|
2011-04-13 05:03:24 +00:00
|
|
|
nstat->rx_over_errors = hwstat->rx_resource_errors +
|
|
|
|
hwstat->rx_overruns;
|
2006-11-09 13:51:17 +00:00
|
|
|
nstat->rx_crc_errors = hwstat->rx_fcs_errors;
|
|
|
|
nstat->rx_frame_errors = hwstat->rx_align_errors;
|
|
|
|
nstat->rx_fifo_errors = hwstat->rx_overruns;
|
|
|
|
/* XXX: What does "missed" mean? */
|
|
|
|
nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
|
|
|
|
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
|
|
|
|
nstat->tx_fifo_errors = hwstat->tx_underruns;
|
|
|
|
/* Don't know about heartbeat or window errors... */
|
|
|
|
|
|
|
|
return nstat;
|
|
|
|
}
|
2012-11-07 08:14:54 +00:00
|
|
|
EXPORT_SYMBOL_GPL(macb_get_stats);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2007-07-12 17:07:24 +00:00
|
|
|
struct phy_device *phydev = bp->phy_dev;
|
|
|
|
|
|
|
|
if (!phydev)
|
|
|
|
return -ENODEV;
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
return phy_ethtool_gset(phydev, cmd);
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2007-07-12 17:07:24 +00:00
|
|
|
struct phy_device *phydev = bp->phy_dev;
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
if (!phydev)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return phy_ethtool_sset(phydev, cmd);
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
|
|
|
|
2012-10-31 06:04:56 +00:00
|
|
|
static int macb_get_regs_len(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
return MACB_GREGS_NBR * sizeof(u32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
|
|
|
void *p)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
unsigned int tail, head;
|
|
|
|
u32 *regs_buff = p;
|
|
|
|
|
|
|
|
regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
|
|
|
|
| MACB_GREGS_VERSION;
|
|
|
|
|
|
|
|
tail = macb_tx_ring_wrap(bp->tx_tail);
|
|
|
|
head = macb_tx_ring_wrap(bp->tx_head);
|
|
|
|
|
|
|
|
regs_buff[0] = macb_readl(bp, NCR);
|
|
|
|
regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
|
|
|
|
regs_buff[2] = macb_readl(bp, NSR);
|
|
|
|
regs_buff[3] = macb_readl(bp, TSR);
|
|
|
|
regs_buff[4] = macb_readl(bp, RBQP);
|
|
|
|
regs_buff[5] = macb_readl(bp, TBQP);
|
|
|
|
regs_buff[6] = macb_readl(bp, RSR);
|
|
|
|
regs_buff[7] = macb_readl(bp, IMR);
|
|
|
|
|
|
|
|
regs_buff[8] = tail;
|
|
|
|
regs_buff[9] = head;
|
|
|
|
regs_buff[10] = macb_tx_dma(bp, tail);
|
|
|
|
regs_buff[11] = macb_tx_dma(bp, head);
|
|
|
|
|
|
|
|
if (macb_is_gem(bp)) {
|
|
|
|
regs_buff[12] = gem_readl(bp, USRIO);
|
|
|
|
regs_buff[13] = gem_readl(bp, DMACFG);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-18 11:01:12 +00:00
|
|
|
const struct ethtool_ops macb_ethtool_ops = {
|
2006-11-09 13:51:17 +00:00
|
|
|
.get_settings = macb_get_settings,
|
|
|
|
.set_settings = macb_set_settings,
|
2012-10-31 06:04:56 +00:00
|
|
|
.get_regs_len = macb_get_regs_len,
|
|
|
|
.get_regs = macb_get_regs,
|
2006-11-09 13:51:17 +00:00
|
|
|
.get_link = ethtool_op_get_link,
|
2012-04-03 22:59:31 +00:00
|
|
|
.get_ts_info = ethtool_op_get_ts_info,
|
2006-11-09 13:51:17 +00:00
|
|
|
};
|
2012-10-18 11:01:12 +00:00
|
|
|
EXPORT_SYMBOL_GPL(macb_ethtool_ops);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2012-10-18 11:01:12 +00:00
|
|
|
int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
2006-11-09 13:51:17 +00:00
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2007-07-12 17:07:24 +00:00
|
|
|
struct phy_device *phydev = bp->phy_dev;
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
if (!netif_running(dev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
if (!phydev)
|
|
|
|
return -ENODEV;
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2010-07-17 08:48:55 +00:00
|
|
|
return phy_mii_ioctl(phydev, rq, cmd);
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
2012-10-18 11:01:12 +00:00
|
|
|
EXPORT_SYMBOL_GPL(macb_ioctl);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2014-07-24 11:51:00 +00:00
|
|
|
static int macb_set_features(struct net_device *netdev,
|
|
|
|
netdev_features_t features)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
netdev_features_t changed = features ^ netdev->features;
|
|
|
|
|
|
|
|
/* TX checksum offload */
|
|
|
|
if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
|
|
|
|
u32 dmacfg;
|
|
|
|
|
|
|
|
dmacfg = gem_readl(bp, DMACFG);
|
|
|
|
if (features & NETIF_F_HW_CSUM)
|
|
|
|
dmacfg |= GEM_BIT(TXCOEN);
|
|
|
|
else
|
|
|
|
dmacfg &= ~GEM_BIT(TXCOEN);
|
|
|
|
gem_writel(bp, DMACFG, dmacfg);
|
|
|
|
}
|
|
|
|
|
2014-07-24 11:51:01 +00:00
|
|
|
/* RX checksum offload */
|
|
|
|
if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
|
|
|
|
u32 netcfg;
|
|
|
|
|
|
|
|
netcfg = gem_readl(bp, NCFGR);
|
|
|
|
if (features & NETIF_F_RXCSUM &&
|
|
|
|
!(netdev->flags & IFF_PROMISC))
|
|
|
|
netcfg |= GEM_BIT(RXCOEN);
|
|
|
|
else
|
|
|
|
netcfg &= ~GEM_BIT(RXCOEN);
|
|
|
|
gem_writel(bp, NCFGR, netcfg);
|
|
|
|
}
|
|
|
|
|
2014-07-24 11:51:00 +00:00
|
|
|
return 0;
|
|
|
|
}
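/*
 * Usage sketch (interface name assumed, not driver code): these hooks
 * are driven from user space through the standard ethtool offload
 * controls, e.g.
 *
 *	ethtool -K eth0 tx on	 -> NETIF_F_HW_CSUM set, TXCOEN enabled
 *	ethtool -K eth0 rx off	 -> NETIF_F_RXCSUM cleared, RXCOEN off
 */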
|
|
|
|
|
2009-04-11 07:42:26 +00:00
|
|
|
static const struct net_device_ops macb_netdev_ops = {
|
|
|
|
.ndo_open = macb_open,
|
|
|
|
.ndo_stop = macb_close,
|
|
|
|
.ndo_start_xmit = macb_start_xmit,
|
2011-08-16 06:29:01 +00:00
|
|
|
.ndo_set_rx_mode = macb_set_rx_mode,
|
2009-04-11 07:42:26 +00:00
|
|
|
.ndo_get_stats = macb_get_stats,
|
|
|
|
.ndo_do_ioctl = macb_ioctl,
|
|
|
|
.ndo_validate_addr = eth_validate_addr,
|
|
|
|
.ndo_change_mtu = eth_change_mtu,
|
|
|
|
.ndo_set_mac_address = eth_mac_addr,
|
2009-05-04 18:08:41 +00:00
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
.ndo_poll_controller = macb_poll_controller,
|
|
|
|
#endif
|
2014-07-24 11:51:00 +00:00
|
|
|
.ndo_set_features = macb_set_features,
|
2009-04-11 07:42:26 +00:00
|
|
|
};
|
|
|
|
|
2011-11-18 14:29:25 +00:00
|
|
|
#if defined(CONFIG_OF)
|
2014-07-24 11:50:58 +00:00
|
|
|
static struct macb_config pc302gem_config = {
|
2014-07-24 11:50:59 +00:00
|
|
|
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
|
|
|
|
.dma_burst_length = 16,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct macb_config sama5d3_config = {
|
|
|
|
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
|
2014-07-24 11:50:58 +00:00
|
|
|
.dma_burst_length = 16,
|
|
|
|
};
|
|
|
|
|
2014-07-24 11:51:03 +00:00
|
|
|
static struct macb_config sama5d4_config = {
|
|
|
|
.caps = 0,
|
|
|
|
.dma_burst_length = 4,
|
|
|
|
};
|
|
|
|
|
2011-11-18 14:29:25 +00:00
|
|
|
static const struct of_device_id macb_dt_ids[] = {
|
|
|
|
{ .compatible = "cdns,at32ap7000-macb" },
|
|
|
|
{ .compatible = "cdns,at91sam9260-macb" },
|
|
|
|
{ .compatible = "cdns,macb" },
|
2014-07-24 11:50:58 +00:00
|
|
|
{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
|
|
|
|
{ .compatible = "cdns,gem", .data = &pc302gem_config },
|
2014-07-24 11:50:59 +00:00
|
|
|
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
|
2014-07-24 11:51:03 +00:00
|
|
|
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
|
2011-11-18 14:29:25 +00:00
|
|
|
{ /* sentinel */ }
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(of, macb_dt_ids);
|
|
|
|
#endif
|
|
|
|
|
2014-07-24 11:50:58 +00:00
|
|
|
/*
|
|
|
|
* Configure peripheral capabilities according to device tree
|
|
|
|
* and integration options used
|
|
|
|
*/
|
|
|
|
static void macb_configure_caps(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 dcfg;
|
|
|
|
const struct of_device_id *match;
|
|
|
|
const struct macb_config *config;
|
|
|
|
|
|
|
|
if (bp->pdev->dev.of_node) {
|
|
|
|
match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
|
|
|
|
if (match && match->data) {
|
|
|
|
config = (const struct macb_config *)match->data;
|
|
|
|
|
|
|
|
bp->caps = config->caps;
|
|
|
|
/*
|
|
|
|
* As we have access to the matching node, configure
|
|
|
|
* DMA burst length as well
|
|
|
|
*/
|
|
|
|
bp->dma_burst_length = config->dma_burst_length;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
|
|
|
|
bp->caps |= MACB_CAPS_MACB_IS_GEM;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp)) {
|
|
|
|
dcfg = gem_readl(bp, DCFG1);
|
|
|
|
if (GEM_BFEXT(IRQCOR, dcfg) == 0)
|
|
|
|
bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
|
|
|
|
dcfg = gem_readl(bp, DCFG2);
|
|
|
|
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
|
|
|
|
bp->caps |= MACB_CAPS_FIFO_MODE;
|
|
|
|
}
|
|
|
|
|
|
|
|
netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
|
|
|
|
}
|
|
|
|
|
2008-01-31 12:10:22 +00:00
|
|
|
static int __init macb_probe(struct platform_device *pdev)
|
2006-11-09 13:51:17 +00:00
|
|
|
{
|
2011-03-08 20:17:06 +00:00
|
|
|
struct macb_platform_data *pdata;
|
2006-11-09 13:51:17 +00:00
|
|
|
struct resource *regs;
|
|
|
|
struct net_device *dev;
|
|
|
|
struct macb *bp;
|
2007-07-12 17:07:24 +00:00
|
|
|
struct phy_device *phydev;
|
2006-11-09 13:51:17 +00:00
|
|
|
u32 config;
|
|
|
|
int err = -ENXIO;
|
2012-10-31 06:04:59 +00:00
|
|
|
struct pinctrl *pinctrl;
|
2013-04-02 09:35:09 +00:00
|
|
|
const char *mac;
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
|
|
if (!regs) {
|
|
|
|
dev_err(&pdev->dev, "no mmio resource defined\n");
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
2012-10-31 06:04:59 +00:00
|
|
|
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
|
|
|
|
if (IS_ERR(pinctrl)) {
|
|
|
|
err = PTR_ERR(pinctrl);
|
|
|
|
if (err == -EPROBE_DEFER)
|
|
|
|
goto err_out;
|
|
|
|
|
|
|
|
dev_warn(&pdev->dev, "No pinctrl provided\n");
|
|
|
|
}
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
err = -ENOMEM;
|
|
|
|
dev = alloc_etherdev(sizeof(*bp));
|
2012-01-29 13:47:52 +00:00
|
|
|
if (!dev)
|
2006-11-09 13:51:17 +00:00
|
|
|
goto err_out;
|
|
|
|
|
|
|
|
SET_NETDEV_DEV(dev, &pdev->dev);
|
|
|
|
|
|
|
|
bp = netdev_priv(dev);
|
|
|
|
bp->pdev = pdev;
|
|
|
|
bp->dev = dev;
|
|
|
|
|
|
|
|
spin_lock_init(&bp->lock);
|
2012-10-31 06:04:57 +00:00
|
|
|
INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2013-12-11 00:07:20 +00:00
|
|
|
bp->pclk = devm_clk_get(&pdev->dev, "pclk");
|
2007-02-07 15:40:44 +00:00
|
|
|
if (IS_ERR(bp->pclk)) {
|
2013-12-11 00:07:20 +00:00
|
|
|
err = PTR_ERR(bp->pclk);
|
|
|
|
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
|
2007-02-07 15:40:44 +00:00
|
|
|
goto err_out_free_dev;
|
|
|
|
}
|
2011-03-08 20:19:23 +00:00
|
|
|
|
2013-12-11 00:07:20 +00:00
|
|
|
bp->hclk = devm_clk_get(&pdev->dev, "hclk");
|
2006-11-09 13:51:17 +00:00
|
|
|
if (IS_ERR(bp->hclk)) {
|
2013-12-11 00:07:20 +00:00
|
|
|
err = PTR_ERR(bp->hclk);
|
|
|
|
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
|
|
|
|
goto err_out_free_dev;
|
|
|
|
}
|
|
|
|
|
2013-12-11 00:07:23 +00:00
|
|
|
bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
|
|
|
|
|
2013-12-11 00:07:20 +00:00
|
|
|
err = clk_prepare_enable(bp->pclk);
|
|
|
|
if (err) {
|
|
|
|
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
|
|
|
|
goto err_out_free_dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = clk_prepare_enable(bp->hclk);
|
|
|
|
if (err) {
|
|
|
|
dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
|
|
|
|
goto err_out_disable_pclk;
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
|
|
|
|
2013-12-11 00:07:23 +00:00
|
|
|
if (!IS_ERR(bp->tx_clk)) {
|
|
|
|
err = clk_prepare_enable(bp->tx_clk);
|
|
|
|
if (err) {
|
|
|
|
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
|
|
|
|
err);
|
|
|
|
goto err_out_disable_hclk;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-11 00:07:21 +00:00
|
|
|
bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
|
2006-11-09 13:51:17 +00:00
|
|
|
if (!bp->regs) {
|
|
|
|
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_out_disable_clocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev->irq = platform_get_irq(pdev, 0);
|
2013-12-11 00:07:22 +00:00
|
|
|
err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
|
|
|
|
dev->name, dev);
|
2006-11-09 13:51:17 +00:00
|
|
|
if (err) {
|
2011-03-08 20:27:08 +00:00
|
|
|
dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
|
|
|
|
dev->irq, err);
|
2013-12-11 00:07:21 +00:00
|
|
|
goto err_out_disable_clocks;
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
|
|
|
|
2009-04-11 07:42:26 +00:00
|
|
|
dev->netdev_ops = &macb_netdev_ops;
|
2007-10-03 23:41:36 +00:00
|
|
|
netif_napi_add(dev, &bp->napi, macb_poll, 64);
|
2006-11-09 13:51:17 +00:00
|
|
|
dev->ethtool_ops = &macb_ethtool_ops;
|
|
|
|
|
|
|
|
dev->base_addr = regs->start;
|
|
|
|
|
2014-07-24 11:50:58 +00:00
|
|
|
/* set up capabilities */
|
|
|
|
macb_configure_caps(bp);
|
|
|
|
|
2013-06-04 21:57:12 +00:00
|
|
|
/* set up the appropriate routines according to adapter type */
|
|
|
|
if (macb_is_gem(bp)) {
|
2014-07-24 11:50:59 +00:00
|
|
|
bp->max_tx_length = GEM_MAX_TX_LEN;
|
2013-06-04 21:57:12 +00:00
|
|
|
bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
|
|
|
|
bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
|
|
|
|
bp->macbgem_ops.mog_init_rings = gem_init_rings;
|
|
|
|
bp->macbgem_ops.mog_rx = gem_rx;
|
|
|
|
} else {
|
2014-07-24 11:50:59 +00:00
|
|
|
bp->max_tx_length = MACB_MAX_TX_LEN;
|
2013-06-04 21:57:12 +00:00
|
|
|
bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
|
|
|
|
bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
|
|
|
|
bp->macbgem_ops.mog_init_rings = macb_init_rings;
|
|
|
|
bp->macbgem_ops.mog_rx = macb_rx;
|
|
|
|
}
|
|
|
|
|
2014-07-24 11:50:59 +00:00
|
|
|
/* Set features */
|
|
|
|
dev->hw_features = NETIF_F_SG;
|
2014-07-24 11:51:00 +00:00
|
|
|
/* Checksum offload is only available on gem with packet buffer */
|
|
|
|
if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
|
2014-07-24 11:51:01 +00:00
|
|
|
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
|
2014-07-24 11:50:59 +00:00
|
|
|
if (bp->caps & MACB_CAPS_SG_DISABLED)
|
|
|
|
dev->hw_features &= ~NETIF_F_SG;
|
|
|
|
dev->features = dev->hw_features;
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
/* Set MII management clock divider */
|
2011-03-09 16:22:54 +00:00
|
|
|
config = macb_mdc_clk_div(bp);
|
2011-03-09 16:29:59 +00:00
|
|
|
config |= macb_dbw(bp);
|
2006-11-09 13:51:17 +00:00
|
|
|
macb_writel(bp, NCFGR, config);
|
|
|
|
|
2013-04-02 09:35:09 +00:00
|
|
|
mac = of_get_mac_address(pdev->dev.of_node);
|
|
|
|
if (mac)
|
|
|
|
memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
|
|
|
|
else
|
2011-11-18 14:29:25 +00:00
|
|
|
macb_get_hwaddr(bp);
|
|
|
|
|
2013-04-02 09:35:09 +00:00
|
|
|
err = of_get_phy_mode(pdev->dev.of_node);
|
2011-11-18 14:29:25 +00:00
|
|
|
if (err < 0) {
|
2013-08-30 05:12:21 +00:00
|
|
|
pdata = dev_get_platdata(&pdev->dev);
|
2011-11-18 14:29:25 +00:00
|
|
|
if (pdata && pdata->is_rmii)
|
|
|
|
bp->phy_interface = PHY_INTERFACE_MODE_RMII;
|
|
|
|
else
|
|
|
|
bp->phy_interface = PHY_INTERFACE_MODE_MII;
|
|
|
|
} else {
|
|
|
|
bp->phy_interface = err;
|
|
|
|
}
|
2007-07-12 17:07:24 +00:00
|
|
|
|
2012-10-31 06:04:50 +00:00
|
|
|
if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
|
|
|
|
macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
|
|
|
|
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
|
2007-02-07 15:40:44 +00:00
|
|
|
#if defined(CONFIG_ARCH_AT91)
|
2011-11-08 10:12:32 +00:00
|
|
|
macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
|
|
|
|
MACB_BIT(CLKEN)));
|
2007-02-07 15:40:44 +00:00
|
|
|
#else
|
2011-11-08 10:12:32 +00:00
|
|
|
macb_or_gem_writel(bp, USRIO, 0);
|
2007-02-07 15:40:44 +00:00
|
|
|
#endif
|
2006-11-09 13:51:17 +00:00
|
|
|
else
|
2007-02-07 15:40:44 +00:00
|
|
|
#if defined(CONFIG_ARCH_AT91)
|
2011-11-08 10:12:32 +00:00
|
|
|
macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
|
2007-02-07 15:40:44 +00:00
|
|
|
#else
|
2011-11-08 10:12:32 +00:00
|
|
|
macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
|
2007-02-07 15:40:44 +00:00
|
|
|
#endif
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
err = register_netdev(dev);
|
|
|
|
if (err) {
|
|
|
|
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
|
2013-12-11 00:07:22 +00:00
|
|
|
goto err_out_disable_clocks;
|
2006-11-09 13:51:17 +00:00
|
|
|
}
|
|
|
|
|
2013-04-14 22:04:33 +00:00
|
|
|
err = macb_mii_init(bp);
|
|
|
|
if (err)
|
2007-07-12 17:07:24 +00:00
|
|
|
goto err_out_unregister_netdev;
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
platform_set_drvdata(pdev, dev);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2012-07-03 23:14:13 +00:00
|
|
|
netif_carrier_off(dev);
|
|
|
|
|
2014-09-12 23:57:49 +00:00
|
|
|
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
|
|
|
|
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
|
|
|
|
dev->base_addr, dev->irq, dev->dev_addr);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
phydev = bp->phy_dev;
|
2011-03-08 20:27:08 +00:00
|
|
|
netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
|
|
|
|
phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
|
2007-07-12 17:07:24 +00:00
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
return 0;
|
|
|
|
|
2007-07-12 17:07:24 +00:00
|
|
|
err_out_unregister_netdev:
|
|
|
|
unregister_netdev(dev);
|
2006-11-09 13:51:17 +00:00
|
|
|
err_out_disable_clocks:
|
2013-12-11 00:07:23 +00:00
|
|
|
if (!IS_ERR(bp->tx_clk))
|
|
|
|
clk_disable_unprepare(bp->tx_clk);
|
|
|
|
err_out_disable_hclk:
|
2013-03-27 23:07:07 +00:00
|
|
|
clk_disable_unprepare(bp->hclk);
|
2013-12-11 00:07:20 +00:00
|
|
|
err_out_disable_pclk:
|
2013-03-27 23:07:07 +00:00
|
|
|
clk_disable_unprepare(bp->pclk);
|
2006-11-09 13:51:17 +00:00
|
|
|
err_out_free_dev:
|
|
|
|
free_netdev(dev);
|
|
|
|
err_out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-01-31 12:10:22 +00:00
|
|
|
static int __exit macb_remove(struct platform_device *pdev)
|
2006-11-09 13:51:17 +00:00
|
|
|
{
|
|
|
|
struct net_device *dev;
|
|
|
|
struct macb *bp;
|
|
|
|
|
|
|
|
dev = platform_get_drvdata(pdev);
|
|
|
|
|
|
|
|
if (dev) {
|
|
|
|
bp = netdev_priv(dev);
|
2008-04-10 14:30:07 +00:00
|
|
|
if (bp->phy_dev)
|
|
|
|
phy_disconnect(bp->phy_dev);
|
2008-10-08 23:29:57 +00:00
|
|
|
mdiobus_unregister(bp->mii_bus);
|
|
|
|
kfree(bp->mii_bus->irq);
|
|
|
|
mdiobus_free(bp->mii_bus);
|
2006-11-09 13:51:17 +00:00
|
|
|
unregister_netdev(dev);
|
2013-12-11 00:07:23 +00:00
|
|
|
if (!IS_ERR(bp->tx_clk))
|
|
|
|
clk_disable_unprepare(bp->tx_clk);
|
2013-03-27 23:07:07 +00:00
|
|
|
clk_disable_unprepare(bp->hclk);
|
|
|
|
clk_disable_unprepare(bp->pclk);
|
2006-11-09 13:51:17 +00:00
|
|
|
free_netdev(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-03-04 12:39:29 +00:00
|
|
|
#ifdef CONFIG_PM
|
2013-12-11 00:07:19 +00:00
|
|
|
static int macb_suspend(struct device *dev)
|
2008-03-04 12:39:29 +00:00
|
|
|
{
|
2013-12-11 00:07:19 +00:00
|
|
|
struct platform_device *pdev = to_platform_device(dev);
|
2008-03-04 12:39:29 +00:00
|
|
|
struct net_device *netdev = platform_get_drvdata(pdev);
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
2012-07-03 23:14:13 +00:00
|
|
|
netif_carrier_off(netdev);
|
2008-03-04 12:39:29 +00:00
|
|
|
netif_device_detach(netdev);
|
|
|
|
|
2013-12-11 00:07:23 +00:00
|
|
|
if (!IS_ERR(bp->tx_clk))
|
|
|
|
clk_disable_unprepare(bp->tx_clk);
|
2013-03-27 23:07:07 +00:00
|
|
|
clk_disable_unprepare(bp->hclk);
|
|
|
|
clk_disable_unprepare(bp->pclk);
|
2008-03-04 12:39:29 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-12-11 00:07:19 +00:00
|
|
|
static int macb_resume(struct device *dev)
|
2008-03-04 12:39:29 +00:00
|
|
|
{
|
2013-12-11 00:07:19 +00:00
|
|
|
struct platform_device *pdev = to_platform_device(dev);
|
2008-03-04 12:39:29 +00:00
|
|
|
struct net_device *netdev = platform_get_drvdata(pdev);
|
|
|
|
struct macb *bp = netdev_priv(netdev);
|
|
|
|
|
2013-03-27 23:07:07 +00:00
|
|
|
clk_prepare_enable(bp->pclk);
|
|
|
|
clk_prepare_enable(bp->hclk);
|
2013-12-11 00:07:23 +00:00
|
|
|
if (!IS_ERR(bp->tx_clk))
|
|
|
|
clk_prepare_enable(bp->tx_clk);
|
2008-03-04 12:39:29 +00:00
|
|
|
|
|
|
|
netif_device_attach(netdev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-12-11 00:07:19 +00:00
|
|
|
static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
|
|
|
|
|
2006-11-09 13:51:17 +00:00
|
|
|
static struct platform_driver macb_driver = {
|
2008-01-31 12:10:22 +00:00
|
|
|
.remove = __exit_p(macb_remove),
|
2006-11-09 13:51:17 +00:00
|
|
|
.driver = {
|
|
|
|
.name = "macb",
|
2008-04-18 20:50:44 +00:00
|
|
|
.owner = THIS_MODULE,
|
2011-11-18 14:29:25 +00:00
|
|
|
.of_match_table = of_match_ptr(macb_dt_ids),
|
2013-12-11 00:07:19 +00:00
|
|
|
.pm = &macb_pm_ops,
|
2006-11-09 13:51:17 +00:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2013-03-04 16:43:18 +00:00
|
|
|
module_platform_driver_probe(macb_driver, macb_probe);
|
2006-11-09 13:51:17 +00:00
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
2011-11-08 10:12:32 +00:00
|
|
|
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
|
2011-05-18 14:49:24 +00:00
|
|
|
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
|
2008-04-18 20:50:44 +00:00
|
|
|
MODULE_ALIAS("platform:macb");
|