diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 412c0340fed9..24bf7f68375f 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -105,43 +105,6 @@ const char gfar_driver_version[] = "2.0"; -static int gfar_enet_open(struct net_device *dev); -static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); -static void gfar_reset_task(struct work_struct *work); -static void gfar_timeout(struct net_device *dev); -static int gfar_close(struct net_device *dev); -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, - int alloc_cnt); -static int gfar_set_mac_address(struct net_device *dev); -static int gfar_change_mtu(struct net_device *dev, int new_mtu); -static irqreturn_t gfar_error(int irq, void *dev_id); -static irqreturn_t gfar_transmit(int irq, void *dev_id); -static irqreturn_t gfar_interrupt(int irq, void *dev_id); -static void adjust_link(struct net_device *dev); -static noinline void gfar_update_link_state(struct gfar_private *priv); -static int init_phy(struct net_device *dev); -static int gfar_probe(struct platform_device *ofdev); -static int gfar_remove(struct platform_device *ofdev); -static void free_skb_resources(struct gfar_private *priv); -static void gfar_set_multi(struct net_device *dev); -static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); -static void gfar_configure_serdes(struct net_device *dev); -static int gfar_poll_rx(struct napi_struct *napi, int budget); -static int gfar_poll_tx(struct napi_struct *napi, int budget); -static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); -static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void gfar_netpoll(struct net_device *dev); -#endif -int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); -static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); -static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); -static void gfar_halt_nodisable(struct gfar_private *priv); -static void gfar_clear_exact_match(struct net_device *dev); -static void gfar_set_mac_for_addr(struct net_device *dev, int num, - const u8 *addr); -static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); - MODULE_AUTHOR("Freescale Semiconductor, Inc"); MODULE_DESCRIPTION("Gianfar Ethernet Driver"); MODULE_LICENSE("GPL"); @@ -162,138 +125,6 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, bdp->lstatus = cpu_to_be32(lstatus); } -static void gfar_init_bds(struct net_device *ndev) -{ - struct gfar_private *priv = netdev_priv(ndev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - struct gfar_priv_tx_q *tx_queue = NULL; - struct gfar_priv_rx_q *rx_queue = NULL; - struct txbd8 *txbdp; - u32 __iomem *rfbptr; - int i, j; - - for (i = 0; i < priv->num_tx_queues; i++) { - tx_queue = priv->tx_queue[i]; - /* Initialize some variables in our dev structure */ - tx_queue->num_txbdfree = tx_queue->tx_ring_size; - tx_queue->dirty_tx = tx_queue->tx_bd_base; - tx_queue->cur_tx = tx_queue->tx_bd_base; - tx_queue->skb_curtx = 0; - tx_queue->skb_dirtytx = 0; - - /* Initialize Transmit Descriptor Ring */ - txbdp = tx_queue->tx_bd_base; - for (j = 0; j < tx_queue->tx_ring_size; j++) { - txbdp->lstatus = 0; - txbdp->bufPtr = 0; - txbdp++; - } - - /* Set the last descriptor in the ring to indicate wrap */ - txbdp--; - txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) 
| - TXBD_WRAP); - } - - rfbptr = ®s->rfbptr0; - for (i = 0; i < priv->num_rx_queues; i++) { - rx_queue = priv->rx_queue[i]; - - rx_queue->next_to_clean = 0; - rx_queue->next_to_use = 0; - rx_queue->next_to_alloc = 0; - - /* make sure next_to_clean != next_to_use after this - * by leaving at least 1 unused descriptor - */ - gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); - - rx_queue->rfbptr = rfbptr; - rfbptr += 2; - } -} - -static int gfar_alloc_skb_resources(struct net_device *ndev) -{ - void *vaddr; - dma_addr_t addr; - int i, j; - struct gfar_private *priv = netdev_priv(ndev); - struct device *dev = priv->dev; - struct gfar_priv_tx_q *tx_queue = NULL; - struct gfar_priv_rx_q *rx_queue = NULL; - - priv->total_tx_ring_size = 0; - for (i = 0; i < priv->num_tx_queues; i++) - priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; - - priv->total_rx_ring_size = 0; - for (i = 0; i < priv->num_rx_queues; i++) - priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; - - /* Allocate memory for the buffer descriptors */ - vaddr = dma_alloc_coherent(dev, - (priv->total_tx_ring_size * - sizeof(struct txbd8)) + - (priv->total_rx_ring_size * - sizeof(struct rxbd8)), - &addr, GFP_KERNEL); - if (!vaddr) - return -ENOMEM; - - for (i = 0; i < priv->num_tx_queues; i++) { - tx_queue = priv->tx_queue[i]; - tx_queue->tx_bd_base = vaddr; - tx_queue->tx_bd_dma_base = addr; - tx_queue->dev = ndev; - /* enet DMA only understands physical addresses */ - addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; - vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; - } - - /* Start the rx descriptor ring where the tx ring leaves off */ - for (i = 0; i < priv->num_rx_queues; i++) { - rx_queue = priv->rx_queue[i]; - rx_queue->rx_bd_base = vaddr; - rx_queue->rx_bd_dma_base = addr; - rx_queue->ndev = ndev; - rx_queue->dev = dev; - addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; - vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; - } - - /* Setup the skbuff rings */ - for (i = 0; i < priv->num_tx_queues; i++) { - tx_queue = priv->tx_queue[i]; - tx_queue->tx_skbuff = - kmalloc_array(tx_queue->tx_ring_size, - sizeof(*tx_queue->tx_skbuff), - GFP_KERNEL); - if (!tx_queue->tx_skbuff) - goto cleanup; - - for (j = 0; j < tx_queue->tx_ring_size; j++) - tx_queue->tx_skbuff[j] = NULL; - } - - for (i = 0; i < priv->num_rx_queues; i++) { - rx_queue = priv->rx_queue[i]; - rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, - sizeof(*rx_queue->rx_buff), - GFP_KERNEL); - if (!rx_queue->rx_buff) - goto cleanup; - } - - gfar_init_bds(ndev); - - return 0; - -cleanup: - free_skb_resources(priv); - return -ENOMEM; -} - static void gfar_init_tx_rx_base(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; @@ -444,7 +275,7 @@ static void gfar_configure_coalescing(struct gfar_private *priv, } } -void gfar_configure_coalescing_all(struct gfar_private *priv) +static void gfar_configure_coalescing_all(struct gfar_private *priv) { gfar_configure_coalescing(priv, 0xFF, 0xFF); } @@ -477,6 +308,62 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) return &dev->stats; } +/* Set the appropriate hash bit for the given addr */ +/* The algorithm works like so: + * 1) Take the Destination Address (ie the multicast address), and + * do a CRC on it (little endian), and reverse the bits of the + * result. + * 2) Use the 8 most significant bits as a hash into a 256-entry + * table. The table is controlled through 8 32-bit registers: + * gaddr0-7. 
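+ * For example (with an illustrative CRC value, not taken from the + * driver): a result of 0xA3000000 and hash_width 8 give whichreg = + * 0xA3000000 >> (32 - 8 + 5) = 5 and whichbit = (0xA3000000 >> (32 - 8)) & 0x1f = 3, + * so gfar_set_hash_for_addr() below would set bit 3 of gaddr5 + * (IBM numbering, bit 0 being the MSB). 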
gaddr0's MSB is entry 0, and gaddr7's LSB is + entry 255. This means that the 3 most significant bits in the + hash index select which gaddr register to use, and the 5 other bits + indicate which bit (assuming an IBM numbering scheme, which + for PowerPC (tm) is usually the case) in the register holds + the entry. + */ +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) +{ + u32 tempval; + struct gfar_private *priv = netdev_priv(dev); + u32 result = ether_crc(ETH_ALEN, addr); + int width = priv->hash_width; + u8 whichbit = (result >> (32 - width)) & 0x1f; + u8 whichreg = result >> (32 - width + 5); + u32 value = (1 << (31-whichbit)); + + tempval = gfar_read(priv->hash_regs[whichreg]); + tempval |= value; + gfar_write(priv->hash_regs[whichreg], tempval); +} + +/* There are multiple MAC Address register pairs on some controllers + * This function sets the numth pair to a given address + */ +static void gfar_set_mac_for_addr(struct net_device *dev, int num, + const u8 *addr) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + u32 __iomem *macptr = &regs->macstnaddr1; + + macptr += num*2; + + /* For a station address of 0x12345678ABCD in transmission + * order (BE), MACnADDR1 is set to 0xCDAB7856 and + * MACnADDR2 is set to 0x34120000. + */ + tempval = (addr[5] << 24) | (addr[4] << 16) | + (addr[3] << 8) | addr[2]; + + gfar_write(macptr, tempval); + + tempval = (addr[1] << 24) | (addr[0] << 16); + + gfar_write(macptr+1, tempval); +} + static int gfar_set_mac_addr(struct net_device *dev, void *p) { eth_mac_addr(dev, p); @@ -486,24 +373,6 @@ static int gfar_set_mac_addr(struct net_device *dev, void *p) return 0; } -static const struct net_device_ops gfar_netdev_ops = { - .ndo_open = gfar_enet_open, - .ndo_start_xmit = gfar_start_xmit, - .ndo_stop = gfar_close, - .ndo_change_mtu = gfar_change_mtu, - .ndo_set_features = gfar_set_features, - .ndo_set_rx_mode = gfar_set_multi, - .ndo_tx_timeout = gfar_timeout, - .ndo_do_ioctl = gfar_ioctl, - .ndo_get_stats = gfar_get_stats, - .ndo_change_carrier = fixed_phy_change_carrier, - .ndo_set_mac_address = gfar_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = gfar_netpoll, -#endif -}; - static void gfar_ints_disable(struct gfar_private *priv) { int i; @@ -723,10 +592,53 @@ static int gfar_of_group_count(struct device_node *np) return num; } +/* Reads the controller's registers to determine what interface + * connects it to the PHY. + */ +static phy_interface_t gfar_get_interface(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 ecntrl; + + ecntrl = gfar_read(&regs->ecntrl); + + if (ecntrl & ECNTRL_SGMII_MODE) + return PHY_INTERFACE_MODE_SGMII; + + if (ecntrl & ECNTRL_TBI_MODE) { + if (ecntrl & ECNTRL_REDUCED_MODE) + return PHY_INTERFACE_MODE_RTBI; + else + return PHY_INTERFACE_MODE_TBI; + } + + if (ecntrl & ECNTRL_REDUCED_MODE) { + if (ecntrl & ECNTRL_REDUCED_MII_MODE) { + return PHY_INTERFACE_MODE_RMII; + } + else { + phy_interface_t interface = priv->interface; + + /* This isn't autodetected right now, so it must + * be set by the device tree or platform code. 
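+ * (gfar_of_init() seeds priv->interface from the device tree + * via of_get_phy_mode().) 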
+ */ + if (interface == PHY_INTERFACE_MODE_RGMII_ID) + return PHY_INTERFACE_MODE_RGMII_ID; + + return PHY_INTERFACE_MODE_RGMII; + } + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + return PHY_INTERFACE_MODE_GMII; + + return PHY_INTERFACE_MODE_MII; +} + static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; - const char *ctype; const void *mac_addr; int err = 0, i; struct net_device *dev = NULL; @@ -889,13 +801,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_TIMER | FSL_GIANFAR_DEV_HAS_RX_FILER; - err = of_property_read_string(np, "phy-connection-type", &ctype); - - /* We only care about rgmii-id. The rest are autodetected */ - if (err == 0 && !strcmp(ctype, "rgmii-id")) - priv->interface = PHY_INTERFACE_MODE_RGMII_ID; + /* Use PHY connection type from the DT node if one is specified there. + * rgmii-id really needs to be specified. Other types can be + * detected by hardware + */ + err = of_get_phy_mode(np); + if (err >= 0) + priv->interface = err; else - priv->interface = PHY_INTERFACE_MODE_MII; + priv->interface = gfar_get_interface(dev); if (of_find_property(np, "fsl,magic-packet", NULL)) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; @@ -931,85 +845,6 @@ tx_alloc_failed: return err; } -static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) -{ - struct hwtstamp_config config; - struct gfar_private *priv = netdev_priv(netdev); - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - priv->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) - return -ERANGE; - priv->hwts_tx_en = 1; - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - if (priv->hwts_rx_en) { - priv->hwts_rx_en = 0; - reset_gfar(netdev); - } - break; - default: - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) - return -ERANGE; - if (!priv->hwts_rx_en) { - priv->hwts_rx_en = 1; - reset_gfar(netdev); - } - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - } - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) -{ - struct hwtstamp_config config; - struct gfar_private *priv = netdev_priv(netdev); - - config.flags = 0; - config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = (priv->hwts_rx_en ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; -} - -static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!netif_running(dev)) - return -EINVAL; - - if (cmd == SIOCSHWTSTAMP) - return gfar_hwtstamp_set(dev, rq); - if (cmd == SIOCGHWTSTAMP) - return gfar_hwtstamp_get(dev, rq); - - if (!phydev) - return -ENODEV; - - return phy_mii_ioctl(phydev, rq, cmd); -} - static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, u32 class) { @@ -1133,6 +968,2183 @@ static void gfar_detect_errata(struct gfar_private *priv) priv->errata); } +static void gfar_init_addr_hash_table(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { + priv->extended_hash = 1; + priv->hash_width = 9; + + priv->hash_regs[0] = &regs->igaddr0; + priv->hash_regs[1] = &regs->igaddr1; + priv->hash_regs[2] = &regs->igaddr2; + priv->hash_regs[3] = &regs->igaddr3; + priv->hash_regs[4] = &regs->igaddr4; + priv->hash_regs[5] = &regs->igaddr5; + priv->hash_regs[6] = &regs->igaddr6; + priv->hash_regs[7] = &regs->igaddr7; + priv->hash_regs[8] = &regs->gaddr0; + priv->hash_regs[9] = &regs->gaddr1; + priv->hash_regs[10] = &regs->gaddr2; + priv->hash_regs[11] = &regs->gaddr3; + priv->hash_regs[12] = &regs->gaddr4; + priv->hash_regs[13] = &regs->gaddr5; + priv->hash_regs[14] = &regs->gaddr6; + priv->hash_regs[15] = &regs->gaddr7; + + } else { + priv->extended_hash = 0; + priv->hash_width = 8; + + priv->hash_regs[0] = &regs->gaddr0; + priv->hash_regs[1] = &regs->gaddr1; + priv->hash_regs[2] = &regs->gaddr2; + priv->hash_regs[3] = &regs->gaddr3; + priv->hash_regs[4] = &regs->gaddr4; + priv->hash_regs[5] = &regs->gaddr5; + priv->hash_regs[6] = &regs->gaddr6; + priv->hash_regs[7] = &regs->gaddr7; + } +} + +static int __gfar_is_rx_idle(struct gfar_private *priv) +{ + u32 res; + + /* Normally TSEC should not hang on GRS commands, so we should + * actually wait for IEVENT_GRSC flag. + */ + if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) + return 0; + + /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are + * the same as bits 23-30, the eTSEC Rx is assumed to be idle + * and the Rx can be safely reset. 
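+ * For example, an illustrative readback of res = 0x16801680 + * passes the 0x7f807f80 mask unchanged, and its low halfword + * equals its high halfword (0x1680 == 0x1680), so the Rx is + * reported idle. 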
+ */ + res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); + res &= 0x7f807f80; + if ((res & 0xffff) == (res >> 16)) + return 1; + + return 0; +} + +/* Halt the receive and transmit queues */ +static void gfar_halt_nodisable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + unsigned int timeout; + int stopped; + + gfar_ints_disable(priv); + + if (gfar_is_dma_stopped(priv)) + return; + + /* Stop the DMA, and wait for it to stop */ + tempval = gfar_read(&regs->dmactrl); + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&regs->dmactrl, tempval); + +retry: + timeout = 1000; + while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { + cpu_relax(); + timeout--; + } + + if (!timeout) + stopped = gfar_is_dma_stopped(priv); + + if (!stopped && !gfar_is_rx_dma_stopped(priv) && + !__gfar_is_rx_idle(priv)) + goto retry; +} + +/* Halt the receive and transmit queues */ +static void gfar_halt(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + + /* Disable the Rx/Tx hw queues */ + gfar_write(&regs->rqueue, 0); + gfar_write(&regs->tqueue, 0); + + mdelay(10); + + gfar_halt_nodisable(priv); + + /* Disable Rx/Tx DMA */ + tempval = gfar_read(&regs->maccfg1); + tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(&regs->maccfg1, tempval); +} + +static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) +{ + struct txbd8 *txbdp; + struct gfar_private *priv = netdev_priv(tx_queue->dev); + int i, j; + + txbdp = tx_queue->tx_bd_base; + + for (i = 0; i < tx_queue->tx_ring_size; i++) { + if (!tx_queue->tx_skbuff[i]) + continue; + + dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), DMA_TO_DEVICE); + txbdp->lstatus = 0; + for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; + j++) { + txbdp++; + dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), + DMA_TO_DEVICE); + } + txbdp++; + dev_kfree_skb_any(tx_queue->tx_skbuff[i]); + tx_queue->tx_skbuff[i] = NULL; + } + kfree(tx_queue->tx_skbuff); + tx_queue->tx_skbuff = NULL; +} + +static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) +{ + int i; + + struct rxbd8 *rxbdp = rx_queue->rx_bd_base; + + dev_kfree_skb(rx_queue->skb); + + for (i = 0; i < rx_queue->rx_ring_size; i++) { + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; + + rxbdp->lstatus = 0; + rxbdp->bufPtr = 0; + rxbdp++; + + if (!rxb->page) + continue; + + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rxb->page); + + rxb->page = NULL; + } + + kfree(rx_queue->rx_buff); + rx_queue->rx_buff = NULL; +} + +/* If there are any tx skbs or rx skbs still around, free them. 
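+ * (Rx buffer pages are unmapped and returned to the page + * allocator as well.) 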
+ * Then free tx_skbuff and rx_skbuff + */ +static void free_skb_resources(struct gfar_private *priv) +{ + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + + /* Go through all the buffer descriptors and free their data buffers */ + for (i = 0; i < priv->num_tx_queues; i++) { + struct netdev_queue *txq; + + tx_queue = priv->tx_queue[i]; + txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); + if (tx_queue->tx_skbuff) + free_skb_tx_queue(tx_queue); + netdev_tx_reset_queue(txq); + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + if (rx_queue->rx_buff) + free_skb_rx_queue(rx_queue); + } + + dma_free_coherent(priv->dev, + sizeof(struct txbd8) * priv->total_tx_ring_size + + sizeof(struct rxbd8) * priv->total_rx_ring_size, + priv->tx_queue[0]->tx_bd_base, + priv->tx_queue[0]->tx_bd_dma_base); +} + +void stop_gfar(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + netif_tx_stop_all_queues(dev); + + smp_mb__before_atomic(); + set_bit(GFAR_DOWN, &priv->state); + smp_mb__after_atomic(); + + disable_napi(priv); + + /* disable ints and gracefully shut down Rx/Tx DMA */ + gfar_halt(priv); + + phy_stop(dev->phydev); + + free_skb_resources(priv); +} + +static void gfar_start(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + int i = 0; + + /* Enable Rx/Tx hw queues */ + gfar_write(&regs->rqueue, priv->rqueue); + gfar_write(&regs->tqueue, priv->tqueue); + + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(&regs->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(&regs->dmactrl, tempval); + + /* Make sure we aren't stopped */ + tempval = gfar_read(&regs->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&regs->dmactrl, tempval); + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear THLT/RHLT, so that the DMA starts polling now */ + gfar_write(&regs->tstat, priv->gfargrp[i].tstat); + gfar_write(&regs->rstat, priv->gfargrp[i].rstat); + } + + /* Enable Rx/Tx DMA */ + tempval = gfar_read(&regs->maccfg1); + tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(&regs->maccfg1, tempval); + + gfar_ints_enable(priv); + + netif_trans_update(priv->ndev); /* prevent tx timeout */ +} + +static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) +{ + struct page *page; + dma_addr_t addr; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rxq->dev, addr))) { + __free_page(page); + + return false; + } + + rxb->dma = addr; + rxb->page = page; + rxb->page_offset = 0; + + return true; +} + +static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) +{ + struct gfar_private *priv = netdev_priv(rx_queue->ndev); + struct gfar_extra_stats *estats = &priv->extra_stats; + + netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); + atomic64_inc(&estats->rx_alloc_err); +} + +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt) +{ + struct rxbd8 *bdp; + struct gfar_rx_buff *rxb; + int i; + + i = rx_queue->next_to_use; + bdp = &rx_queue->rx_bd_base[i]; + rxb = &rx_queue->rx_buff[i]; + + while (alloc_cnt--) { + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!gfar_new_page(rx_queue, rxb))) { + gfar_rx_alloc_err(rx_queue); + break; + } + } + + /* Setup the new RxBD */ + gfar_init_rxbdp(rx_queue, bdp, + rxb->dma + rxb->page_offset + 
RXBUF_ALIGNMENT); + + /* Update to the next pointer */ + bdp++; + rxb++; + + if (unlikely(++i == rx_queue->rx_ring_size)) { + i = 0; + bdp = rx_queue->rx_bd_base; + rxb = rx_queue->rx_buff; + } + } + + rx_queue->next_to_use = i; + rx_queue->next_to_alloc = i; +} + +static void gfar_init_bds(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + struct txbd8 *txbdp; + u32 __iomem *rfbptr; + int i, j; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + /* Initialize some variables in our dev structure */ + tx_queue->num_txbdfree = tx_queue->tx_ring_size; + tx_queue->dirty_tx = tx_queue->tx_bd_base; + tx_queue->cur_tx = tx_queue->tx_bd_base; + tx_queue->skb_curtx = 0; + tx_queue->skb_dirtytx = 0; + + /* Initialize Transmit Descriptor Ring */ + txbdp = tx_queue->tx_bd_base; + for (j = 0; j < tx_queue->tx_ring_size; j++) { + txbdp->lstatus = 0; + txbdp->bufPtr = 0; + txbdp++; + } + + /* Set the last descriptor in the ring to indicate wrap */ + txbdp--; + txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | + TXBD_WRAP); + } + + rfbptr = ®s->rfbptr0; + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + + rx_queue->next_to_clean = 0; + rx_queue->next_to_use = 0; + rx_queue->next_to_alloc = 0; + + /* make sure next_to_clean != next_to_use after this + * by leaving at least 1 unused descriptor + */ + gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); + + rx_queue->rfbptr = rfbptr; + rfbptr += 2; + } +} + +static int gfar_alloc_skb_resources(struct net_device *ndev) +{ + void *vaddr; + dma_addr_t addr; + int i, j; + struct gfar_private *priv = netdev_priv(ndev); + struct device *dev = priv->dev; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + + priv->total_tx_ring_size = 0; + for (i = 0; i < priv->num_tx_queues; i++) + priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; + + priv->total_rx_ring_size = 0; + for (i = 0; i < priv->num_rx_queues; i++) + priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; + + /* Allocate memory for the buffer descriptors */ + vaddr = dma_alloc_coherent(dev, + (priv->total_tx_ring_size * + sizeof(struct txbd8)) + + (priv->total_rx_ring_size * + sizeof(struct rxbd8)), + &addr, GFP_KERNEL); + if (!vaddr) + return -ENOMEM; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_bd_base = vaddr; + tx_queue->tx_bd_dma_base = addr; + tx_queue->dev = ndev; + /* enet DMA only understands physical addresses */ + addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; + vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; + } + + /* Start the rx descriptor ring where the tx ring leaves off */ + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_bd_base = vaddr; + rx_queue->rx_bd_dma_base = addr; + rx_queue->ndev = ndev; + rx_queue->dev = dev; + addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; + vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; + } + + /* Setup the skbuff rings */ + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_skbuff = + kmalloc_array(tx_queue->tx_ring_size, + sizeof(*tx_queue->tx_skbuff), + GFP_KERNEL); + if (!tx_queue->tx_skbuff) + goto cleanup; + + for (j = 0; j < tx_queue->tx_ring_size; j++) + tx_queue->tx_skbuff[j] = NULL; + } + + for (i = 0; i 
< priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_buff), + GFP_KERNEL); + if (!rx_queue->rx_buff) + goto cleanup; + } + + gfar_init_bds(ndev); + + return 0; + +cleanup: + free_skb_resources(priv); + return -ENOMEM; +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + int err; + + gfar_mac_reset(priv); + + err = gfar_alloc_skb_resources(ndev); + if (err) + return err; + + gfar_init_tx_rx_base(priv); + + smp_mb__before_atomic(); + clear_bit(GFAR_DOWN, &priv->state); + smp_mb__after_atomic(); + + /* Start Rx/Tx DMA and enable the interrupts */ + gfar_start(priv); + + /* force link state update after mac reset */ + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + phy_start(ndev->phydev); + + enable_napi(priv); + + netif_tx_wake_all_queues(ndev); + + return 0; +} + +static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +{ + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; + u32 val = 0; + + if (!phydev->duplex) + return val; + + if (!priv->pause_aneg_en) { + if (priv->tx_pause_en) + val |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + val |= MACCFG1_RX_FLOW; + } else { + u16 lcl_adv, rmt_adv; + u8 flowctrl; + /* get link partner capabilities */ + rmt_adv = 0; + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_TX) + val |= MACCFG1_TX_FLOW; + if (flowctrl & FLOW_CTRL_RX) + val |= MACCFG1_RX_FLOW; + } + + return val; +} + +static noinline void gfar_update_link_state(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + + if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) + return; + + if (phydev->link) { + u32 tempval1 = gfar_read(®s->maccfg1); + u32 tempval = gfar_read(®s->maccfg2); + u32 ecntrl = gfar_read(®s->ecntrl); + u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); + + if (phydev->duplex != priv->oldduplex) { + if (!(phydev->duplex)) + tempval &= ~(MACCFG2_FULL_DUPLEX); + else + tempval |= MACCFG2_FULL_DUPLEX; + + priv->oldduplex = phydev->duplex; + } + + if (phydev->speed != priv->oldspeed) { + switch (phydev->speed) { + case 1000: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + + ecntrl &= ~(ECNTRL_R100); + break; + case 100: + case 10: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + + /* Reduced mode distinguishes + * between 10 and 100 + */ + if (phydev->speed == SPEED_100) + ecntrl |= ECNTRL_R100; + else + ecntrl &= ~(ECNTRL_R100); + break; + default: + netif_warn(priv, link, priv->ndev, + "Ack! 
Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + + priv->oldspeed = phydev->speed; + } + + tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + tempval1 |= gfar_get_flowctrl_cfg(priv); + + /* Turn last free buffer recording on */ + if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { + for (i = 0; i < priv->num_rx_queues; i++) { + u32 bdp_dma; + + rx_queue = priv->rx_queue[i]; + bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, bdp_dma); + } + + priv->tx_actual_en = 1; + } + + if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) + priv->tx_actual_en = 0; + + gfar_write(®s->maccfg1, tempval1); + gfar_write(®s->maccfg2, tempval); + gfar_write(®s->ecntrl, ecntrl); + + if (!priv->oldlink) + priv->oldlink = 1; + + } else if (priv->oldlink) { + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (netif_msg_link(priv)) + phy_print_status(phydev); +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the phydev structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + + if (unlikely(phydev->link != priv->oldlink || + (phydev->link && (phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)))) + gfar_update_link_state(priv); +} + +/* Initialize TBI PHY interface for communicating with the + * SERDES lynx PHY on the chip. We communicate with this PHY + * through the MDIO bus on each controller, treating it as a + * "normal" PHY at the address found in the TBIPA register. We assume + * that the TBIPA register is valid. Either the MDIO bus code will set + * it to a value that doesn't conflict with other PHYs on the bus, or the + * value doesn't matter, as there are no other PHYs on the bus. + */ +static void gfar_configure_serdes(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *tbiphy; + + if (!priv->tbi_node) { + dev_warn(&dev->dev, "error: SGMII mode requires that the " + "device tree specify a tbi-handle\n"); + return; + } + + tbiphy = of_phy_find_device(priv->tbi_node); + if (!tbiphy) { + dev_err(&dev->dev, "error: Could not get TBI device\n"); + return; + } + + /* If the link is already up, we must already be ok, and don't need to + * configure and reset the TBI<->SerDes link. Maybe U-Boot configured + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { + put_device(&tbiphy->mdio.dev); + return; + } + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); + + phy_write(tbiphy, MII_ADVERTISE, + ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM); + + phy_write(tbiphy, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | + BMCR_SPEED1000); + + put_device(&tbiphy->mdio.dev); +} + +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. 
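+ * Returns -ENODEV if the PHY could not be attached. 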
+ */ +static int init_phy(struct net_device *dev) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct gfar_private *priv = netdev_priv(dev); + phy_interface_t interface = priv->interface; + struct phy_device *phydev; + struct ethtool_eee edata; + + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!phydev) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return -ENODEV; + } + + if (interface == PHY_INTERFACE_MODE_SGMII) + gfar_configure_serdes(dev); + + /* Remove any features not supported by the controller */ + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); + + /* Add support for flow control */ + phy_support_asym_pause(phydev); + + /* disable EEE autoneg, EEE not supported by eTSEC */ + memset(&edata, 0, sizeof(struct ethtool_eee)); + phy_ethtool_set_eee(phydev, &edata); + + return 0; +} + +static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) +{ + struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN); + + memset(fcb, 0, GMAC_FCB_LEN); + + return fcb; +} + +static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, + int fcb_length) +{ + /* If we're here, it's a IP packet with a TCP or UDP + * payload. We set it to checksum, using a pseudo-header + * we provide + */ + u8 flags = TXFCB_DEFAULT; + + /* Tell the controller what the protocol is + * And provide the already calculated phcs + */ + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { + flags |= TXFCB_UDP; + fcb->phcs = (__force __be16)(udp_hdr(skb)->check); + } else + fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); + + /* l3os is the distance between the start of the + * frame (skb->data) and the start of the IP hdr. + * l4os is the distance between the start of the + * l3 hdr and the l4 hdr + */ + fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); + fcb->l4os = skb_network_header_len(skb); + + fcb->flags = flags; +} + +static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) +{ + fcb->flags |= TXFCB_VLN; + fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); +} + +static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, + struct txbd8 *base, int ring_size) +{ + struct txbd8 *new_bd = bdp + stride; + + return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; +} + +static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, + int ring_size) +{ + return skip_txbd(bdp, 1, base, ring_size); +} + +/* eTSEC12: csum generation not supported for some fcb offsets */ +static inline bool gfar_csum_errata_12(struct gfar_private *priv, + unsigned long fcb_addr) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_12) && + (fcb_addr % 0x20) > 0x18); +} + +/* eTSEC76: csum generation for frames larger than 2500 may + * cause excess delays before start of transmission + */ +static inline bool gfar_csum_errata_76(struct gfar_private *priv, + unsigned int len) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_76) && + (len > 2500)); +} + +/* This is called by the kernel when a frame is ready for transmission. 
+ * It is pointed to by the dev->hard_start_xmit function pointer + */ +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct netdev_queue *txq; + struct gfar __iomem *regs = NULL; + struct txfcb *fcb = NULL; + struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; + u32 lstatus; + skb_frag_t *frag; + int i, rq = 0; + int do_tstamp, do_csum, do_vlan; + u32 bufaddr; + unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; + + rq = skb->queue_mapping; + tx_queue = priv->tx_queue[rq]; + txq = netdev_get_tx_queue(dev, rq); + base = tx_queue->tx_bd_base; + regs = tx_queue->grp->regs; + + do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); + do_vlan = skb_vlan_tag_present(skb); + do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en; + + if (do_csum || do_vlan) + fcb_len = GMAC_FCB_LEN; + + /* check if time stamp should be generated */ + if (unlikely(do_tstamp)) + fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; + + /* make space for additional header when fcb is needed */ + if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, fcb_len); + if (!skb_new) { + dev->stats.tx_errors++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->sk) + skb_set_owner_w(skb_new, skb->sk); + dev_consume_skb_any(skb); + skb = skb_new; + } + + /* total number of fragments in the SKB */ + nr_frags = skb_shinfo(skb)->nr_frags; + + /* calculate the required number of TxBDs for this skb */ + if (unlikely(do_tstamp)) + nr_txbds = nr_frags + 2; + else + nr_txbds = nr_frags + 1; + + /* check if there is space to queue this packet */ + if (nr_txbds > tx_queue->num_txbdfree) { + /* no space, stop the queue */ + netif_tx_stop_queue(txq); + dev->stats.tx_fifo_errors++; + return NETDEV_TX_BUSY; + } + + /* Update transmit stats */ + bytes_sent = skb->len; + tx_queue->stats.tx_bytes += bytes_sent; + /* keep Tx bytes on wire for BQL accounting */ + GFAR_CB(skb)->bytes_sent = bytes_sent; + tx_queue->stats.tx_packets++; + + txbdp = txbdp_start = tx_queue->cur_tx; + lstatus = be32_to_cpu(txbdp->lstatus); + + /* Add TxPAL between FCB and frame if required */ + if (unlikely(do_tstamp)) { + skb_push(skb, GMAC_TXPAL_LEN); + memset(skb->data, 0, GMAC_TXPAL_LEN); + } + + /* Add TxFCB if required */ + if (fcb_len) { + fcb = gfar_add_fcb(skb); + lstatus |= BD_LFLAG(TXBD_TOE); + } + + /* Set up checksumming */ + if (do_csum) { + gfar_tx_checksum(skb, fcb, fcb_len); + + if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || + unlikely(gfar_csum_errata_76(priv, skb->len))) { + __skb_pull(skb, GMAC_FCB_LEN); + skb_checksum_help(skb); + if (do_vlan || do_tstamp) { + /* put back a new fcb for vlan/tstamp TOE */ + fcb = gfar_add_fcb(skb); + } else { + /* Tx TOE not used */ + lstatus &= ~(BD_LFLAG(TXBD_TOE)); + fcb = NULL; + } + } + } + + if (do_vlan) + gfar_tx_vlan(skb, fcb); + + bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; + + txbdp_start->bufPtr = cpu_to_be32(bufaddr); + + /* Time stamp insertion requires one additional TxBD */ + if (unlikely(do_tstamp)) + txbdp_tstamp = txbdp = next_txbd(txbdp, base, + tx_queue->tx_ring_size); + + if (likely(!nr_frags)) { + if (likely(!do_tstamp)) + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + } else { + u32 lstatus_start = lstatus; + + /* Place the 
fragment addresses and lengths into the TxBDs */ + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < nr_frags; i++, frag++) { + unsigned int size; + + /* Point at the next BD, wrapping as needed */ + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + + size = skb_frag_size(frag); + + lstatus = be32_to_cpu(txbdp->lstatus) | size | + BD_LFLAG(TXBD_READY); + + /* Handle the last BD specially */ + if (i == nr_frags - 1) + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + + bufaddr = skb_frag_dma_map(priv->dev, frag, 0, + size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; + + /* set the TxBD length and buffer pointer */ + txbdp->bufPtr = cpu_to_be32(bufaddr); + txbdp->lstatus = cpu_to_be32(lstatus); + } + + lstatus = lstatus_start; + } + + /* If time stamping is requested one additional TxBD must be set up. The + * first TxBD points to the FCB and must have a data length of + * GMAC_FCB_LEN. The second TxBD points to the actual frame data with + * the full frame length. + */ + if (unlikely(do_tstamp)) { + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); + + bufaddr = be32_to_cpu(txbdp_start->bufPtr); + bufaddr += fcb_len; + + lstatus_ts |= BD_LFLAG(TXBD_READY) | + (skb_headlen(skb) - fcb_len); + if (!nr_frags) + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + + txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; + + /* Setup tx hardware time stamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + fcb->ptp = 1; + } else { + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); + } + + netdev_tx_sent_queue(txq, bytes_sent); + + gfar_wmb(); + + txbdp_start->lstatus = cpu_to_be32(lstatus); + + gfar_wmb(); /* force lstatus write before tx_skbuff */ + + tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; + + /* Update the current skb pointer to the next entry we will use + * (wrapping if necessary) + */ + tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & + TX_RING_MOD_MASK(tx_queue->tx_ring_size); + + tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); + + /* We can work in parallel with gfar_clean_tx_ring(), except + * when modifying num_txbdfree. Note that we didn't grab the lock + * when we were reading the num_txbdfree and checking for available + * space, that's because outside of this function it can only grow. + */ + spin_lock_bh(&tx_queue->txlock); + /* reduce TxBD free count */ + tx_queue->num_txbdfree -= (nr_txbds); + spin_unlock_bh(&tx_queue->txlock); + + /* If the next BD still needs to be cleaned up, then the bds + * are full. We need to tell the kernel to stop sending us stuff. 
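+ * (gfar_clean_tx_ring() wakes the queue again once descriptors + * are freed.) 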
+ */ + if (!tx_queue->num_txbdfree) { + netif_tx_stop_queue(txq); + + dev->stats.tx_fifo_errors++; + } + + /* Tell the DMA to go go go */ + gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); + + return NETDEV_TX_OK; + +dma_map_err: + txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); + if (do_tstamp) + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + for (i = 0; i < nr_frags; i++) { + lstatus = be32_to_cpu(txbdp->lstatus); + if (!(lstatus & BD_LFLAG(TXBD_READY))) + break; + + lstatus &= ~BD_LFLAG(TXBD_READY); + txbdp->lstatus = cpu_to_be32(lstatus); + bufaddr = be32_to_cpu(txbdp->bufPtr); + dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), + DMA_TO_DEVICE); + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + } + gfar_wmb(); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/* Changes the mac address if the controller is not running. */ +static int gfar_set_mac_address(struct net_device *dev) +{ + gfar_set_mac_for_addr(dev, 0, dev->dev_addr); + + return 0; +} + +static int gfar_change_mtu(struct net_device *dev, int new_mtu) +{ + struct gfar_private *priv = netdev_priv(dev); + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + if (dev->flags & IFF_UP) + stop_gfar(dev); + + dev->mtu = new_mtu; + + if (dev->flags & IFF_UP) + startup_gfar(dev); + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + + return 0; +} + +void reset_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + stop_gfar(ndev); + startup_gfar(ndev); + + clear_bit_unlock(GFAR_RESETTING, &priv->state); +} + +/* gfar_reset_task gets scheduled when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. + */ +static void gfar_reset_task(struct work_struct *work) +{ + struct gfar_private *priv = container_of(work, struct gfar_private, + reset_task); + reset_gfar(priv->ndev); +} + +static void gfar_timeout(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + dev->stats.tx_errors++; + schedule_work(&priv->reset_task); +} + +static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + priv->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + if (priv->hwts_rx_en) { + priv->hwts_rx_en = 0; + reset_gfar(netdev); + } + break; + default: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + if (!priv->hwts_rx_en) { + priv->hwts_rx_en = 1; + reset_gfar(netdev); + } + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + config.flags = 0; + config.tx_type = priv->hwts_tx_en ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = (priv->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct phy_device *phydev = dev->phydev; + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd == SIOCSHWTSTAMP) + return gfar_hwtstamp_set(dev, rq); + if (cmd == SIOCGHWTSTAMP) + return gfar_hwtstamp_get(dev, rq); + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +/* Interrupt Handler for Transmit complete */ +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) +{ + struct net_device *dev = tx_queue->dev; + struct netdev_queue *txq; + struct gfar_private *priv = netdev_priv(dev); + struct txbd8 *bdp, *next = NULL; + struct txbd8 *lbdp = NULL; + struct txbd8 *base = tx_queue->tx_bd_base; + struct sk_buff *skb; + int skb_dirtytx; + int tx_ring_size = tx_queue->tx_ring_size; + int frags = 0, nr_txbds = 0; + int i; + int howmany = 0; + int tqi = tx_queue->qindex; + unsigned int bytes_sent = 0; + u32 lstatus; + size_t buflen; + + txq = netdev_get_tx_queue(dev, tqi); + bdp = tx_queue->dirty_tx; + skb_dirtytx = tx_queue->skb_dirtytx; + + while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { + + frags = skb_shinfo(skb)->nr_frags; + + /* When time stamping, one additional TxBD must be freed. + * Also, we need to dma_unmap_single() the TxPAL. + */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + nr_txbds = frags + 2; + else + nr_txbds = frags + 1; + + lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); + + lstatus = be32_to_cpu(lbdp->lstatus); + + /* Only clean completed frames */ + if ((lstatus & BD_LFLAG(TXBD_READY)) && + (lstatus & BD_LENGTH_MASK)) + break; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + next = next_txbd(bdp, base, tx_ring_size); + buflen = be16_to_cpu(next->length) + + GMAC_FCB_LEN + GMAC_TXPAL_LEN; + } else + buflen = be16_to_cpu(bdp->length); + + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), + buflen, DMA_TO_DEVICE); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + struct skb_shared_hwtstamps shhwtstamps; + u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & + ~0x7UL); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); + skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); + skb_tstamp_tx(skb, &shhwtstamps); + gfar_clear_txbd_status(bdp); + bdp = next; + } + + gfar_clear_txbd_status(bdp); + bdp = next_txbd(bdp, base, tx_ring_size); + + for (i = 0; i < frags; i++) { + dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), + be16_to_cpu(bdp->length), + DMA_TO_DEVICE); + gfar_clear_txbd_status(bdp); + bdp = next_txbd(bdp, base, tx_ring_size); + } + + bytes_sent += GFAR_CB(skb)->bytes_sent; + + dev_kfree_skb_any(skb); + + tx_queue->tx_skbuff[skb_dirtytx] = NULL; + + skb_dirtytx = (skb_dirtytx + 1) & + TX_RING_MOD_MASK(tx_ring_size); + + howmany++; + spin_lock(&tx_queue->txlock); + tx_queue->num_txbdfree += nr_txbds; + spin_unlock(&tx_queue->txlock); + } + + /* If we freed a buffer, we can restart transmission, if necessary */ + if (tx_queue->num_txbdfree && + netif_tx_queue_stopped(txq) && + !(test_bit(GFAR_DOWN, &priv->state))) + netif_wake_subqueue(priv->ndev, tqi); + + /* Update dirty indicators */ + tx_queue->skb_dirtytx = skb_dirtytx; + tx_queue->dirty_tx = bdp; + + netdev_tx_completed_queue(txq, howmany, bytes_sent); +} + +static void 
count_errors(u32 lstatus, struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct gfar_extra_stats *estats = &priv->extra_stats; + + /* If the packet was truncated, none of the other errors matter */ + if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { + stats->rx_length_errors++; + + atomic64_inc(&estats->rx_trunc); + + return; + } + /* Count the errors, if there were any */ + if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { + stats->rx_length_errors++; + + if (lstatus & BD_LFLAG(RXBD_LARGE)) + atomic64_inc(&estats->rx_large); + else + atomic64_inc(&estats->rx_short); + } + if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { + stats->rx_frame_errors++; + atomic64_inc(&estats->rx_nonoctet); + } + if (lstatus & BD_LFLAG(RXBD_CRCERR)) { + atomic64_inc(&estats->rx_crcerr); + stats->rx_crc_errors++; + } + if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { + atomic64_inc(&estats->rx_overrun); + stats->rx_over_errors++; + } +} + +static irqreturn_t gfar_receive(int irq, void *grp_id) +{ + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask, ievent; + + ievent = gfar_read(&grp->regs->ievent); + + if (unlikely(ievent & IEVENT_FGPI)) { + gfar_write(&grp->regs->ievent, IEVENT_FGPI); + return IRQ_HANDLED; + } + + if (likely(napi_schedule_prep(&grp->napi_rx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_RX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_rx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. + */ + gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); + } + + return IRQ_HANDLED; +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *grp_id) +{ + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask; + + if (likely(napi_schedule_prep(&grp->napi_tx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_TX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_tx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. + */ + gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); + } + + return IRQ_HANDLED; +} + +static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, + struct sk_buff *skb, bool first) +{ + int size = lstatus & BD_LENGTH_MASK; + struct page *page = rxb->page; + + if (likely(first)) { + skb_put(skb, size); + } else { + /* the last fragments' length contains the full frame length */ + if (lstatus & BD_LFLAG(RXBD_LAST)) + size -= skb->len; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + } + + /* try reuse page */ + if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) + return false; + + /* change offset to the other half */ + rxb->page_offset ^= GFAR_RXB_TRUESIZE; + + page_ref_inc(page); + + return true; +} + +static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, + struct gfar_rx_buff *old_rxb) +{ + struct gfar_rx_buff *new_rxb; + u16 nta = rxq->next_to_alloc; + + new_rxb = &rxq->rx_buff[nta]; + + /* find next buf that can reuse a page */ + nta++; + rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? 
nta : 0; + + /* copy page reference */ + *new_rxb = *old_rxb; + + /* sync for use by the device */ + dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, + old_rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, + u32 lstatus, struct sk_buff *skb) +{ + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; + struct page *page = rxb->page; + bool first = false; + + if (likely(!skb)) { + void *buff_addr = page_address(page) + rxb->page_offset; + + skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); + if (unlikely(!skb)) { + gfar_rx_alloc_err(rx_queue); + return NULL; + } + skb_reserve(skb, RXBUF_ALIGNMENT); + first = true; + } + + dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); + + if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { + /* reuse the free half of the page */ + gfar_reuse_rx_page(rx_queue, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear rxb content */ + rxb->page = NULL; + + return skb; +} + +static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) +{ + /* If valid headers were found, and valid sums + * were verified, then we tell the kernel that no + * checksumming is necessary. Otherwise, it is [FIXME] + */ + if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == + (RXFCB_CIP | RXFCB_CTU)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct rxfcb *fcb = NULL; + + /* fcb is at the beginning if exists */ + fcb = (struct rxfcb *)skb->data; + + /* Remove the FCB from the skb + * Remove the padded bytes, if there are any + */ + if (priv->uses_rxfcb) + skb_pull(skb, GMAC_FCB_LEN); + + /* Get receive timestamp from the skb */ + if (priv->hwts_rx_en) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + u64 *ns = (u64 *) skb->data; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); + } + + if (priv->padding) + skb_pull(skb, priv->padding); + + /* Trim off the FCS */ + pskb_trim(skb, skb->len - ETH_FCS_LEN); + + if (ndev->features & NETIF_F_RXCSUM) + gfar_rx_checksum(skb, fcb); + + /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. + * Even if vlan rx accel is disabled, on some chips + * RXFCB_VLN is pseudo randomly set. + */ + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && + be16_to_cpu(fcb->flags) & RXFCB_VLN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(fcb->vlctl)); +} + +/* gfar_clean_rx_ring() -- Processes each frame in the rx ring + * until the budget/quota has been reached. 
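Consumed buffers are replenished along the way, in + * bursts of GFAR_RX_BUFF_ALLOC. 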
Returns the number + * of frames handled + */ +static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, + int rx_work_limit) +{ + struct net_device *ndev = rx_queue->ndev; + struct gfar_private *priv = netdev_priv(ndev); + struct rxbd8 *bdp; + int i, howmany = 0; + struct sk_buff *skb = rx_queue->skb; + int cleaned_cnt = gfar_rxbd_unused(rx_queue); + unsigned int total_bytes = 0, total_pkts = 0; + + /* Get the first full descriptor */ + i = rx_queue->next_to_clean; + + while (rx_work_limit--) { + u32 lstatus; + + if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + cleaned_cnt = 0; + } + + bdp = &rx_queue->rx_bd_base[i]; + lstatus = be32_to_cpu(bdp->lstatus); + if (lstatus & BD_LFLAG(RXBD_EMPTY)) + break; + + /* order rx buffer descriptor reads */ + rmb(); + + /* fetch next to clean buffer from the ring */ + skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); + if (unlikely(!skb)) + break; + + cleaned_cnt++; + howmany++; + + if (unlikely(++i == rx_queue->rx_ring_size)) + i = 0; + + rx_queue->next_to_clean = i; + + /* fetch next buffer if not the last in frame */ + if (!(lstatus & BD_LFLAG(RXBD_LAST))) + continue; + + if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { + count_errors(lstatus, ndev); + + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + continue; + } + + gfar_process_frame(ndev, skb); + + /* Increment the number of packets */ + total_pkts++; + total_bytes += skb->len; + + skb_record_rx_queue(skb, rx_queue->qindex); + + skb->protocol = eth_type_trans(skb, ndev); + + /* Send the packet up the stack */ + napi_gro_receive(&rx_queue->grp->napi_rx, skb); + + skb = NULL; + } + + /* Store incomplete frames for completion */ + rx_queue->skb = skb; + + rx_queue->stats.rx_packets += total_pkts; + rx_queue->stats.rx_bytes += total_bytes; + + if (cleaned_cnt) + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(priv->tx_actual_en)) { + u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + + gfar_write(rx_queue->rfbptr, bdp_dma); + } + + return howmany; +} + +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_rx); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; + int work_done = 0; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(&regs->ievent, IEVENT_RX_MASK); + + work_done = gfar_clean_rx_ring(rx_queue, budget); + + if (work_done < budget) { + u32 imask; + napi_complete_done(napi, work_done); + /* Clear the halt bit in RSTAT */ + gfar_write(&regs->rstat, gfargrp->rstat); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_RX_DEFAULT; + gfar_write(&regs->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + } + + return work_done; +} + +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_tx); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; + u32 imask; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(&regs->ievent, IEVENT_TX_MASK); + + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) + gfar_clean_tx_ring(tx_queue); + + napi_complete(napi); + + 
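/* re-enable Tx interrupts; IMASK is shared with the Rx path, + * so the read-modify-write below is done under the group lock + */ + 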
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi_tx);
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+	u32 imask;
+
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived
+	 */
+	gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+	/* run Tx cleanup to completion */
+	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+		gfar_clean_tx_ring(tx_queue);
+
+	napi_complete(napi);
+
+	spin_lock_irq(&gfargrp->grplock);
+	imask = gfar_read(&regs->imask);
+	imask |= IMASK_TX_DEFAULT;
+	gfar_write(&regs->imask, imask);
+	spin_unlock_irq(&gfargrp->grplock);
+
+	return 0;
+}
+
+static int gfar_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi_rx);
+	struct gfar_private *priv = gfargrp->priv;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int work_done = 0, work_done_per_q = 0;
+	int i, budget_per_q = 0;
+	unsigned long rstat_rxf;
+	int num_act_queues;
+
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived
+	 */
+	gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
+
+	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
+	if (num_act_queues)
+		budget_per_q = budget/num_act_queues;
+
+	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+		/* skip queue if not active */
+		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+			continue;
+
+		rx_queue = priv->rx_queue[i];
+		work_done_per_q =
+			gfar_clean_rx_ring(rx_queue, budget_per_q);
+		work_done += work_done_per_q;
+
+		/* finished processing this queue */
+		if (work_done_per_q < budget_per_q) {
+			/* clear active queue hw indication */
+			gfar_write(&regs->rstat,
+				   RSTAT_CLEAR_RXF0 >> i);
+			num_act_queues--;
+
+			if (!num_act_queues)
+				break;
+		}
+	}
+
+	if (!num_act_queues) {
+		u32 imask;
+		napi_complete_done(napi, work_done);
+
+		/* Clear the halt bit in RSTAT */
+		gfar_write(&regs->rstat, gfargrp->rstat);
+
+		spin_lock_irq(&gfargrp->grplock);
+		imask = gfar_read(&regs->imask);
+		imask |= IMASK_RX_DEFAULT;
+		gfar_write(&regs->imask, imask);
+		spin_unlock_irq(&gfargrp->grplock);
+	}
+
+	return work_done;
+}
+
+static int gfar_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi_tx);
+	struct gfar_private *priv = gfargrp->priv;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	int has_tx_work = 0;
+	int i;
+
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived
+	 */
+	gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+		tx_queue = priv->tx_queue[i];
+		/* run Tx cleanup to completion */
+		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+			gfar_clean_tx_ring(tx_queue);
+			has_tx_work = 1;
+		}
+	}
+
+	if (!has_tx_work) {
+		u32 imask;
+		napi_complete(napi);
+
+		spin_lock_irq(&gfargrp->grplock);
+		imask = gfar_read(&regs->imask);
+		imask |= IMASK_TX_DEFAULT;
+		gfar_write(&regs->imask, imask);
+		spin_unlock_irq(&gfargrp->grplock);
+	}
+
+	return 0;
+}
+
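+/* Both Tx pollers deliberately run ring cleanup to completion and report
+ * zero work: only Rx processing is accounted against the NAPI budget.
+ */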
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *grp_id)
+{
+	struct gfar_priv_grp *gfargrp = grp_id;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_private *priv = gfargrp->priv;
+	struct net_device *dev = priv->ndev;
+
+	/* Save ievent for future reference */
+	u32 events = gfar_read(&regs->ievent);
+
+	/* Clear IEVENT */
+	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
+
+	/* Magic Packet is not an error. */
+	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+	    (events & IEVENT_MAG))
+		events &= ~IEVENT_MAG;
+
+	/* Log the event details if error messaging is enabled */
+	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
+		netdev_dbg(dev,
+			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+			   events, gfar_read(&regs->imask));
+
+	/* Update the error counters */
+	if (events & IEVENT_TXE) {
+		dev->stats.tx_errors++;
+
+		if (events & IEVENT_LC)
+			dev->stats.tx_window_errors++;
+		if (events & IEVENT_CRL)
+			dev->stats.tx_aborted_errors++;
+		if (events & IEVENT_XFUN) {
+			netif_dbg(priv, tx_err, dev,
+				  "TX FIFO underrun, packet dropped\n");
+			dev->stats.tx_dropped++;
+			atomic64_inc(&priv->extra_stats.tx_underrun);
+
+			schedule_work(&priv->reset_task);
+		}
+		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
+	}
+	if (events & IEVENT_BSY) {
+		dev->stats.rx_over_errors++;
+		atomic64_inc(&priv->extra_stats.rx_bsy);
+
+		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
+			  gfar_read(&regs->rstat));
+	}
+	if (events & IEVENT_BABR) {
+		dev->stats.rx_errors++;
+		atomic64_inc(&priv->extra_stats.rx_babr);
+
+		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
+	}
+	if (events & IEVENT_EBERR) {
+		atomic64_inc(&priv->extra_stats.eberr);
+		netif_dbg(priv, rx_err, dev, "bus error\n");
+	}
+	if (events & IEVENT_RXC)
+		netif_dbg(priv, rx_status, dev, "control frame\n");
+
+	if (events & IEVENT_BABT) {
+		atomic64_inc(&priv->extra_stats.tx_babt);
+		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
+	}
+	return IRQ_HANDLED;
+}
+
+/* The interrupt handler for devices with one interrupt */
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
+{
+	struct gfar_priv_grp *gfargrp = grp_id;
+
+	/* Save ievent for future reference */
+	u32 events = gfar_read(&gfargrp->regs->ievent);
+
+	/* Check for reception */
+	if (events & IEVENT_RX_MASK)
+		gfar_receive(irq, grp_id);
+
+	/* Check for transmit completion */
+	if (events & IEVENT_TX_MASK)
+		gfar_transmit(irq, grp_id);
+
+	/* Check for errors */
+	if (events & IEVENT_ERR_MASK)
+		gfar_error(irq, grp_id);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts.  It's not called while
+ * the interrupt routine is executing.
+ */
+static void gfar_netpoll(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int i;
+
+	/* If the device has multiple interrupts, run tx/rx */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		for (i = 0; i < priv->num_grps; i++) {
+			struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+			disable_irq(gfar_irq(grp, TX)->irq);
+			disable_irq(gfar_irq(grp, RX)->irq);
+			disable_irq(gfar_irq(grp, ER)->irq);
+			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+			enable_irq(gfar_irq(grp, ER)->irq);
+			enable_irq(gfar_irq(grp, RX)->irq);
+			enable_irq(gfar_irq(grp, TX)->irq);
+		}
+	} else {
+		for (i = 0; i < priv->num_grps; i++) {
+			struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+			disable_irq(gfar_irq(grp, TX)->irq);
+			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+			enable_irq(gfar_irq(grp, TX)->irq);
+		}
+	}
+}
+#endif
+
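+/* Each interrupt group exposes up to three lines (ER, TX, RX) on
+ * controllers with FSL_GIANFAR_DEV_HAS_MULTI_INTR, or one shared line
+ * otherwise; the helpers below request and free them symmetrically.
+ */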
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+	free_irq(gfar_irq(grp, TX)->irq, grp);
+	free_irq(gfar_irq(grp, RX)->irq, grp);
+	free_irq(gfar_irq(grp, ER)->irq, grp);
+}
+
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+	struct gfar_private *priv = grp->priv;
+	struct net_device *dev = priv->ndev;
+	int err;
+
+	/* If the device has multiple interrupts, register for
+	 * them.  Otherwise, only register for the one
+	 */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		/* Install our interrupt handlers for Error,
+		 * Transmit, and Receive
+		 */
+		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+				  gfar_irq(grp, ER)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, ER)->irq);
+
+			goto err_irq_fail;
+		}
+		enable_irq_wake(gfar_irq(grp, ER)->irq);
+
+		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
+				  gfar_irq(grp, TX)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, TX)->irq);
+			goto tx_irq_fail;
+		}
+		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+				  gfar_irq(grp, RX)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, RX)->irq);
+			goto rx_irq_fail;
+		}
+		enable_irq_wake(gfar_irq(grp, RX)->irq);
+
+	} else {
+		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+				  gfar_irq(grp, TX)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, TX)->irq);
+			goto err_irq_fail;
+		}
+		enable_irq_wake(gfar_irq(grp, TX)->irq);
+	}
+
+	return 0;
+
+rx_irq_fail:
+	free_irq(gfar_irq(grp, TX)->irq, grp);
+tx_irq_fail:
+	free_irq(gfar_irq(grp, ER)->irq, grp);
+err_irq_fail:
+	return err;
+
+}
+
+static void gfar_free_irq(struct gfar_private *priv)
+{
+	int i;
+
+	/* Free the IRQs */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		for (i = 0; i < priv->num_grps; i++)
+			free_grp_irqs(&priv->gfargrp[i]);
+	} else {
+		for (i = 0; i < priv->num_grps; i++)
+			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+				 &priv->gfargrp[i]);
+	}
+}
+
+static int gfar_request_irq(struct gfar_private *priv)
+{
+	int err, i, j;
+
+	for (i = 0; i < priv->num_grps; i++) {
+		err = register_grp_irqs(&priv->gfargrp[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				free_grp_irqs(&priv->gfargrp[j]);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
+static int gfar_enet_open(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int err;
+
+	err = init_phy(dev);
+	if (err)
+		return err;
+
+	err = gfar_request_irq(priv);
+	if (err)
+		return err;
+
+	err = startup_gfar(dev);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int gfar_close(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	cancel_work_sync(&priv->reset_task);
+	stop_gfar(dev);
+
+	/* Disconnect from the PHY */
+	phy_disconnect(dev->phydev);
+
+	gfar_free_irq(priv);
+
+	return 0;
+}
+
+/* Clears each of the exact match registers to zero, so they
+ * don't interfere with normal reception
+ */
+static void gfar_clear_exact_match(struct net_device *dev)
+{
+	int idx;
+	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
+		gfar_set_mac_for_addr(dev, idx, zero_arr);
+}
+
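+/* Multicast filtering below uses the exact-match MAC address registers
+ * first (entry 0 holds the station address, so filling starts at index 1)
+ * and falls back to the group hash table once those slots are used up.
+ */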
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to.  Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed)
+ */
+static void gfar_set_multi(struct net_device *dev)
+{
+	struct netdev_hw_addr *ha;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Set RCTRL to PROM */
+		tempval = gfar_read(&regs->rctrl);
+		tempval |= RCTRL_PROM;
+		gfar_write(&regs->rctrl, tempval);
+	} else {
+		/* Set RCTRL to not PROM */
+		tempval = gfar_read(&regs->rctrl);
+		tempval &= ~(RCTRL_PROM);
+		gfar_write(&regs->rctrl, tempval);
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		/* Set the hash to rx all multicast frames */
+		gfar_write(&regs->igaddr0, 0xffffffff);
+		gfar_write(&regs->igaddr1, 0xffffffff);
+		gfar_write(&regs->igaddr2, 0xffffffff);
+		gfar_write(&regs->igaddr3, 0xffffffff);
+		gfar_write(&regs->igaddr4, 0xffffffff);
+		gfar_write(&regs->igaddr5, 0xffffffff);
+		gfar_write(&regs->igaddr6, 0xffffffff);
+		gfar_write(&regs->igaddr7, 0xffffffff);
+		gfar_write(&regs->gaddr0, 0xffffffff);
+		gfar_write(&regs->gaddr1, 0xffffffff);
+		gfar_write(&regs->gaddr2, 0xffffffff);
+		gfar_write(&regs->gaddr3, 0xffffffff);
+		gfar_write(&regs->gaddr4, 0xffffffff);
+		gfar_write(&regs->gaddr5, 0xffffffff);
+		gfar_write(&regs->gaddr6, 0xffffffff);
+		gfar_write(&regs->gaddr7, 0xffffffff);
+	} else {
+		int em_num;
+		int idx;
+
+		/* zero out the hash */
+		gfar_write(&regs->igaddr0, 0x0);
+		gfar_write(&regs->igaddr1, 0x0);
+		gfar_write(&regs->igaddr2, 0x0);
+		gfar_write(&regs->igaddr3, 0x0);
+		gfar_write(&regs->igaddr4, 0x0);
+		gfar_write(&regs->igaddr5, 0x0);
+		gfar_write(&regs->igaddr6, 0x0);
+		gfar_write(&regs->igaddr7, 0x0);
+		gfar_write(&regs->gaddr0, 0x0);
+		gfar_write(&regs->gaddr1, 0x0);
+		gfar_write(&regs->gaddr2, 0x0);
+		gfar_write(&regs->gaddr3, 0x0);
+		gfar_write(&regs->gaddr4, 0x0);
+		gfar_write(&regs->gaddr5, 0x0);
+		gfar_write(&regs->gaddr6, 0x0);
+		gfar_write(&regs->gaddr7, 0x0);
+
+		/* If we have extended hash tables, we need to
+		 * clear the exact match registers to prepare for
+		 * setting them
+		 */
+		if (priv->extended_hash) {
+			em_num = GFAR_EM_NUM + 1;
+			gfar_clear_exact_match(dev);
+			idx = 1;
+		} else {
+			idx = 0;
+			em_num = 0;
+		}
+
+		if (netdev_mc_empty(dev))
+			return;
+
+		/* Parse the list, and set the appropriate bits */
+		netdev_for_each_mc_addr(ha, dev) {
+			if (idx < em_num) {
+				gfar_set_mac_for_addr(dev, idx, ha->addr);
+				idx++;
+			} else
+				gfar_set_hash_for_addr(dev, ha->addr);
+		}
+	}
+}
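+/* Note on table sizes: with FSL_GIANFAR_DEV_HAS_EXTENDED_HASH the
+ * igaddr0-7 and gaddr0-7 banks together form a 512-bit filter
+ * (hash_width 9); otherwise only gaddr0-7 are used (hash_width 8),
+ * which is why the code above programs both banks unconditionally.
+ */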
+
 void gfar_mac_reset(struct gfar_private *priv)
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -1262,45 +3274,23 @@ static void gfar_hw_init(struct gfar_private *priv)
 	gfar_write_isrg(priv);
 }
 
-static void gfar_init_addr_hash_table(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
-		priv->extended_hash = 1;
-		priv->hash_width = 9;
-
-		priv->hash_regs[0] = &regs->igaddr0;
-		priv->hash_regs[1] = &regs->igaddr1;
-		priv->hash_regs[2] = &regs->igaddr2;
-		priv->hash_regs[3] = &regs->igaddr3;
-		priv->hash_regs[4] = &regs->igaddr4;
-		priv->hash_regs[5] = &regs->igaddr5;
-		priv->hash_regs[6] = &regs->igaddr6;
-		priv->hash_regs[7] = &regs->igaddr7;
-		priv->hash_regs[8] = &regs->gaddr0;
-		priv->hash_regs[9] = &regs->gaddr1;
-		priv->hash_regs[10] = &regs->gaddr2;
-		priv->hash_regs[11] = &regs->gaddr3;
-		priv->hash_regs[12] = &regs->gaddr4;
-		priv->hash_regs[13] = &regs->gaddr5;
-		priv->hash_regs[14] = &regs->gaddr6;
-		priv->hash_regs[15] = &regs->gaddr7;
-
-	} else {
-		priv->extended_hash = 0;
-		priv->hash_width = 8;
-
-		priv->hash_regs[0] = &regs->gaddr0;
-		priv->hash_regs[1] = &regs->gaddr1;
-		priv->hash_regs[2] = &regs->gaddr2;
-		priv->hash_regs[3] = &regs->gaddr3;
-		priv->hash_regs[4] = &regs->gaddr4;
-		priv->hash_regs[5] = &regs->gaddr5;
-		priv->hash_regs[6] = &regs->gaddr6;
-		priv->hash_regs[7] = &regs->gaddr7;
-	}
-}
+static const struct net_device_ops gfar_netdev_ops = {
+	.ndo_open = gfar_enet_open,
+	.ndo_start_xmit = gfar_start_xmit,
+	.ndo_stop = gfar_close,
+	.ndo_change_mtu = gfar_change_mtu,
+	.ndo_set_features = gfar_set_features,
+	.ndo_set_rx_mode = gfar_set_multi,
+	.ndo_tx_timeout = gfar_timeout,
+	.ndo_do_ioctl = gfar_ioctl,
+	.ndo_get_stats = gfar_get_stats,
+	.ndo_change_carrier = fixed_phy_change_carrier,
+	.ndo_set_mac_address = gfar_set_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = gfar_netpoll,
+#endif
+};
 
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start
@@ -1731,2037 +3721,6 @@ static const struct dev_pm_ops gfar_pm_ops = {
 
 #endif
 
-/* Reads the controller's registers to determine what interface
- * connects it to the PHY.
- */
-static phy_interface_t gfar_get_interface(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 ecntrl;
-
-	ecntrl = gfar_read(&regs->ecntrl);
-
-	if (ecntrl & ECNTRL_SGMII_MODE)
-		return PHY_INTERFACE_MODE_SGMII;
-
-	if (ecntrl & ECNTRL_TBI_MODE) {
-		if (ecntrl & ECNTRL_REDUCED_MODE)
-			return PHY_INTERFACE_MODE_RTBI;
-		else
-			return PHY_INTERFACE_MODE_TBI;
-	}
-
-	if (ecntrl & ECNTRL_REDUCED_MODE) {
-		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
-			return PHY_INTERFACE_MODE_RMII;
-		}
-		else {
-			phy_interface_t interface = priv->interface;
-
-			/* This isn't autodetected right now, so it must
-			 * be set by the device tree or platform code.
-			 */
-			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
-				return PHY_INTERFACE_MODE_RGMII_ID;
-
-			return PHY_INTERFACE_MODE_RGMII;
-		}
-	}
-
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
-		return PHY_INTERFACE_MODE_GMII;
-
-	return PHY_INTERFACE_MODE_MII;
-}
-
-
-/* Initializes driver's PHY state, and attaches to the PHY.
- * Returns 0 on success.
- */
-static int init_phy(struct net_device *dev)
-{
-	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-	struct gfar_private *priv = netdev_priv(dev);
-	phy_interface_t interface;
-	struct phy_device *phydev;
-	struct ethtool_eee edata;
-
-	linkmode_set_bit_array(phy_10_100_features_array,
-			       ARRAY_SIZE(phy_10_100_features_array),
-			       mask);
-	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
-	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
-		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
-
-	priv->oldlink = 0;
-	priv->oldspeed = 0;
-	priv->oldduplex = -1;
-
-	interface = gfar_get_interface(dev);
-
-	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
-				interface);
-	if (!phydev) {
-		dev_err(&dev->dev, "could not attach to PHY\n");
-		return -ENODEV;
-	}
-
-	if (interface == PHY_INTERFACE_MODE_SGMII)
-		gfar_configure_serdes(dev);
-
-	/* Remove any features not supported by the controller */
-	linkmode_and(phydev->supported, phydev->supported, mask);
-	linkmode_copy(phydev->advertising, phydev->supported);
-
-	/* Add support for flow control */
-	phy_support_asym_pause(phydev);
-
-	/* disable EEE autoneg, EEE not supported by eTSEC */
-	memset(&edata, 0, sizeof(struct ethtool_eee));
-	phy_ethtool_set_eee(phydev, &edata);
-
-	return 0;
-}
-
-/* Initialize TBI PHY interface for communicating with the
- * SERDES lynx PHY on the chip.  We communicate with this PHY
- * through the MDIO bus on each controller, treating it as a
- * "normal" PHY at the address found in the TBIPA register.  We assume
- * that the TBIPA register is valid.  Either the MDIO bus code will set
- * it to a value that doesn't conflict with other PHYs on the bus, or the
- * value doesn't matter, as there are no other PHYs on the bus.
- */
-static void gfar_configure_serdes(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *tbiphy;
-
-	if (!priv->tbi_node) {
-		dev_warn(&dev->dev, "error: SGMII mode requires that the "
-			 "device tree specify a tbi-handle\n");
-		return;
-	}
-
-	tbiphy = of_phy_find_device(priv->tbi_node);
-	if (!tbiphy) {
-		dev_err(&dev->dev, "error: Could not get TBI device\n");
-		return;
-	}
-
-	/* If the link is already up, we must already be ok, and don't need to
-	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
-	 * everything for us?  Resetting it takes the link down and requires
-	 * several seconds for it to come back.
-	 */
-	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
-		put_device(&tbiphy->mdio.dev);
-		return;
-	}
-
-	/* Single clk mode, mii mode off(for serdes communication) */
-	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
-
-	phy_write(tbiphy, MII_ADVERTISE,
-		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
-		  ADVERTISE_1000XPSE_ASYM);
-
-	phy_write(tbiphy, MII_BMCR,
-		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
-		  BMCR_SPEED1000);
-
-	put_device(&tbiphy->mdio.dev);
-}
-
-static int __gfar_is_rx_idle(struct gfar_private *priv)
-{
-	u32 res;
-
-	/* Normaly TSEC should not hang on GRS commands, so we should
-	 * actually wait for IEVENT_GRSC flag.
-	 */
-	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
-		return 0;
-
-	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
-	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
-	 * and the Rx can be safely reset.
-	 */
-	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
-	res &= 0x7f807f80;
-	if ((res & 0xffff) == (res >> 16))
-		return 1;
-
-	return 0;
-}
-
-/* Halt the receive and transmit queues */
-static void gfar_halt_nodisable(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-	unsigned int timeout;
-	int stopped;
-
-	gfar_ints_disable(priv);
-
-	if (gfar_is_dma_stopped(priv))
-		return;
-
-	/* Stop the DMA, and wait for it to stop */
-	tempval = gfar_read(&regs->dmactrl);
-	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-	gfar_write(&regs->dmactrl, tempval);
-
-retry:
-	timeout = 1000;
-	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
-		cpu_relax();
-		timeout--;
-	}
-
-	if (!timeout)
-		stopped = gfar_is_dma_stopped(priv);
-
-	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
-	    !__gfar_is_rx_idle(priv))
-		goto retry;
-}
-
-/* Halt the receive and transmit queues */
-void gfar_halt(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-
-	/* Dissable the Rx/Tx hw queues */
-	gfar_write(&regs->rqueue, 0);
-	gfar_write(&regs->tqueue, 0);
-
-	mdelay(10);
-
-	gfar_halt_nodisable(priv);
-
-	/* Disable Rx/Tx DMA */
-	tempval = gfar_read(&regs->maccfg1);
-	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
-	gfar_write(&regs->maccfg1, tempval);
-}
-
-void stop_gfar(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	netif_tx_stop_all_queues(dev);
-
-	smp_mb__before_atomic();
-	set_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_atomic();
-
-	disable_napi(priv);
-
-	/* disable ints and gracefully shut down Rx/Tx DMA */
-	gfar_halt(priv);
-
-	phy_stop(dev->phydev);
-
-	free_skb_resources(priv);
-}
-
-static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
-{
-	struct txbd8 *txbdp;
-	struct gfar_private *priv = netdev_priv(tx_queue->dev);
-	int i, j;
-
-	txbdp = tx_queue->tx_bd_base;
-
-	for (i = 0; i < tx_queue->tx_ring_size; i++) {
-		if (!tx_queue->tx_skbuff[i])
-			continue;
-
-		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
-				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
-		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
-		     j++) {
-			txbdp++;
-			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
-				       be16_to_cpu(txbdp->length),
-				       DMA_TO_DEVICE);
-		}
-		txbdp++;
-		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
-		tx_queue->tx_skbuff[i] = NULL;
-	}
-	kfree(tx_queue->tx_skbuff);
-	tx_queue->tx_skbuff = NULL;
-}
-
-static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
-{
-	int i;
-
-	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
-
-	dev_kfree_skb(rx_queue->skb);
-
-	for (i = 0; i < rx_queue->rx_ring_size; i++) {
-		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
-
-		rxbdp->lstatus = 0;
-		rxbdp->bufPtr = 0;
-		rxbdp++;
-
-		if (!rxb->page)
-			continue;
-
-		dma_unmap_page(rx_queue->dev, rxb->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_page(rxb->page);
-
-		rxb->page = NULL;
-	}
-
-	kfree(rx_queue->rx_buff);
-	rx_queue->rx_buff = NULL;
-}
-
-/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff
- */
-static void free_skb_resources(struct gfar_private *priv)
-{
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	int i;
-
-	/* Go through all the buffer descriptors and free their data buffers */
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		struct netdev_queue *txq;
-
-		tx_queue = priv->tx_queue[i];
-		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
-		if (tx_queue->tx_skbuff)
-			free_skb_tx_queue(tx_queue);
-		netdev_tx_reset_queue(txq);
-	}
-
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-		if (rx_queue->rx_buff)
-			free_skb_rx_queue(rx_queue);
-	}
-
-	dma_free_coherent(priv->dev,
-			  sizeof(struct txbd8) * priv->total_tx_ring_size +
-			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
-			  priv->tx_queue[0]->tx_bd_base,
-			  priv->tx_queue[0]->tx_bd_dma_base);
-}
-
-void gfar_start(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-	int i = 0;
-
-	/* Enable Rx/Tx hw queues */
-	gfar_write(&regs->rqueue, priv->rqueue);
-	gfar_write(&regs->tqueue, priv->tqueue);
-
-	/* Initialize DMACTRL to have WWR and WOP */
-	tempval = gfar_read(&regs->dmactrl);
-	tempval |= DMACTRL_INIT_SETTINGS;
-	gfar_write(&regs->dmactrl, tempval);
-
-	/* Make sure we aren't stopped */
-	tempval = gfar_read(&regs->dmactrl);
-	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-	gfar_write(&regs->dmactrl, tempval);
-
-	for (i = 0; i < priv->num_grps; i++) {
-		regs = priv->gfargrp[i].regs;
-		/* Clear THLT/RHLT, so that the DMA starts polling now */
-		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
-		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
-	}
-
-	/* Enable Rx/Tx DMA */
-	tempval = gfar_read(&regs->maccfg1);
-	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
-	gfar_write(&regs->maccfg1, tempval);
-
-	gfar_ints_enable(priv);
-
-	netif_trans_update(priv->ndev); /* prevent tx timeout */
-}
-
-static void free_grp_irqs(struct gfar_priv_grp *grp)
-{
-	free_irq(gfar_irq(grp, TX)->irq, grp);
-	free_irq(gfar_irq(grp, RX)->irq, grp);
-	free_irq(gfar_irq(grp, ER)->irq, grp);
-}
-
-static int register_grp_irqs(struct gfar_priv_grp *grp)
-{
-	struct gfar_private *priv = grp->priv;
-	struct net_device *dev = priv->ndev;
-	int err;
-
-	/* If the device has multiple interrupts, register for
-	 * them.  Otherwise, only register for the one
-	 */
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		/* Install our interrupt handlers for Error,
-		 * Transmit, and Receive
-		 */
-		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
-				  gfar_irq(grp, ER)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, ER)->irq);
-
-			goto err_irq_fail;
-		}
-		enable_irq_wake(gfar_irq(grp, ER)->irq);
-
-		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
-				  gfar_irq(grp, TX)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, TX)->irq);
-			goto tx_irq_fail;
-		}
-		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
-				  gfar_irq(grp, RX)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, RX)->irq);
-			goto rx_irq_fail;
-		}
-		enable_irq_wake(gfar_irq(grp, RX)->irq);
-
-	} else {
-		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
-				  gfar_irq(grp, TX)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, TX)->irq);
-			goto err_irq_fail;
-		}
-		enable_irq_wake(gfar_irq(grp, TX)->irq);
-	}
-
-	return 0;
-
-rx_irq_fail:
-	free_irq(gfar_irq(grp, TX)->irq, grp);
-tx_irq_fail:
-	free_irq(gfar_irq(grp, ER)->irq, grp);
-err_irq_fail:
-	return err;
-
-}
-
-static void gfar_free_irq(struct gfar_private *priv)
-{
-	int i;
-
-	/* Free the IRQs */
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		for (i = 0; i < priv->num_grps; i++)
-			free_grp_irqs(&priv->gfargrp[i]);
-	} else {
-		for (i = 0; i < priv->num_grps; i++)
-			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
-				 &priv->gfargrp[i]);
-	}
-}
-
-static int gfar_request_irq(struct gfar_private *priv)
-{
-	int err, i, j;
-
-	for (i = 0; i < priv->num_grps; i++) {
-		err = register_grp_irqs(&priv->gfargrp[i]);
-		if (err) {
-			for (j = 0; j < i; j++)
-				free_grp_irqs(&priv->gfargrp[j]);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	int err;
-
-	gfar_mac_reset(priv);
-
-	err = gfar_alloc_skb_resources(ndev);
-	if (err)
-		return err;
-
-	gfar_init_tx_rx_base(priv);
-
-	smp_mb__before_atomic();
-	clear_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_atomic();
-
-	/* Start Rx/Tx DMA and enable the interrupts */
-	gfar_start(priv);
-
-	/* force link state update after mac reset */
-	priv->oldlink = 0;
-	priv->oldspeed = 0;
-	priv->oldduplex = -1;
-
-	phy_start(ndev->phydev);
-
-	enable_napi(priv);
-
-	netif_tx_wake_all_queues(ndev);
-
-	return 0;
-}
-
-/* Called when something needs to use the ethernet device
- * Returns 0 for success.
- */
-static int gfar_enet_open(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	int err;
-
-	err = init_phy(dev);
-	if (err)
-		return err;
-
-	err = gfar_request_irq(priv);
-	if (err)
-		return err;
-
-	err = startup_gfar(dev);
-	if (err)
-		return err;
-
-	return err;
-}
-
-static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
-{
-	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
-
-	memset(fcb, 0, GMAC_FCB_LEN);
-
-	return fcb;
-}
-
-static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
-				    int fcb_length)
-{
-	/* If we're here, it's a IP packet with a TCP or UDP
-	 * payload.  We set it to checksum, using a pseudo-header
-	 * we provide
-	 */
-	u8 flags = TXFCB_DEFAULT;
-
-	/* Tell the controller what the protocol is
-	 * And provide the already calculated phcs
-	 */
-	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
-		flags |= TXFCB_UDP;
-		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
-	} else
-		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
-
-	/* l3os is the distance between the start of the
-	 * frame (skb->data) and the start of the IP hdr.
-	 * l4os is the distance between the start of the
-	 * l3 hdr and the l4 hdr
-	 */
-	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
-	fcb->l4os = skb_network_header_len(skb);
-
-	fcb->flags = flags;
-}
-
-static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
-{
-	fcb->flags |= TXFCB_VLN;
-	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
-}
-
-static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
-				      struct txbd8 *base, int ring_size)
-{
-	struct txbd8 *new_bd = bdp + stride;
-
-	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
-}
-
-static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
-				      int ring_size)
-{
-	return skip_txbd(bdp, 1, base, ring_size);
-}
-
-/* eTSEC12: csum generation not supported for some fcb offsets */
-static inline bool gfar_csum_errata_12(struct gfar_private *priv,
-				       unsigned long fcb_addr)
-{
-	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
-	       (fcb_addr % 0x20) > 0x18);
-}
-
-/* eTSEC76: csum generation for frames larger than 2500 may
- * cause excess delays before start of transmission
- */
-static inline bool gfar_csum_errata_76(struct gfar_private *priv,
-				       unsigned int len)
-{
-	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
-	       (len > 2500));
-}
-
-/* This is called by the kernel when a frame is ready for transmission.
- * It is pointed to by the dev->hard_start_xmit function pointer
- */
-static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct netdev_queue *txq;
-	struct gfar __iomem *regs = NULL;
-	struct txfcb *fcb = NULL;
-	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
-	u32 lstatus;
-	skb_frag_t *frag;
-	int i, rq = 0;
-	int do_tstamp, do_csum, do_vlan;
-	u32 bufaddr;
-	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
-
-	rq = skb->queue_mapping;
-	tx_queue = priv->tx_queue[rq];
-	txq = netdev_get_tx_queue(dev, rq);
-	base = tx_queue->tx_bd_base;
-	regs = tx_queue->grp->regs;
-
-	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
-	do_vlan = skb_vlan_tag_present(skb);
-	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-		    priv->hwts_tx_en;
-
-	if (do_csum || do_vlan)
-		fcb_len = GMAC_FCB_LEN;
-
-	/* check if time stamp should be generated */
-	if (unlikely(do_tstamp))
-		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
-
-	/* make space for additional header when fcb is needed */
-	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
-		struct sk_buff *skb_new;
-
-		skb_new = skb_realloc_headroom(skb, fcb_len);
-		if (!skb_new) {
-			dev->stats.tx_errors++;
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-
-		if (skb->sk)
-			skb_set_owner_w(skb_new, skb->sk);
-		dev_consume_skb_any(skb);
-		skb = skb_new;
-	}
-
-	/* total number of fragments in the SKB */
-	nr_frags = skb_shinfo(skb)->nr_frags;
-
-	/* calculate the required number of TxBDs for this skb */
-	if (unlikely(do_tstamp))
-		nr_txbds = nr_frags + 2;
-	else
-		nr_txbds = nr_frags + 1;
-
-	/* check if there is space to queue this packet */
-	if (nr_txbds > tx_queue->num_txbdfree) {
-		/* no space, stop the queue */
-		netif_tx_stop_queue(txq);
-		dev->stats.tx_fifo_errors++;
-		return NETDEV_TX_BUSY;
-	}
-
-	/* Update transmit stats */
-	bytes_sent = skb->len;
-	tx_queue->stats.tx_bytes += bytes_sent;
-	/* keep Tx bytes on wire for BQL accounting */
-	GFAR_CB(skb)->bytes_sent = bytes_sent;
-	tx_queue->stats.tx_packets++;
-
-	txbdp = txbdp_start = tx_queue->cur_tx;
-	lstatus = be32_to_cpu(txbdp->lstatus);
-
-	/* Add TxPAL between FCB and frame if required */
-	if (unlikely(do_tstamp)) {
-		skb_push(skb, GMAC_TXPAL_LEN);
-		memset(skb->data, 0, GMAC_TXPAL_LEN);
-	}
-
-	/* Add TxFCB if required */
-	if (fcb_len) {
-		fcb = gfar_add_fcb(skb);
-		lstatus |= BD_LFLAG(TXBD_TOE);
-	}
-
-	/* Set up checksumming */
-	if (do_csum) {
-		gfar_tx_checksum(skb, fcb, fcb_len);
-
-		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
-		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
-			__skb_pull(skb, GMAC_FCB_LEN);
-			skb_checksum_help(skb);
-			if (do_vlan || do_tstamp) {
-				/* put back a new fcb for vlan/tstamp TOE */
-				fcb = gfar_add_fcb(skb);
-			} else {
-				/* Tx TOE not used */
-				lstatus &= ~(BD_LFLAG(TXBD_TOE));
-				fcb = NULL;
-			}
-		}
-	}
-
-	if (do_vlan)
-		gfar_tx_vlan(skb, fcb);
-
-	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
-				 DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
-		goto dma_map_err;
-
-	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
-
-	/* Time stamp insertion requires one additional TxBD */
-	if (unlikely(do_tstamp))
-		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
-						 tx_queue->tx_ring_size);
-
-	if (likely(!nr_frags)) {
-		if (likely(!do_tstamp))
-			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-	} else {
-		u32 lstatus_start = lstatus;
-
-		/* Place the fragment addresses and lengths into the TxBDs */
-		frag = &skb_shinfo(skb)->frags[0];
-		for (i = 0; i < nr_frags; i++, frag++) {
-			unsigned int size;
-
-			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-
-			size = skb_frag_size(frag);
-
-			lstatus = be32_to_cpu(txbdp->lstatus) | size |
-				  BD_LFLAG(TXBD_READY);
-
-			/* Handle the last BD specially */
-			if (i == nr_frags - 1)
-				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-
-			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
-						   size, DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
-				goto dma_map_err;
-
-			/* set the TxBD length and buffer pointer */
-			txbdp->bufPtr = cpu_to_be32(bufaddr);
-			txbdp->lstatus = cpu_to_be32(lstatus);
-		}
-
-		lstatus = lstatus_start;
-	}
-
-	/* If time stamping is requested one additional TxBD must be set up. The
-	 * first TxBD points to the FCB and must have a data length of
-	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
-	 * the full frame length.
-	 */
-	if (unlikely(do_tstamp)) {
-		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
-
-		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
-		bufaddr += fcb_len;
-
-		lstatus_ts |= BD_LFLAG(TXBD_READY) |
-			      (skb_headlen(skb) - fcb_len);
-		if (!nr_frags)
-			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-
-		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
-		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
-		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
-
-		/* Setup tx hardware time stamping */
-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		fcb->ptp = 1;
-	} else {
-		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
-	}
-
-	netdev_tx_sent_queue(txq, bytes_sent);
-
-	gfar_wmb();
-
-	txbdp_start->lstatus = cpu_to_be32(lstatus);
-
-	gfar_wmb(); /* force lstatus write before tx_skbuff */
-
-	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
-
-	/* Update the current skb pointer to the next entry we will use
-	 * (wrapping if necessary)
-	 */
-	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
-			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
-
-	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-
-	/* We can work in parallel with gfar_clean_tx_ring(), except
-	 * when modifying num_txbdfree. Note that we didn't grab the lock
-	 * when we were reading the num_txbdfree and checking for available
-	 * space, that's because outside of this function it can only grow.
-	 */
-	spin_lock_bh(&tx_queue->txlock);
-	/* reduce TxBD free count */
-	tx_queue->num_txbdfree -= (nr_txbds);
-	spin_unlock_bh(&tx_queue->txlock);
-
-	/* If the next BD still needs to be cleaned up, then the bds
-	 * are full.  We need to tell the kernel to stop sending us stuff.
-	 */
-	if (!tx_queue->num_txbdfree) {
-		netif_tx_stop_queue(txq);
-
-		dev->stats.tx_fifo_errors++;
-	}
-
-	/* Tell the DMA to go go go */
-	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
-
-	return NETDEV_TX_OK;
-
-dma_map_err:
-	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
-	if (do_tstamp)
-		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-	for (i = 0; i < nr_frags; i++) {
-		lstatus = be32_to_cpu(txbdp->lstatus);
-		if (!(lstatus & BD_LFLAG(TXBD_READY)))
-			break;
-
-		lstatus &= ~BD_LFLAG(TXBD_READY);
-		txbdp->lstatus = cpu_to_be32(lstatus);
-		bufaddr = be32_to_cpu(txbdp->bufPtr);
-		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
-			       DMA_TO_DEVICE);
-		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-	}
-	gfar_wmb();
-	dev_kfree_skb_any(skb);
-	return NETDEV_TX_OK;
-}
-
-/* Stops the kernel queue, and halts the controller */
-static int gfar_close(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	cancel_work_sync(&priv->reset_task);
-	stop_gfar(dev);
-
-	/* Disconnect from the PHY */
-	phy_disconnect(dev->phydev);
-
-	gfar_free_irq(priv);
-
-	return 0;
-}
-
-/* Changes the mac address if the controller is not running. */
-static int gfar_set_mac_address(struct net_device *dev)
-{
-	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
-
-	return 0;
-}
-
-static int gfar_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
-		cpu_relax();
-
-	if (dev->flags & IFF_UP)
-		stop_gfar(dev);
-
-	dev->mtu = new_mtu;
-
-	if (dev->flags & IFF_UP)
-		startup_gfar(dev);
-
-	clear_bit_unlock(GFAR_RESETTING, &priv->state);
-
-	return 0;
-}
-
-void reset_gfar(struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-
-	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
-		cpu_relax();
-
-	stop_gfar(ndev);
-	startup_gfar(ndev);
-
-	clear_bit_unlock(GFAR_RESETTING, &priv->state);
-}
-
-/* gfar_reset_task gets scheduled when a packet has not been
- * transmitted after a set amount of time.
- * For now, assume that clearing out all the structures, and
- * starting over will fix the problem.
- */
-static void gfar_reset_task(struct work_struct *work)
-{
-	struct gfar_private *priv = container_of(work, struct gfar_private,
-						 reset_task);
-	reset_gfar(priv->ndev);
-}
-
-static void gfar_timeout(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	dev->stats.tx_errors++;
-	schedule_work(&priv->reset_task);
-}
-
-/* Interrupt Handler for Transmit complete */
-static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
-{
-	struct net_device *dev = tx_queue->dev;
-	struct netdev_queue *txq;
-	struct gfar_private *priv = netdev_priv(dev);
-	struct txbd8 *bdp, *next = NULL;
-	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = tx_queue->tx_bd_base;
-	struct sk_buff *skb;
-	int skb_dirtytx;
-	int tx_ring_size = tx_queue->tx_ring_size;
-	int frags = 0, nr_txbds = 0;
-	int i;
-	int howmany = 0;
-	int tqi = tx_queue->qindex;
-	unsigned int bytes_sent = 0;
-	u32 lstatus;
-	size_t buflen;
-
-	txq = netdev_get_tx_queue(dev, tqi);
-	bdp = tx_queue->dirty_tx;
-	skb_dirtytx = tx_queue->skb_dirtytx;
-
-	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
-
-		frags = skb_shinfo(skb)->nr_frags;
-
-		/* When time stamping, one additional TxBD must be freed.
-		 * Also, we need to dma_unmap_single() the TxPAL.
-		 */
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
-			nr_txbds = frags + 2;
-		else
-			nr_txbds = frags + 1;
-
-		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
-
-		lstatus = be32_to_cpu(lbdp->lstatus);
-
-		/* Only clean completed frames */
-		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
-		    (lstatus & BD_LENGTH_MASK))
-			break;
-
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
-			next = next_txbd(bdp, base, tx_ring_size);
-			buflen = be16_to_cpu(next->length) +
-				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
-		} else
-			buflen = be16_to_cpu(bdp->length);
-
-		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-				 buflen, DMA_TO_DEVICE);
-
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
-			struct skb_shared_hwtstamps shhwtstamps;
-			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
-					  ~0x7UL);
-
-			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
-			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
-			skb_tstamp_tx(skb, &shhwtstamps);
-			gfar_clear_txbd_status(bdp);
-			bdp = next;
-		}
-
-		gfar_clear_txbd_status(bdp);
-		bdp = next_txbd(bdp, base, tx_ring_size);
-
-		for (i = 0; i < frags; i++) {
-			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
-				       be16_to_cpu(bdp->length),
-				       DMA_TO_DEVICE);
-			gfar_clear_txbd_status(bdp);
-			bdp = next_txbd(bdp, base, tx_ring_size);
-		}
-
-		bytes_sent += GFAR_CB(skb)->bytes_sent;
-
-		dev_kfree_skb_any(skb);
-
-		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
-
-		skb_dirtytx = (skb_dirtytx + 1) &
-			      TX_RING_MOD_MASK(tx_ring_size);
-
-		howmany++;
-		spin_lock(&tx_queue->txlock);
-		tx_queue->num_txbdfree += nr_txbds;
-		spin_unlock(&tx_queue->txlock);
-	}
-
-	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (tx_queue->num_txbdfree &&
-	    netif_tx_queue_stopped(txq) &&
-	    !(test_bit(GFAR_DOWN, &priv->state)))
-		netif_wake_subqueue(priv->ndev, tqi);
-
-	/* Update dirty indicators */
-	tx_queue->skb_dirtytx = skb_dirtytx;
-	tx_queue->dirty_tx = bdp;
-
-	netdev_tx_completed_queue(txq, howmany, bytes_sent);
-}
-
-static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
-{
-	struct page *page;
-	dma_addr_t addr;
-
-	page = dev_alloc_page();
-	if (unlikely(!page))
-		return false;
-
-	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
-		__free_page(page);
-
-		return false;
-	}
-
-	rxb->dma = addr;
-	rxb->page = page;
-	rxb->page_offset = 0;
-
-	return true;
-}
-
-static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
-{
-	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
-	struct gfar_extra_stats *estats = &priv->extra_stats;
-
-	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
-	atomic64_inc(&estats->rx_alloc_err);
-}
-
-static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
-				int alloc_cnt)
-{
-	struct rxbd8 *bdp;
-	struct gfar_rx_buff *rxb;
-	int i;
-
-	i = rx_queue->next_to_use;
-	bdp = &rx_queue->rx_bd_base[i];
-	rxb = &rx_queue->rx_buff[i];
-
-	while (alloc_cnt--) {
-		/* try reuse page */
-		if (unlikely(!rxb->page)) {
-			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
-				gfar_rx_alloc_err(rx_queue);
-				break;
-			}
-		}
-
-		/* Setup the new RxBD */
-		gfar_init_rxbdp(rx_queue, bdp,
-				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
-
-		/* Update to the next pointer */
-		bdp++;
-		rxb++;
-
-		if (unlikely(++i == rx_queue->rx_ring_size)) {
-			i = 0;
-			bdp = rx_queue->rx_bd_base;
-			rxb = rx_queue->rx_buff;
-		}
-	}
-
-	rx_queue->next_to_use = i;
-	rx_queue->next_to_alloc = i;
-}
-
-static void count_errors(u32 lstatus, struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &ndev->stats;
-	struct gfar_extra_stats *estats = &priv->extra_stats;
-
-	/* If the packet was truncated, none of the other errors matter */
-	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
-		stats->rx_length_errors++;
-
-		atomic64_inc(&estats->rx_trunc);
-
-		return;
-	}
-	/* Count the errors, if there were any */
-	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
-		stats->rx_length_errors++;
-
-		if (lstatus & BD_LFLAG(RXBD_LARGE))
-			atomic64_inc(&estats->rx_large);
-		else
-			atomic64_inc(&estats->rx_short);
-	}
-	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
-		stats->rx_frame_errors++;
-		atomic64_inc(&estats->rx_nonoctet);
-	}
-	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
-		atomic64_inc(&estats->rx_crcerr);
-		stats->rx_crc_errors++;
-	}
-	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
-		atomic64_inc(&estats->rx_overrun);
-		stats->rx_over_errors++;
-	}
-}
-
-irqreturn_t gfar_receive(int irq, void *grp_id)
-{
-	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
-	unsigned long flags;
-	u32 imask, ievent;
-
-	ievent = gfar_read(&grp->regs->ievent);
-
-	if (unlikely(ievent & IEVENT_FGPI)) {
-		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
-		return IRQ_HANDLED;
-	}
-
-	if (likely(napi_schedule_prep(&grp->napi_rx))) {
-		spin_lock_irqsave(&grp->grplock, flags);
-		imask = gfar_read(&grp->regs->imask);
-		imask &= IMASK_RX_DISABLED;
-		gfar_write(&grp->regs->imask, imask);
-		spin_unlock_irqrestore(&grp->grplock, flags);
-		__napi_schedule(&grp->napi_rx);
-	} else {
-		/* Clear IEVENT, so interrupts aren't called again
-		 * because of the packets that have already arrived.
-		 */
-		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
-	}
-
-	return IRQ_HANDLED;
-}
-
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *grp_id)
-{
-	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
-	unsigned long flags;
-	u32 imask;
-
-	if (likely(napi_schedule_prep(&grp->napi_tx))) {
-		spin_lock_irqsave(&grp->grplock, flags);
-		imask = gfar_read(&grp->regs->imask);
-		imask &= IMASK_TX_DISABLED;
-		gfar_write(&grp->regs->imask, imask);
-		spin_unlock_irqrestore(&grp->grplock, flags);
-		__napi_schedule(&grp->napi_tx);
-	} else {
-		/* Clear IEVENT, so interrupts aren't called again
-		 * because of the packets that have already arrived.
-		 */
-		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
-			     struct sk_buff *skb, bool first)
-{
-	int size = lstatus & BD_LENGTH_MASK;
-	struct page *page = rxb->page;
-
-	if (likely(first)) {
-		skb_put(skb, size);
-	} else {
-		/* the last fragments' length contains the full frame length */
-		if (lstatus & BD_LFLAG(RXBD_LAST))
-			size -= skb->len;
-
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-				rxb->page_offset + RXBUF_ALIGNMENT,
-				size, GFAR_RXB_TRUESIZE);
-	}
-
-	/* try reuse page */
-	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
-		return false;
-
-	/* change offset to the other half */
-	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
-
-	page_ref_inc(page);
-
-	return true;
-}
-
-static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
-			       struct gfar_rx_buff *old_rxb)
-{
-	struct gfar_rx_buff *new_rxb;
-	u16 nta = rxq->next_to_alloc;
-
-	new_rxb = &rxq->rx_buff[nta];
-
-	/* find next buf that can reuse a page */
-	nta++;
-	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
-
-	/* copy page reference */
-	*new_rxb = *old_rxb;
-
-	/* sync for use by the device */
-	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
-					 old_rxb->page_offset,
-					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
-}
-
-static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
-					    u32 lstatus, struct sk_buff *skb)
-{
-	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
-	struct page *page = rxb->page;
-	bool first = false;
-
-	if (likely(!skb)) {
-		void *buff_addr = page_address(page) + rxb->page_offset;
-
-		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
-		if (unlikely(!skb)) {
-			gfar_rx_alloc_err(rx_queue);
-			return NULL;
-		}
-		skb_reserve(skb, RXBUF_ALIGNMENT);
-		first = true;
-	}
-
-	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
-				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
-
-	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
-		/* reuse the free half of the page */
-		gfar_reuse_rx_page(rx_queue, rxb);
-	} else {
-		/* page cannot be reused, unmap it */
-		dma_unmap_page(rx_queue->dev, rxb->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-	}
-
-	/* clear rxb content */
-	rxb->page = NULL;
-
-	return skb;
-}
-
-static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
-{
-	/* If valid headers were found, and valid sums
-	 * were verified, then we tell the kernel that no
-	 * checksumming is necessary.  Otherwise, it is [FIXME]
-	 */
-	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
-	    (RXFCB_CIP | RXFCB_CTU))
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	else
-		skb_checksum_none_assert(skb);
-}
-
-/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct rxfcb *fcb = NULL;
-
-	/* fcb is at the beginning if exists */
-	fcb = (struct rxfcb *)skb->data;
-
-	/* Remove the FCB from the skb
-	 * Remove the padded bytes, if there are any
-	 */
-	if (priv->uses_rxfcb)
-		skb_pull(skb, GMAC_FCB_LEN);
-
-	/* Get receive timestamp from the skb */
-	if (priv->hwts_rx_en) {
-		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-		u64 *ns = (u64 *) skb->data;
-
-		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
-	}
-
-	if (priv->padding)
-		skb_pull(skb, priv->padding);
-
-	/* Trim off the FCS */
-	pskb_trim(skb, skb->len - ETH_FCS_LEN);
-
-	if (ndev->features & NETIF_F_RXCSUM)
-		gfar_rx_checksum(skb, fcb);
-
-	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
-	 * Even if vlan rx accel is disabled, on some chips
-	 * RXFCB_VLN is pseudo randomly set.
-	 */
-	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-				       be16_to_cpu(fcb->vlctl));
-}
-
-/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
- * until the budget/quota has been reached.  Returns the number
- * of frames handled
- */
-int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
-{
-	struct net_device *ndev = rx_queue->ndev;
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct rxbd8 *bdp;
-	int i, howmany = 0;
-	struct sk_buff *skb = rx_queue->skb;
-	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
-	unsigned int total_bytes = 0, total_pkts = 0;
-
-	/* Get the first full descriptor */
-	i = rx_queue->next_to_clean;
-
-	while (rx_work_limit--) {
-		u32 lstatus;
-
-		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
-			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
-			cleaned_cnt = 0;
-		}
-
-		bdp = &rx_queue->rx_bd_base[i];
-		lstatus = be32_to_cpu(bdp->lstatus);
-		if (lstatus & BD_LFLAG(RXBD_EMPTY))
-			break;
-
-		/* order rx buffer descriptor reads */
-		rmb();
-
-		/* fetch next to clean buffer from the ring */
-		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
-		if (unlikely(!skb))
-			break;
-
-		cleaned_cnt++;
-		howmany++;
-
-		if (unlikely(++i == rx_queue->rx_ring_size))
-			i = 0;
-
-		rx_queue->next_to_clean = i;
-
-		/* fetch next buffer if not the last in frame */
-		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
-			continue;
-
-		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
-			count_errors(lstatus, ndev);
-
-			/* discard faulty buffer */
-			dev_kfree_skb(skb);
-			skb = NULL;
-			rx_queue->stats.rx_dropped++;
-			continue;
-		}
-
-		gfar_process_frame(ndev, skb);
-
-		/* Increment the number of packets */
-		total_pkts++;
-		total_bytes += skb->len;
-
-		skb_record_rx_queue(skb, rx_queue->qindex);
-
-		skb->protocol = eth_type_trans(skb, ndev);
-
-		/* Send the packet up the stack */
-		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
-
-		skb = NULL;
-	}
-
-	/* Store incomplete frames for completion */
-	rx_queue->skb = skb;
-
-	rx_queue->stats.rx_packets += total_pkts;
-	rx_queue->stats.rx_bytes += total_bytes;
-
-	if (cleaned_cnt)
-		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
-
-	/* Update Last Free RxBD pointer for LFC */
-	if (unlikely(priv->tx_actual_en)) {
-		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
-
-		gfar_write(rx_queue->rfbptr, bdp_dma);
-	}
-
-	return howmany;
-}
-
-static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_rx);
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
-	int work_done = 0;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_RX_MASK);
-
-	work_done = gfar_clean_rx_ring(rx_queue, budget);
-
-	if (work_done < budget) {
-		u32 imask;
-		napi_complete_done(napi, work_done);
-		/* Clear the halt bit in RSTAT */
-		gfar_write(&regs->rstat, gfargrp->rstat);
-
-		spin_lock_irq(&gfargrp->grplock);
-		imask = gfar_read(&regs->imask);
-		imask |= IMASK_RX_DEFAULT;
-		gfar_write(&regs->imask, imask);
-		spin_unlock_irq(&gfargrp->grplock);
-	}
-
-	return work_done;
-}
-
-static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_tx);
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
-	u32 imask;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_TX_MASK);
-
-	/* run Tx cleanup to completion */
-	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
-		gfar_clean_tx_ring(tx_queue);
-
-	napi_complete(napi);
-
-	spin_lock_irq(&gfargrp->grplock);
-	imask = gfar_read(&regs->imask);
-	imask |= IMASK_TX_DEFAULT;
-	gfar_write(&regs->imask, imask);
-	spin_unlock_irq(&gfargrp->grplock);
-
-	return 0;
-}
-
-static int gfar_poll_rx(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_rx);
-	struct gfar_private *priv = gfargrp->priv;
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	int work_done = 0, work_done_per_q = 0;
-	int i, budget_per_q = 0;
-	unsigned long rstat_rxf;
-	int num_act_queues;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_RX_MASK);
-
-	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
-
-	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
-	if (num_act_queues)
-		budget_per_q = budget/num_act_queues;
-
-	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
-		/* skip queue if not active */
-		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
-			continue;
-
-		rx_queue = priv->rx_queue[i];
-		work_done_per_q =
-			gfar_clean_rx_ring(rx_queue, budget_per_q);
-		work_done += work_done_per_q;
-
-		/* finished processing this queue */
-		if (work_done_per_q < budget_per_q) {
-			/* clear active queue hw indication */
-			gfar_write(&regs->rstat,
-				   RSTAT_CLEAR_RXF0 >> i);
-			num_act_queues--;
-
-			if (!num_act_queues)
-				break;
-		}
-	}
-
-	if (!num_act_queues) {
-		u32 imask;
-		napi_complete_done(napi, work_done);
-
-		/* Clear the halt bit in RSTAT */
-		gfar_write(&regs->rstat, gfargrp->rstat);
-
-		spin_lock_irq(&gfargrp->grplock);
-		imask = gfar_read(&regs->imask);
-		imask |= IMASK_RX_DEFAULT;
-		gfar_write(&regs->imask, imask);
-		spin_unlock_irq(&gfargrp->grplock);
-	}
-
-	return work_done;
-}
-
-static int gfar_poll_tx(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_tx);
-	struct gfar_private *priv = gfargrp->priv;
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	int has_tx_work = 0;
-	int i;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_TX_MASK);
-
-	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
-		tx_queue = priv->tx_queue[i];
-		/* run Tx cleanup to completion */
-		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
-			gfar_clean_tx_ring(tx_queue);
-			has_tx_work = 1;
-		}
-	}
-
-	if (!has_tx_work) {
-		u32 imask;
-		napi_complete(napi);
-
-		spin_lock_irq(&gfargrp->grplock);
-		imask = gfar_read(&regs->imask);
-		imask |= IMASK_TX_DEFAULT;
-		gfar_write(&regs->imask, imask);
-		spin_unlock_irq(&gfargrp->grplock);
-	}
-
-	return 0;
-}
-
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts.  It's not called while
- * the interrupt routine is executing.
- */
-static void gfar_netpoll(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	int i;
-
-	/* If the device has multiple interrupts, run tx/rx */
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		for (i = 0; i < priv->num_grps; i++) {
-			struct gfar_priv_grp *grp = &priv->gfargrp[i];
-
-			disable_irq(gfar_irq(grp, TX)->irq);
-			disable_irq(gfar_irq(grp, RX)->irq);
-			disable_irq(gfar_irq(grp, ER)->irq);
-			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
-			enable_irq(gfar_irq(grp, ER)->irq);
-			enable_irq(gfar_irq(grp, RX)->irq);
-			enable_irq(gfar_irq(grp, TX)->irq);
-		}
-	} else {
-		for (i = 0; i < priv->num_grps; i++) {
-			struct gfar_priv_grp *grp = &priv->gfargrp[i];
-
-			disable_irq(gfar_irq(grp, TX)->irq);
-			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
-			enable_irq(gfar_irq(grp, TX)->irq);
-		}
-	}
-}
-#endif
-
-/* The interrupt handler for devices with one interrupt */
-static irqreturn_t gfar_interrupt(int irq, void *grp_id)
-{
-	struct gfar_priv_grp *gfargrp = grp_id;
-
-	/* Save ievent for future reference */
-	u32 events = gfar_read(&gfargrp->regs->ievent);
-
-	/* Check for reception */
-	if (events & IEVENT_RX_MASK)
-		gfar_receive(irq, grp_id);
-
-	/* Check for transmit completion */
-	if (events & IEVENT_TX_MASK)
-		gfar_transmit(irq, grp_id);
-
-	/* Check for errors */
-	if (events & IEVENT_ERR_MASK)
-		gfar_error(irq, grp_id);
-
-	return IRQ_HANDLED;
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state.  The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void adjust_link(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = dev->phydev;
-
-	if (unlikely(phydev->link != priv->oldlink ||
-		     (phydev->link && (phydev->duplex != priv->oldduplex ||
-				       phydev->speed != priv->oldspeed))))
-		gfar_update_link_state(priv);
-}
-
-/* Update the hash table based on the current list of multicast
-/* Update the hash table based on the current list of multicast
- * addresses we subscribe to.  Also, change the promiscuity of
- * the device based on the flags (this function is called
- * whenever dev->flags is changed)
- */
-static void gfar_set_multi(struct net_device *dev)
-{
-        struct netdev_hw_addr *ha;
-        struct gfar_private *priv = netdev_priv(dev);
-        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-        u32 tempval;
-
-        if (dev->flags & IFF_PROMISC) {
-                /* Set RCTRL to PROM */
-                tempval = gfar_read(&regs->rctrl);
-                tempval |= RCTRL_PROM;
-                gfar_write(&regs->rctrl, tempval);
-        } else {
-                /* Set RCTRL to not PROM */
-                tempval = gfar_read(&regs->rctrl);
-                tempval &= ~(RCTRL_PROM);
-                gfar_write(&regs->rctrl, tempval);
-        }
-
-        if (dev->flags & IFF_ALLMULTI) {
-                /* Set the hash to rx all multicast frames */
-                gfar_write(&regs->igaddr0, 0xffffffff);
-                gfar_write(&regs->igaddr1, 0xffffffff);
-                gfar_write(&regs->igaddr2, 0xffffffff);
-                gfar_write(&regs->igaddr3, 0xffffffff);
-                gfar_write(&regs->igaddr4, 0xffffffff);
-                gfar_write(&regs->igaddr5, 0xffffffff);
-                gfar_write(&regs->igaddr6, 0xffffffff);
-                gfar_write(&regs->igaddr7, 0xffffffff);
-                gfar_write(&regs->gaddr0, 0xffffffff);
-                gfar_write(&regs->gaddr1, 0xffffffff);
-                gfar_write(&regs->gaddr2, 0xffffffff);
-                gfar_write(&regs->gaddr3, 0xffffffff);
-                gfar_write(&regs->gaddr4, 0xffffffff);
-                gfar_write(&regs->gaddr5, 0xffffffff);
-                gfar_write(&regs->gaddr6, 0xffffffff);
-                gfar_write(&regs->gaddr7, 0xffffffff);
-        } else {
-                int em_num;
-                int idx;
-
-                /* zero out the hash */
-                gfar_write(&regs->igaddr0, 0x0);
-                gfar_write(&regs->igaddr1, 0x0);
-                gfar_write(&regs->igaddr2, 0x0);
-                gfar_write(&regs->igaddr3, 0x0);
-                gfar_write(&regs->igaddr4, 0x0);
-                gfar_write(&regs->igaddr5, 0x0);
-                gfar_write(&regs->igaddr6, 0x0);
-                gfar_write(&regs->igaddr7, 0x0);
-                gfar_write(&regs->gaddr0, 0x0);
-                gfar_write(&regs->gaddr1, 0x0);
-                gfar_write(&regs->gaddr2, 0x0);
-                gfar_write(&regs->gaddr3, 0x0);
-                gfar_write(&regs->gaddr4, 0x0);
-                gfar_write(&regs->gaddr5, 0x0);
-                gfar_write(&regs->gaddr6, 0x0);
-                gfar_write(&regs->gaddr7, 0x0);
-
-                /* If we have extended hash tables, we need to
-                 * clear the exact match registers to prepare for
-                 * setting them
-                 */
-                if (priv->extended_hash) {
-                        em_num = GFAR_EM_NUM + 1;
-                        gfar_clear_exact_match(dev);
-                        idx = 1;
-                } else {
-                        idx = 0;
-                        em_num = 0;
-                }
-
-                if (netdev_mc_empty(dev))
-                        return;
-
-                /* Parse the list, and set the appropriate bits */
-                netdev_for_each_mc_addr(ha, dev) {
-                        if (idx < em_num) {
-                                gfar_set_mac_for_addr(dev, idx, ha->addr);
-                                idx++;
-                        } else
-                                gfar_set_hash_for_addr(dev, ha->addr);
-                }
-        }
-}
-
-
-/* Clears each of the exact match registers to zero, so they
- * don't interfere with normal reception
- */
-static void gfar_clear_exact_match(struct net_device *dev)
-{
-        int idx;
-        static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-
-        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
-                gfar_set_mac_for_addr(dev, idx, zero_arr);
-}
-
-/* Set the appropriate hash bit for the given addr */
-/* The algorithm works like so:
- * 1) Take the Destination Address (ie the multicast address), and
- *    do a CRC on it (little endian), and reverse the bits of the
- *    result.
- * 2) Use the 8 most significant bits as a hash into a 256-entry
- *    table.  The table is controlled through 8 32-bit registers:
- *    gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
- *    entry 255.  This means that the 3 most significant bits of the
- *    hash index select which gaddr register to use, and the other
- *    5 bits indicate which bit (assuming an IBM numbering scheme,
- *    which for PowerPC (tm) is usually the case) in the register
- *    holds the entry.
- */
-static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
-{
-        u32 tempval;
-        struct gfar_private *priv = netdev_priv(dev);
-        u32 result = ether_crc(ETH_ALEN, addr);
-        int width = priv->hash_width;
-        u8 whichbit = (result >> (32 - width)) & 0x1f;
-        u8 whichreg = result >> (32 - width + 5);
-        u32 value = (1 << (31-whichbit));
-
-        tempval = gfar_read(priv->hash_regs[whichreg]);
-        tempval |= value;
-        gfar_write(priv->hash_regs[whichreg], tempval);
-}
-
-
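The hash-index arithmetic in gfar_set_hash_for_addr() can be checked outside the kernel. The sketch below assumes ether_crc() expands to bitrev32(crc32_le(~0, addr, len)), as linux/crc32.h defines it; crc32_le and bitrev32 here are local reimplementations of those helpers, and the example address is arbitrary:

#include <stdint.h>
#include <stdio.h>

/* Bitwise reflected CRC-32 over the address, i.e. crc32_le(~0, p, len) */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
{
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
}

static uint32_t bitrev32(uint32_t x)
{
        x = ((x & 0x55555555u) << 1) | ((x >> 1) & 0x55555555u);
        x = ((x & 0x33333333u) << 2) | ((x >> 2) & 0x33333333u);
        x = ((x & 0x0f0f0f0fu) << 4) | ((x >> 4) & 0x0f0f0f0fu);
        return (x << 24) | ((x & 0xff00u) << 8) |
               ((x >> 8) & 0xff00u) | (x >> 24);
}

int main(void)
{
        /* 01:00:5e:00:00:01, the all-hosts IPv4 multicast group */
        const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        int width = 8;          /* 256-entry table spread over gaddr0-7 */
        uint32_t result = bitrev32(crc32_le(~0u, addr, 6));  /* ether_crc() */
        uint8_t whichbit = (result >> (32 - width)) & 0x1f;  /* low 5 bits */
        uint8_t whichreg = result >> (32 - width + 5);       /* high 3 bits */

        printf("hash index %u -> gaddr%u bit %u (mask 0x%08x)\n",
               (unsigned)(result >> (32 - width)), (unsigned)whichreg,
               (unsigned)whichbit, 1u << (31 - whichbit));
        return 0;
}

Running it prints the register/bit pair a given multicast address would set, which is handy when verifying hash behavior against a live controller.
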
-/* There are multiple MAC Address register pairs on some controllers.
- * This function sets the numth pair to a given address.
- */
-static void gfar_set_mac_for_addr(struct net_device *dev, int num,
-                                  const u8 *addr)
-{
-        struct gfar_private *priv = netdev_priv(dev);
-        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-        u32 tempval;
-        u32 __iomem *macptr = &regs->macstnaddr1;
-
-        macptr += num*2;
-
-        /* For a station address of 0x12345678ABCD in transmission
-         * order (BE), MACnADDR1 is set to 0xCDAB7856 and
-         * MACnADDR2 is set to 0x34120000.
-         */
-        tempval = (addr[5] << 24) | (addr[4] << 16) |
-                  (addr[3] << 8) | addr[2];
-
-        gfar_write(macptr, tempval);
-
-        tempval = (addr[1] << 24) | (addr[0] << 16);
-
-        gfar_write(macptr+1, tempval);
-}
-
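The byte-reversed packing above is easy to mis-read, so here is a small standalone sketch (pack_station_addr is a hypothetical name, not a driver function) that reproduces the 0xCDAB7856 / 0x34120000 example from the comment:

#include <stdint.h>
#include <stdio.h>

/* Pack a transmission-order (big-endian) MAC address into the two
 * 32-bit exact-match register values, reversed byte order, the way
 * gfar_set_mac_for_addr() does before calling gfar_write().
 */
static void pack_station_addr(const uint8_t addr[6],
                              uint32_t *addr1, uint32_t *addr2)
{
        *addr1 = ((uint32_t)addr[5] << 24) | ((uint32_t)addr[4] << 16) |
                 ((uint32_t)addr[3] << 8) | addr[2];
        *addr2 = ((uint32_t)addr[1] << 24) | ((uint32_t)addr[0] << 16);
}

int main(void)
{
        const uint8_t mac[6] = { 0x12, 0x34, 0x56, 0x78, 0xab, 0xcd };
        uint32_t a1, a2;

        pack_station_addr(mac, &a1, &a2);
        /* Expect MACnADDR1 = 0xcdab7856, MACnADDR2 = 0x34120000 */
        printf("MACnADDR1 = 0x%08x, MACnADDR2 = 0x%08x\n",
               (unsigned)a1, (unsigned)a2);
        return 0;
}
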
-/* GFAR error interrupt handler */
-static irqreturn_t gfar_error(int irq, void *grp_id)
-{
-        struct gfar_priv_grp *gfargrp = grp_id;
-        struct gfar __iomem *regs = gfargrp->regs;
-        struct gfar_private *priv = gfargrp->priv;
-        struct net_device *dev = priv->ndev;
-
-        /* Save ievent for future reference */
-        u32 events = gfar_read(&regs->ievent);
-
-        /* Clear IEVENT */
-        gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
-
-        /* Magic Packet is not an error. */
-        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
-            (events & IEVENT_MAG))
-                events &= ~IEVENT_MAG;
-
-        /* Hmm... */
-        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
-                netdev_dbg(dev,
-                           "error interrupt (ievent=0x%08x imask=0x%08x)\n",
-                           events, gfar_read(&regs->imask));
-
-        /* Update the error counters */
-        if (events & IEVENT_TXE) {
-                dev->stats.tx_errors++;
-
-                if (events & IEVENT_LC)
-                        dev->stats.tx_window_errors++;
-                if (events & IEVENT_CRL)
-                        dev->stats.tx_aborted_errors++;
-                if (events & IEVENT_XFUN) {
-                        netif_dbg(priv, tx_err, dev,
-                                  "TX FIFO underrun, packet dropped\n");
-                        dev->stats.tx_dropped++;
-                        atomic64_inc(&priv->extra_stats.tx_underrun);
-
-                        schedule_work(&priv->reset_task);
-                }
-                netif_dbg(priv, tx_err, dev, "Transmit Error\n");
-        }
-        if (events & IEVENT_BSY) {
-                dev->stats.rx_over_errors++;
-                atomic64_inc(&priv->extra_stats.rx_bsy);
-
-                netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
-                          gfar_read(&regs->rstat));
-        }
-        if (events & IEVENT_BABR) {
-                dev->stats.rx_errors++;
-                atomic64_inc(&priv->extra_stats.rx_babr);
-
-                netif_dbg(priv, rx_err, dev, "babbling RX error\n");
-        }
-        if (events & IEVENT_EBERR) {
-                atomic64_inc(&priv->extra_stats.eberr);
-                netif_dbg(priv, rx_err, dev, "bus error\n");
-        }
-        if (events & IEVENT_RXC)
-                netif_dbg(priv, rx_status, dev, "control frame\n");
-
-        if (events & IEVENT_BABT) {
-                atomic64_inc(&priv->extra_stats.tx_babt);
-                netif_dbg(priv, tx_err, dev, "babbling TX error\n");
-        }
-        return IRQ_HANDLED;
-}
-
-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
-{
-        struct net_device *ndev = priv->ndev;
-        struct phy_device *phydev = ndev->phydev;
-        u32 val = 0;
-
-        if (!phydev->duplex)
-                return val;
-
-        if (!priv->pause_aneg_en) {
-                if (priv->tx_pause_en)
-                        val |= MACCFG1_TX_FLOW;
-                if (priv->rx_pause_en)
-                        val |= MACCFG1_RX_FLOW;
-        } else {
-                u16 lcl_adv, rmt_adv;
-                u8 flowctrl;
-                /* get link partner capabilities */
-                rmt_adv = 0;
-                if (phydev->pause)
-                        rmt_adv = LPA_PAUSE_CAP;
-                if (phydev->asym_pause)
-                        rmt_adv |= LPA_PAUSE_ASYM;
-
-                lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
-                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-                if (flowctrl & FLOW_CTRL_TX)
-                        val |= MACCFG1_TX_FLOW;
-                if (flowctrl & FLOW_CTRL_RX)
-                        val |= MACCFG1_RX_FLOW;
-        }
-
-        return val;
-}
-
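For readers without mii.h at hand, the pause resolution that gfar_get_flowctrl_cfg() delegates to mii_resolve_flowctrl_fdx() reduces to the logic below. This is an illustrative userspace transcription, not the kernel's header; the constants are local stand-ins for ADVERTISE_PAUSE_CAP/ASYM and FLOW_CTRL_TX/RX:

#include <stdint.h>
#include <stdio.h>

#define PAUSE_CAP  0x0400   /* symmetric pause advertised */
#define PAUSE_ASYM 0x0800   /* asymmetric pause advertised */
#define FC_TX      0x01     /* send pause frames to the partner */
#define FC_RX      0x02     /* honor pause frames from the partner */

/* 802.3 pause resolution for a full-duplex link, mirroring what
 * mii_resolve_flowctrl_fdx() computes from the two advertisements.
 */
static uint8_t resolve_flowctrl(uint16_t lcl_adv, uint16_t rmt_adv)
{
        if (lcl_adv & rmt_adv & PAUSE_CAP)
                return FC_TX | FC_RX;   /* both sides do symmetric pause */
        if (lcl_adv & rmt_adv & PAUSE_ASYM) {
                if (lcl_adv & PAUSE_CAP)
                        return FC_RX;   /* asymmetric, toward us */
                if (rmt_adv & PAUSE_CAP)
                        return FC_TX;   /* asymmetric, toward the partner */
        }
        return 0;
}

int main(void)
{
        /* We advertise sym+asym; the partner asym only: RX-only pause. */
        uint8_t fc = resolve_flowctrl(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM);

        printf("tx_pause=%d rx_pause=%d\n", !!(fc & FC_TX), !!(fc & FC_RX));
        return 0;
}

The resulting pair maps directly onto the MACCFG1_TX_FLOW / MACCFG1_RX_FLOW bits written back in gfar_update_link_state() below.
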
-static noinline void gfar_update_link_state(struct gfar_private *priv)
-{
-        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-        struct net_device *ndev = priv->ndev;
-        struct phy_device *phydev = ndev->phydev;
-        struct gfar_priv_rx_q *rx_queue = NULL;
-        int i;
-
-        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
-                return;
-
-        if (phydev->link) {
-                u32 tempval1 = gfar_read(&regs->maccfg1);
-                u32 tempval = gfar_read(&regs->maccfg2);
-                u32 ecntrl = gfar_read(&regs->ecntrl);
-                u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
-
-                if (phydev->duplex != priv->oldduplex) {
-                        if (!(phydev->duplex))
-                                tempval &= ~(MACCFG2_FULL_DUPLEX);
-                        else
-                                tempval |= MACCFG2_FULL_DUPLEX;
-
-                        priv->oldduplex = phydev->duplex;
-                }
-
-                if (phydev->speed != priv->oldspeed) {
-                        switch (phydev->speed) {
-                        case 1000:
-                                tempval =
-                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-
-                                ecntrl &= ~(ECNTRL_R100);
-                                break;
-                        case 100:
-                        case 10:
-                                tempval =
-                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-
-                                /* Reduced mode distinguishes
-                                 * between 10 and 100
-                                 */
-                                if (phydev->speed == SPEED_100)
-                                        ecntrl |= ECNTRL_R100;
-                                else
-                                        ecntrl &= ~(ECNTRL_R100);
-                                break;
-                        default:
-                                netif_warn(priv, link, priv->ndev,
-                                           "Ack!  Speed (%d) is not 10/100/1000!\n",
-                                           phydev->speed);
-                                break;
-                        }
-
-                        priv->oldspeed = phydev->speed;
-                }
-
-                tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-                tempval1 |= gfar_get_flowctrl_cfg(priv);
-
-                /* Turn last free buffer recording on */
-                if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
-                        for (i = 0; i < priv->num_rx_queues; i++) {
-                                u32 bdp_dma;
-
-                                rx_queue = priv->rx_queue[i];
-                                bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
-                                gfar_write(rx_queue->rfbptr, bdp_dma);
-                        }
-
-                        priv->tx_actual_en = 1;
-                }
-
-                if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
-                        priv->tx_actual_en = 0;
-
-                gfar_write(&regs->maccfg1, tempval1);
-                gfar_write(&regs->maccfg2, tempval);
-                gfar_write(&regs->ecntrl, ecntrl);
-
-                if (!priv->oldlink)
-                        priv->oldlink = 1;
-
-        } else if (priv->oldlink) {
-                priv->oldlink = 0;
-                priv->oldspeed = 0;
-                priv->oldduplex = -1;
-        }
-
-        if (netif_msg_link(priv))
-                phy_print_status(phydev);
-}
-
 static const struct of_device_id gfar_match[] =
 {
         {
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f2af96349c7b..f472a6dbbe6f 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -67,8 +67,6 @@ struct ethtool_rx_list {
 /* Number of bytes to align the rx bufs to */
 #define RXBUF_ALIGNMENT 64
 
-#define PHY_INIT_TIMEOUT 100000
-
 #define DRV_NAME "gfar-enet"
 extern const char gfar_driver_version[];
 
@@ -88,10 +86,6 @@ extern const char gfar_driver_version[];
 #define GFAR_RX_MAX_RING_SIZE 256
 #define GFAR_TX_MAX_RING_SIZE 256
 
-#define GFAR_MAX_FIFO_THRESHOLD 511
-#define GFAR_MAX_FIFO_STARVE 511
-#define GFAR_MAX_FIFO_STARVE_OFF 511
-
 #define FBTHR_SHIFT 24
 #define DEFAULT_RX_LFC_THR 16
 #define DEFAULT_LFC_PTVVAL 4
@@ -109,9 +103,6 @@ extern const char gfar_driver_version[];
 #define DEFAULT_FIFO_TX_THR 0x100
 #define DEFAULT_FIFO_TX_STARVE 0x40
 #define DEFAULT_FIFO_TX_STARVE_OFF 0x80
-#define DEFAULT_BD_STASH 1
-#define DEFAULT_STASH_LENGTH 96
-#define DEFAULT_STASH_INDEX 0
 
 /* The number of Exact Match registers */
 #define GFAR_EM_NUM 15
@@ -139,15 +130,6 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RX_COALESCE 0
 #define DEFAULT_RXCOUNT 0
 
-#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
-                | SUPPORTED_10baseT_Full \
-                | SUPPORTED_100baseT_Half \
-                | SUPPORTED_100baseT_Full \
-                | SUPPORTED_Autoneg \
-                | SUPPORTED_MII)
-
-#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full
-
 /* TBI register addresses */
 #define MII_TBICON 0x11
 
@@ -185,8 +167,6 @@ extern const char gfar_driver_version[];
 #define ECNTRL_REDUCED_MII_MODE 0x00000004
 #define ECNTRL_SGMII_MODE 0x00000002
 
-#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
-
 #define MINFLR_INIT_SETTINGS 0x00000040
 
 /* Tqueue control */
@@ -266,12 +246,6 @@ extern const char gfar_driver_version[];
 #define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME)
 #define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME)
 
-#define skip_bd(bdp, stride, base, ring_size) ({ \
-        typeof(bdp) new_bd = (bdp) + (stride); \
-        (new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; })
-
-#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
-
 #define RCTRL_TS_ENABLE 0x01000000
 #define RCTRL_PAL_MASK 0x001f0000
 #define RCTRL_LFC 0x00004000
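The skip_bd()/next_bd() macros removed above implement a wrapping advance over a descriptor ring by plain pointer arithmetic. A function-style sketch of the same idea (illustrative only; like the macro, it assumes the stride never exceeds one ring length):

#include <stdio.h>

struct bd { int dummy; };

/* Step `stride` descriptors forward and wrap to the start of the ring
 * when running off the end, as the removed skip_bd() macro did.
 */
static struct bd *skip_bd(struct bd *bdp, int stride,
                          struct bd *base, int ring_size)
{
        struct bd *new_bd = bdp + stride;

        return (new_bd >= base + ring_size) ? new_bd - ring_size : new_bd;
}

int main(void)
{
        struct bd ring[16];     /* logical ring occupies the first 8 */
        struct bd *cur = skip_bd(&ring[6], 3, ring, 8);

        printf("index 6 + 3 wraps to %d\n", (int)(cur - ring));  /* 1 */
        return 0;
}
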
@@ -385,11 +359,6 @@ extern const char gfar_driver_version[];
 #define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
 #define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
 
-/* Fifo management */
-#define FIFO_TX_THR_MASK 0x01ff
-#define FIFO_TX_STARVE_MASK 0x01ff
-#define FIFO_TX_STARVE_OFF_MASK 0x01ff
-
 /* Attribute fields */
 
 /* This enables rx snooping for buffers and descriptors */
@@ -1326,16 +1295,9 @@ static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
         return bdp_dma;
 }
 
-irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
-void reset_gfar(struct net_device *dev);
 void gfar_mac_reset(struct gfar_private *priv);
-void gfar_halt(struct gfar_private *priv);
-void gfar_start(struct gfar_private *priv);
-void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
-                   u32 regnum, u32 read);
-void gfar_configure_coalescing_all(struct gfar_private *priv);
 int gfar_set_features(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
@@ -1348,13 +1310,6 @@ extern const struct ethtool_ops gfar_ethtool_ops;
 #define RQFCR_PID_PORT_MASK 0xFFFF0000
 #define RQFCR_PID_MAC_MASK 0xFF000000
 
-struct gfar_mask_entry {
-        unsigned int mask; /* The mask value which is valid from start to end */
-        unsigned int start;
-        unsigned int end;
-        unsigned int block; /* Same block values indicate dependent entries */
-};
-
 /* Represents a receive filer table entry */
 struct gfar_filer_entry {
         u32 ctrl;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3433b46b90c1..3c8e4e2efc07 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -45,19 +45,6 @@
 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
 
-static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
-                            u64 *buf);
-static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
-static int gfar_gcoalesce(struct net_device *dev,
-                          struct ethtool_coalesce *cvals);
-static int gfar_scoalesce(struct net_device *dev,
-                          struct ethtool_coalesce *cvals);
-static void gfar_gringparam(struct net_device *dev,
-                            struct ethtool_ringparam *rvals);
-static int gfar_sringparam(struct net_device *dev,
-                           struct ethtool_ringparam *rvals);
-static void gfar_gdrvinfo(struct net_device *dev,
-                          struct ethtool_drvinfo *drvinfo);
 
 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
         /* extra stats */