Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

commit 7cf75262a4
Author: David S. Miller
Date:   2008-07-22 17:54:47 -07:00

63 changed files with 13274 additions and 6951 deletions


@ -513,21 +513,11 @@ Additional Configurations
Intel(R) PRO/1000 PT Dual Port Server Connection
Intel(R) PRO/1000 PT Dual Port Server Adapter
Intel(R) PRO/1000 PF Dual Port Server Adapter
Intel(R) PRO/1000 PT Quad Port Server Adapter
Intel(R) PRO/1000 PT Quad Port Server Adapter
NAPI
----
NAPI (Rx polling mode) is supported in the e1000 driver. NAPI is enabled
or disabled based on the configuration of the kernel. To override
the default, use the following compile-time flags.
To enable NAPI, compile the driver module, passing in a configuration option:
make CFLAGS_EXTRA=-DE1000_NAPI install
To disable NAPI, compile the driver module, passing in a configuration option:
make CFLAGS_EXTRA=-DE1000_NO_NAPI install
NAPI (Rx polling mode) is enabled in the e1000 driver.
See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
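
The compile-time E1000_NAPI/E1000_NO_NAPI switches are gone: the documentation now simply states that NAPI is always enabled. For reference, a minimal sketch of the NAPI pattern a driver like e1000 follows is shown below (the my_-prefixed helpers are illustrative only, and the exact kernel API has shifted slightly across versions):

	/* Hedged sketch of the NAPI receive pattern (not the actual e1000
	 * code): register a poll callback, schedule it from the RX
	 * interrupt, and fall back to interrupt mode when the ring is
	 * drained before the budget is used up.
	 */
	static int my_rx_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int work_done = 0;

		while (work_done < budget && my_rx_ring_has_work(priv)) {
			my_process_one_rx_packet(priv);	/* hypothetical helpers */
			work_done++;
		}

		if (work_done < budget) {
			napi_complete(napi);		/* back to interrupt mode */
			my_enable_rx_irq(priv);
		}
		return work_done;
	}

	/* in probe:   netif_napi_add(netdev, &priv->napi, my_rx_poll, 64);
	 * in the ISR: my_disable_rx_irq(priv); napi_schedule(&priv->napi);
	 */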


@ -3527,7 +3527,7 @@ S: Supported
S390 NETWORK DRIVERS
P: Ursula Braun
M: ubraun@linux.vnet.ibm.com
M: ursula.braun@de.ibm.com
P: Frank Blaschka
M: blaschka@linux.vnet.ibm.com
M: linux390@de.ibm.com
@ -3547,7 +3547,7 @@ S: Supported
S390 IUCV NETWORK LAYER
P: Ursula Braun
M: ubraun@linux.vnet.ibm.com
M: ursula.braun@de.ibm.com
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/


@ -98,7 +98,6 @@
#include <linux/compiler.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
@ -120,11 +119,6 @@
NETIF_MSG_LINK)
/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
#ifdef CONFIG_8139TOO_PIO
#define USE_IO_OPS 1
#endif
/* define to 1, 2 or 3 to enable copious debugging info */
#define RTL8139_DEBUG 0
@ -156,6 +150,13 @@
static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Whether to use MMIO or PIO. Default to MMIO. */
#ifdef CONFIG_8139TOO_PIO
static int use_io = 1;
#else
static int use_io = 0;
#endif
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;
@ -614,6 +615,8 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_param(use_io, int, 0);
MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
module_param(multicast_filter_limit, int, 0);
module_param_array(media, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
@ -709,13 +712,8 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
assert (tp->pci_dev != NULL);
pdev = tp->pci_dev;
#ifdef USE_IO_OPS
if (tp->mmio_addr)
ioport_unmap (tp->mmio_addr);
#else
if (tp->mmio_addr)
pci_iounmap (pdev, tp->mmio_addr);
#endif /* USE_IO_OPS */
/* it's ok to call this even if we have no regions to free */
pci_release_regions (pdev);
@ -790,32 +788,33 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
DPRINTK("PIO region size == 0x%02X\n", pio_len);
DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
#ifdef USE_IO_OPS
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
retry:
if (use_io) {
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
/* check for weird/broken PCI region reporting */
if (pio_len < RTL_MIN_IO_SIZE) {
dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
} else {
/* make sure PCI base addr 1 is MMIO */
if (!(mmio_flags & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
if (mmio_len < RTL_MIN_IO_SIZE) {
dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
}
/* check for weird/broken PCI region reporting */
if (pio_len < RTL_MIN_IO_SIZE) {
dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
#else
/* make sure PCI base addr 1 is MMIO */
if (!(mmio_flags & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
if (mmio_len < RTL_MIN_IO_SIZE) {
dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
#endif
rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
@ -825,28 +824,28 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
/* enable PCI bus-mastering */
pci_set_master (pdev);
#ifdef USE_IO_OPS
ioaddr = ioport_map(pio_start, pio_len);
if (!ioaddr) {
dev_err(&pdev->dev, "cannot map PIO, aborting\n");
rc = -EIO;
goto err_out;
if (use_io) {
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr) {
dev_err(&pdev->dev, "cannot map PIO, aborting\n");
rc = -EIO;
goto err_out;
}
dev->base_addr = pio_start;
tp->regs_len = pio_len;
} else {
/* ioremap MMIO region */
ioaddr = pci_iomap(pdev, 1, 0);
if (ioaddr == NULL) {
dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
pci_release_regions(pdev);
use_io = 1;
goto retry;
}
dev->base_addr = (long) ioaddr;
tp->regs_len = mmio_len;
}
dev->base_addr = pio_start;
tp->mmio_addr = ioaddr;
tp->regs_len = pio_len;
#else
/* ioremap MMIO region */
ioaddr = pci_iomap(pdev, 1, 0);
if (ioaddr == NULL) {
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out;
}
dev->base_addr = (long) ioaddr;
tp->mmio_addr = ioaddr;
tp->regs_len = mmio_len;
#endif /* USE_IO_OPS */
/* Bring old chips out of low-power mode. */
RTL_W8 (HltClk, 'R');
@ -952,6 +951,14 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
"Use the \"8139cp\" driver for improved performance and stability.\n");
}
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
printk(KERN_INFO "8139too: OQO Model 2 detected. Forcing PIO\n");
use_io = 1;
}
i = rtl8139_init_board (pdev, &dev);
if (i < 0)
return i;
@ -2381,20 +2388,24 @@ static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
np->msg_enable = datum;
}
/* TODO: we are too slack to do reg dumping for pio, for now */
#ifdef CONFIG_8139TOO_PIO
#define rtl8139_get_regs_len NULL
#define rtl8139_get_regs NULL
#else
static int rtl8139_get_regs_len(struct net_device *dev)
{
struct rtl8139_private *np = netdev_priv(dev);
struct rtl8139_private *np;
/* TODO: we are too slack to do reg dumping for pio, for now */
if (use_io)
return 0;
np = netdev_priv(dev);
return np->regs_len;
}
static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
{
struct rtl8139_private *np = netdev_priv(dev);
struct rtl8139_private *np;
/* TODO: we are too slack to do reg dumping for pio, for now */
if (use_io)
return;
np = netdev_priv(dev);
regs->version = RTL_REGS_VER;
@ -2402,7 +2413,6 @@ static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs,
memcpy_fromio(regbuf, np->mmio_addr, regs->len);
spin_unlock_irq(&np->lock);
}
#endif /* CONFIG_8139TOO_MMIO */
static int rtl8139_get_sset_count(struct net_device *dev, int sset)
{
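
The 8139too changes above replace the compile-time USE_IO_OPS/CONFIG_8139TOO_PIO switch with a runtime use_io flag: a module parameter (with the OQO Model 2 quirk forcing it on), plus an automatic fall-back from MMIO to PIO when the BAR 1 remap fails. Condensed from the rtl8139_init_board() hunk above, with error checks omitted, the core of that retry path is:

	/* Condensed restatement of the hunk above: try MMIO on BAR 1 unless
	 * use_io is set; on failure release the PCI regions, flip the flag
	 * and take the whole path again in PIO mode.
	 */
	retry:
		/* ... BAR sanity checks and pci_request_regions() ... */
		if (use_io) {
			ioaddr = pci_iomap(pdev, 0, 0);	/* BAR 0: port I/O */
		} else {
			ioaddr = pci_iomap(pdev, 1, 0);	/* BAR 1: MMIO */
			if (!ioaddr) {
				pci_release_regions(pdev);
				use_io = 1;
				goto retry;
			}
		}

At module load time the mode can still be forced by hand through the use_io parameter documented by the MODULE_PARM_DESC() line above.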


@ -1926,20 +1926,6 @@ config E1000
To compile this driver as a module, choose M here. The module
will be called e1000.
config E1000_NAPI
bool "Use Rx Polling (NAPI)"
depends on E1000
help
NAPI is a new driver API designed to reduce CPU and interrupt load
when the driver is receiving lots of packets from the card. It is
still somewhat experimental and thus not yet enabled by default.
If your estimated Rx load is 10kpps or more, or if the card will be
deployed on potentially unfriendly networks (e.g. in a firewall),
then say Y here.
If in doubt, say N.
config E1000_DISABLE_PACKET_SPLIT
bool "Disable Packet Split for PCI express adapters"
depends on E1000
@ -2304,6 +2290,17 @@ config ATL1
To compile this driver as a module, choose M here. The module
will be called atl1.
config ATL1E
tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
depends on PCI && EXPERIMENTAL
select CRC32
select MII
help
This driver supports the Atheros L1E gigabit ethernet adapter.
To compile this driver as a module, choose M here. The module
will be called atl1e.
endif # NETDEV_1000
#


@ -15,6 +15,7 @@ obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_TEHUTI) += tehuti.o


@ -820,7 +820,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
lp->skb = skb;
lp->skb_length = skb->len;
lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
lp->stats.tx_bytes += skb->len;
dev->stats.tx_bytes += skb->len;
/* Set address of the data in the Transmit Address register */
at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
@ -843,34 +843,33 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
*/
static struct net_device_stats *at91ether_stats(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
int ale, lenerr, seqe, lcol, ecol;
if (netif_running(dev)) {
lp->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
ale = at91_emac_read(AT91_EMAC_ALE);
lp->stats.rx_frame_errors += ale; /* Alignment errors */
dev->stats.rx_frame_errors += ale; /* Alignment errors */
lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
lp->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
seqe = at91_emac_read(AT91_EMAC_SEQE);
lp->stats.rx_crc_errors += seqe; /* CRC error */
lp->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
lp->stats.rx_errors += (ale + lenerr + seqe
dev->stats.rx_crc_errors += seqe; /* CRC error */
dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
dev->stats.rx_errors += (ale + lenerr + seqe
+ at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));
lp->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
lp->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
lp->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
lp->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
lcol = at91_emac_read(AT91_EMAC_LCOL);
ecol = at91_emac_read(AT91_EMAC_ECOL);
lp->stats.tx_window_errors += lcol; /* Late collisions */
lp->stats.tx_aborted_errors += ecol; /* 16 collisions */
dev->stats.tx_window_errors += lcol; /* Late collisions */
dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
lp->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
}
return &lp->stats;
return &dev->stats;
}
/*
@ -896,16 +895,16 @@ static void at91ether_rx(struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
dev->last_rx = jiffies;
lp->stats.rx_bytes += pktlen;
dev->stats.rx_bytes += pktlen;
netif_rx(skb);
}
else {
lp->stats.rx_dropped += 1;
dev->stats.rx_dropped += 1;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
}
if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
lp->stats.multicast++;
dev->stats.multicast++;
dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */
if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */
@ -934,7 +933,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
/* The TCOM bit is set even if the transmission failed. */
if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
lp->stats.tx_errors += 1;
dev->stats.tx_errors += 1;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
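
The at91_ether hunks above are a mechanical conversion from the driver-private statistics block (lp->stats, removed from struct at91_private in the next hunk) to the net_device_stats embedded in struct net_device itself. After the conversion a stats handler reduces to the pattern sketched below (the handler name is illustrative):

	/* Sketch of the dev->stats idiom used above: counters are bumped
	 * directly on the net_device, so the stats callback just returns
	 * that embedded block.
	 */
	static struct net_device_stats *my_get_stats(struct net_device *dev)
	{
		/* dev->stats.rx_packets, dev->stats.tx_bytes, ... are
		 * updated in the RX/TX paths and the interrupt handler */
		return &dev->stats;
	}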


@ -84,7 +84,6 @@ struct recv_desc_bufs
struct at91_private
{
struct net_device_stats stats;
struct mii_if_info mii; /* ethtool support */
struct at91_eth_data board_data; /* board-specific configuration */
struct clk *ether_clk; /* clock */


@ -0,0 +1,2 @@
obj-$(CONFIG_ATL1E) += atl1e.o
atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o

drivers/net/atl1e/atl1e.h (new file, 503 lines)

@ -0,0 +1,503 @@
/*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
* Copyright(c) 2007 xiong huang <xiong.huang@atheros.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _ATL1E_H_
#define _ATL1E_H_
#include <linux/version.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/mii.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include "atl1e_hw.h"
#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
#define CMD_IO_SPACE 0x0001
#define CMD_MEMORY_SPACE 0x0002
#define CMD_BUS_MASTER 0x0004
#define BAR_0 0
#define BAR_1 1
#define BAR_5 5
/* Wake Up Filter Control */
#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define SPEED_0 0xffff
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
/* Error Codes */
#define AT_ERR_EEPROM 1
#define AT_ERR_PHY 2
#define AT_ERR_CONFIG 3
#define AT_ERR_PARAM 4
#define AT_ERR_MAC_TYPE 5
#define AT_ERR_PHY_TYPE 6
#define AT_ERR_PHY_SPEED 7
#define AT_ERR_PHY_RES 8
#define AT_ERR_TIMEOUT 9
#define MAX_JUMBO_FRAME_SIZE 0x2000
#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \
_tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\
(((_vlan) >> 9) & 8))
#define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \
_vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\
(((_tpd) & 0x88) << 5))
#define AT_MAX_RECEIVE_QUEUE 4
#define AT_PAGE_NUM_PER_QUEUE 2
#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
#define AT_TX_WATCHDOG (5 * HZ)
#define AT_MAX_INT_WORK 10
#define AT_TWSI_EEPROM_TIMEOUT 100
#define AT_HW_MAX_IDLE_DELAY 10
#define AT_SUSPEND_LINK_TIMEOUT 28
#define AT_REGS_LEN 75
#define AT_EEPROM_LEN 512
#define AT_ADV_MASK (ADVERTISE_10_HALF |\
ADVERTISE_10_FULL |\
ADVERTISE_100_HALF |\
ADVERTISE_100_FULL |\
ADVERTISE_1000_FULL)
/* tpd word 2 */
#define TPD_BUFLEN_MASK 0x3FFF
#define TPD_BUFLEN_SHIFT 0
#define TPD_DMAINT_MASK 0x0001
#define TPD_DMAINT_SHIFT 14
#define TPD_PKTNT_MASK 0x0001
#define TPD_PKTINT_SHIFT 15
#define TPD_VLANTAG_MASK 0xFFFF
#define TPD_VLAN_SHIFT 16
/* tpd word 3 bits 0:4 */
#define TPD_EOP_MASK 0x0001
#define TPD_EOP_SHIFT 0
#define TPD_IP_VERSION_MASK 0x0001
#define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */
#define TPD_INS_VL_TAG_MASK 0x0001
#define TPD_INS_VL_TAG_SHIFT 2
#define TPD_CC_SEGMENT_EN_MASK 0x0001
#define TPD_CC_SEGMENT_EN_SHIFT 3
#define TPD_SEGMENT_EN_MASK 0x0001
#define TPD_SEGMENT_EN_SHIFT 4
/* tpd word 3 bits 5:7 if ip version is 0 */
#define TPD_IP_CSUM_MASK 0x0001
#define TPD_IP_CSUM_SHIFT 5
#define TPD_TCP_CSUM_MASK 0x0001
#define TPD_TCP_CSUM_SHIFT 6
#define TPD_UDP_CSUM_MASK 0x0001
#define TPD_UDP_CSUM_SHIFT 7
/* tpd word 3 bits 5:7 if ip version is 1 */
#define TPD_V6_IPHLLO_MASK 0x0007
#define TPD_V6_IPHLLO_SHIFT 7
/* tpd word 3 bits 8:9 */
#define TPD_VL_TAGGED_MASK 0x0001
#define TPD_VL_TAGGED_SHIFT 8
#define TPD_ETHTYPE_MASK 0x0001
#define TPD_ETHTYPE_SHIFT 9
/* tpd word 3 bits 10:13 if ip version is 0 */
#define TDP_V4_IPHL_MASK 0x000F
#define TPD_V4_IPHL_SHIFT 10
/* tpd word 3 bits 10:13 if ip version is 1 */
#define TPD_V6_IPHLHI_MASK 0x000F
#define TPD_V6_IPHLHI_SHIFT 10
/* tpd word 3 bit 14:31 if segment enabled */
#define TPD_TCPHDRLEN_MASK 0x000F
#define TPD_TCPHDRLEN_SHIFT 14
#define TPD_HDRFLAG_MASK 0x0001
#define TPD_HDRFLAG_SHIFT 18
#define TPD_MSS_MASK 0x1FFF
#define TPD_MSS_SHIFT 19
/* tpd word 3 bits 16:31 if custom csum enabled */
#define TPD_PLOADOFFSET_MASK 0x00FF
#define TPD_PLOADOFFSET_SHIFT 16
#define TPD_CCSUMOFFSET_MASK 0x00FF
#define TPD_CCSUMOFFSET_SHIFT 24
struct atl1e_tpd_desc {
__le64 buffer_addr;
__le32 word2;
__le32 word3;
};
/* how about 0x2000 */
#define MAX_TX_BUF_LEN 0x2000
#define MAX_TX_BUF_SHIFT 13
/*#define MAX_TX_BUF_LEN 0x3000 */
/* rrs word 1 bit 0:31 */
#define RRS_RX_CSUM_MASK 0xFFFF
#define RRS_RX_CSUM_SHIFT 0
#define RRS_PKT_SIZE_MASK 0x3FFF
#define RRS_PKT_SIZE_SHIFT 16
#define RRS_CPU_NUM_MASK 0x0003
#define RRS_CPU_NUM_SHIFT 30
#define RRS_IS_RSS_IPV4 0x0001
#define RRS_IS_RSS_IPV4_TCP 0x0002
#define RRS_IS_RSS_IPV6 0x0004
#define RRS_IS_RSS_IPV6_TCP 0x0008
#define RRS_IS_IPV6 0x0010
#define RRS_IS_IP_FRAG 0x0020
#define RRS_IS_IP_DF 0x0040
#define RRS_IS_802_3 0x0080
#define RRS_IS_VLAN_TAG 0x0100
#define RRS_IS_ERR_FRAME 0x0200
#define RRS_IS_IPV4 0x0400
#define RRS_IS_UDP 0x0800
#define RRS_IS_TCP 0x1000
#define RRS_IS_BCAST 0x2000
#define RRS_IS_MCAST 0x4000
#define RRS_IS_PAUSE 0x8000
#define RRS_ERR_BAD_CRC 0x0001
#define RRS_ERR_CODE 0x0002
#define RRS_ERR_DRIBBLE 0x0004
#define RRS_ERR_RUNT 0x0008
#define RRS_ERR_RX_OVERFLOW 0x0010
#define RRS_ERR_TRUNC 0x0020
#define RRS_ERR_IP_CSUM 0x0040
#define RRS_ERR_L4_CSUM 0x0080
#define RRS_ERR_LENGTH 0x0100
#define RRS_ERR_DES_ADDR 0x0200
struct atl1e_recv_ret_status {
u16 seq_num;
u16 hash_lo;
__le32 word1;
u16 pkt_flag;
u16 err_flag;
u16 hash_hi;
u16 vtag;
};
enum atl1e_dma_req_block {
atl1e_dma_req_128 = 0,
atl1e_dma_req_256 = 1,
atl1e_dma_req_512 = 2,
atl1e_dma_req_1024 = 3,
atl1e_dma_req_2048 = 4,
atl1e_dma_req_4096 = 5
};
enum atl1e_rrs_type {
atl1e_rrs_disable = 0,
atl1e_rrs_ipv4 = 1,
atl1e_rrs_ipv4_tcp = 2,
atl1e_rrs_ipv6 = 4,
atl1e_rrs_ipv6_tcp = 8
};
enum atl1e_nic_type {
athr_l1e = 0,
athr_l2e_revA = 1,
athr_l2e_revB = 2
};
struct atl1e_hw_stats {
/* rx */
unsigned long rx_ok; /* The number of good packet received. */
unsigned long rx_bcast; /* The number of good broadcast packet received. */
unsigned long rx_mcast; /* The number of good multicast packet received. */
unsigned long rx_pause; /* The number of Pause packet received. */
unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */
unsigned long rx_fcs_err; /* The number of packets with bad FCS. */
unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */
unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */
unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */
unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */
unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */
unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */
unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */
unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */
unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */
unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */
unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */
unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */
unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */
unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */
unsigned long rx_align_err; /* Alignment Error */
unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */
unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */
unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */
/* tx */
unsigned long tx_ok; /* The number of good packet transmitted. */
unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */
unsigned long tx_mcast; /* The number of good multicast packet transmitted. */
unsigned long tx_pause; /* The number of Pause packet transmitted. */
unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */
unsigned long tx_defer; /* The number of packets transmitted that is deferred. */
unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */
unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */
unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */
unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */
unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */
unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */
unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */
unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */
unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */
unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */
unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */
unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */
unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */
unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */
unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */
};
struct atl1e_hw {
u8 __iomem *hw_addr; /* inner register address */
resource_size_t mem_rang;
struct atl1e_adapter *adapter;
enum atl1e_nic_type nic_type;
u16 device_id;
u16 vendor_id;
u16 subsystem_id;
u16 subsystem_vendor_id;
u8 revision_id;
u16 pci_cmd_word;
u8 mac_addr[ETH_ALEN];
u8 perm_mac_addr[ETH_ALEN];
u8 preamble_len;
u16 max_frame_size;
u16 rx_jumbo_th;
u16 tx_jumbo_th;
u16 media_type;
#define MEDIA_TYPE_AUTO_SENSOR 0
#define MEDIA_TYPE_100M_FULL 1
#define MEDIA_TYPE_100M_HALF 2
#define MEDIA_TYPE_10M_FULL 3
#define MEDIA_TYPE_10M_HALF 4
u16 autoneg_advertised;
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL 0x0020
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
u16 imt; /* Interrupt Moderator timer ( 2us resolution) */
u16 ict; /* Interrupt Clear timer (2us resolution) */
u32 smb_timer;
u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
interrupt request */
u16 tpd_thresh;
u16 rx_count_down; /* 2us resolution */
u16 tx_count_down;
u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
enum atl1e_rrs_type rrs_type;
u32 base_cpu;
u32 indirect_tab;
enum atl1e_dma_req_block dmar_block;
enum atl1e_dma_req_block dmaw_block;
u8 dmaw_dly_cnt;
u8 dmar_dly_cnt;
bool phy_configured;
bool re_autoneg;
bool emi_ca;
};
/*
* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct atl1e_tx_buffer {
struct sk_buff *skb;
u16 length;
dma_addr_t dma;
};
struct atl1e_rx_page {
dma_addr_t dma; /* receive page DMA address */
u8 *addr; /* receive page virtual address */
dma_addr_t write_offset_dma; /* the DMA address which contains the
receive data offset in the page */
u32 *write_offset_addr; /* the virtual address which contains
the receive data offset in the page */
u32 read_offset; /* the offset where we have read */
};
struct atl1e_rx_page_desc {
struct atl1e_rx_page rx_page[AT_PAGE_NUM_PER_QUEUE];
u8 rx_using;
u16 rx_nxseq;
};
/* transmit packet descriptor (tpd) ring */
struct atl1e_tx_ring {
struct atl1e_tpd_desc *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
u16 count; /* the count of transmit rings */
rwlock_t tx_lock;
u16 next_to_use;
atomic_t next_to_clean;
struct atl1e_tx_buffer *tx_buffer;
dma_addr_t cmb_dma;
u32 *cmb;
};
/* receive packet descriptor ring */
struct atl1e_rx_ring {
void *desc;
dma_addr_t dma;
int size;
u32 page_size; /* bytes length of rxf page */
u32 real_page_size; /* real_page_size = page_size + jumbo + align */
struct atl1e_rx_page_desc rx_page_desc[AT_MAX_RECEIVE_QUEUE];
};
/* board specific private data structure */
struct atl1e_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct vlan_group *vlgrp;
struct napi_struct napi;
struct mii_if_info mii; /* MII interface info */
struct atl1e_hw hw;
struct atl1e_hw_stats hw_stats;
struct net_device_stats net_stats;
bool have_msi;
u32 wol;
u16 link_speed;
u16 link_duplex;
spinlock_t mdio_lock;
spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct reset_task;
struct work_struct link_chg_task;
struct timer_list watchdog_timer;
struct timer_list phy_config_timer;
/* All Descriptor memory */
dma_addr_t ring_dma;
void *ring_vir_addr;
int ring_size;
struct atl1e_tx_ring tx_ring;
struct atl1e_rx_ring rx_ring;
int num_rx_queues;
unsigned long flags;
#define __AT_TESTING 0x0001
#define __AT_RESETTING 0x0002
#define __AT_DOWN 0x0003
u32 bd_number; /* board number;*/
u32 pci_state[16];
u32 *config_space;
};
#define AT_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + reg)))
#define AT_WRITE_FLUSH(a) (\
readl((a)->hw_addr))
#define AT_READ_REG(a, reg) ( \
readl((a)->hw_addr + reg))
#define AT_WRITE_REGB(a, reg, value) (\
writeb((value), ((a)->hw_addr + reg)))
#define AT_READ_REGB(a, reg) (\
readb((a)->hw_addr + reg))
#define AT_WRITE_REGW(a, reg, value) (\
writew((value), ((a)->hw_addr + reg)))
#define AT_READ_REGW(a, reg) (\
readw((a)->hw_addr + reg))
#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
#define AT_READ_REG_ARRAY(a, reg, offset) ( \
readl(((a)->hw_addr + reg) + ((offset) << 2)))
extern char atl1e_driver_name[];
extern char atl1e_driver_version[];
extern void atl1e_check_options(struct atl1e_adapter *adapter);
extern int atl1e_up(struct atl1e_adapter *adapter);
extern void atl1e_down(struct atl1e_adapter *adapter);
extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
extern void atl1e_set_ethtool_ops(struct net_device *netdev);
#endif /* _ATL1E_H_ */
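
The TPD_*_MASK/TPD_*_SHIFT pairs above describe bit fields packed into the two 32-bit words of struct atl1e_tpd_desc. A hedged sketch of how such a field is packed and unpacked under that convention (the helper names are illustrative, not part of the driver):

	/* Illustrative helpers for the MASK/SHIFT convention in atl1e.h:
	 * a field occupies (MASK << SHIFT) bits of a descriptor word.
	 */
	static inline u32 tpd_field_set(u32 word, u32 mask, u32 shift, u32 val)
	{
		return (word & ~(mask << shift)) | ((val & mask) << shift);
	}

	static inline u32 tpd_field_get(u32 word, u32 mask, u32 shift)
	{
		return (word >> shift) & mask;
	}

	/* e.g. placing a buffer length into TPD word 2:
	 *	word2 = tpd_field_set(word2, TPD_BUFLEN_MASK,
	 *			      TPD_BUFLEN_SHIFT, len);
	 * the descriptor fields themselves are __le32, so the driver wraps
	 * these steps in cpu_to_le32()/le32_to_cpu().
	 */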


@ -0,0 +1,405 @@
/*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include "atl1e.h"
static int atl1e_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
if (hw->nic_type == athr_l1e)
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->advertising |= hw->autoneg_advertised;
ecmd->port = PORT_TP;
ecmd->phy_address = 0;
ecmd->transceiver = XCVR_INTERNAL;
if (adapter->link_speed != SPEED_0) {
ecmd->speed = adapter->link_speed;
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
ecmd->speed = -1;
ecmd->duplex = -1;
}
ecmd->autoneg = AUTONEG_ENABLE;
return 0;
}
static int atl1e_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
if (ecmd->autoneg == AUTONEG_ENABLE) {
u16 adv4, adv9;
if ((ecmd->advertising&ADVERTISE_1000_FULL)) {
if (hw->nic_type == athr_l1e) {
hw->autoneg_advertised =
ecmd->advertising & AT_ADV_MASK;
} else {
clear_bit(__AT_RESETTING, &adapter->flags);
return -EINVAL;
}
} else if (ecmd->advertising&ADVERTISE_1000_HALF) {
clear_bit(__AT_RESETTING, &adapter->flags);
return -EINVAL;
} else {
hw->autoneg_advertised =
ecmd->advertising & AT_ADV_MASK;
}
ecmd->advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
if (hw->autoneg_advertised & ADVERTISE_10_HALF)
adv4 |= MII_AR_10T_HD_CAPS;
if (hw->autoneg_advertised & ADVERTISE_10_FULL)
adv4 |= MII_AR_10T_FD_CAPS;
if (hw->autoneg_advertised & ADVERTISE_100_HALF)
adv4 |= MII_AR_100TX_HD_CAPS;
if (hw->autoneg_advertised & ADVERTISE_100_FULL)
adv4 |= MII_AR_100TX_FD_CAPS;
if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
adv9 |= MII_AT001_CR_1000T_FD_CAPS;
if (adv4 != hw->mii_autoneg_adv_reg ||
adv9 != hw->mii_1000t_ctrl_reg) {
hw->mii_autoneg_adv_reg = adv4;
hw->mii_1000t_ctrl_reg = adv9;
hw->re_autoneg = true;
}
} else {
clear_bit(__AT_RESETTING, &adapter->flags);
return -EINVAL;
}
/* reset the link */
if (netif_running(adapter->netdev)) {
atl1e_down(adapter);
atl1e_up(adapter);
} else
atl1e_reset_hw(&adapter->hw);
clear_bit(__AT_RESETTING, &adapter->flags);
return 0;
}
static u32 atl1e_get_tx_csum(struct net_device *netdev)
{
return (netdev->features & NETIF_F_HW_CSUM) != 0;
}
static u32 atl1e_get_msglevel(struct net_device *netdev)
{
#ifdef DBG
return 1;
#else
return 0;
#endif
}
static void atl1e_set_msglevel(struct net_device *netdev, u32 data)
{
}
static int atl1e_get_regs_len(struct net_device *netdev)
{
return AT_REGS_LEN * sizeof(u32);
}
static void atl1e_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u16 phy_data;
memset(p, 0, AT_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP);
regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG);
regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL);
regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL);
regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT);
regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL);
regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER);
regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS);
regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL);
regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK);
regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL);
regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG);
regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4);
regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE);
regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4);
regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
regs_buff[20] = AT_READ_REG(hw, REG_MTU);
regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL);
regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR);
regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN);
regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR);
regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR);
regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN);
regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR);
regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR);
atl1e_read_phy_reg(hw, MII_BMCR, &phy_data);
regs_buff[73] = (u32)phy_data;
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
regs_buff[74] = (u32)phy_data;
}
static int atl1e_get_eeprom_len(struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (!atl1e_check_eeprom_exist(&adapter->hw))
return AT_EEPROM_LEN;
else
return 0;
}
static int atl1e_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
u32 *eeprom_buff;
int first_dword, last_dword;
int ret_val = 0;
int i;
if (eeprom->len == 0)
return -EINVAL;
if (atl1e_check_eeprom_exist(hw)) /* not exist */
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
first_dword = eeprom->offset >> 2;
last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
eeprom_buff = kmalloc(sizeof(u32) *
(last_dword - first_dword + 1), GFP_KERNEL);
if (eeprom_buff == NULL)
return -ENOMEM;
for (i = first_dword; i <= last_dword; i++) {
if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) {
kfree(eeprom_buff);
return -EIO;
}
}
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
eeprom->len);
kfree(eeprom_buff);
return ret_val;
}
static int atl1e_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
u32 *eeprom_buff;
u32 *ptr;
int first_dword, last_dword;
int ret_val = 0;
int i;
if (eeprom->len == 0)
return -EOPNOTSUPP;
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EINVAL;
first_dword = eeprom->offset >> 2;
last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL);
if (eeprom_buff == NULL)
return -ENOMEM;
ptr = (u32 *)eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) {
ret_val = -EIO;
goto out;
}
ptr++;
}
if (((eeprom->offset + eeprom->len) & 3)) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
if (!atl1e_read_eeprom(hw, last_dword * 4,
&(eeprom_buff[last_dword - first_dword]))) {
ret_val = -EIO;
goto out;
}
}
/* Device's eeprom is always little-endian, word addressable */
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_dword - first_dword + 1; i++) {
if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4),
eeprom_buff[i])) {
ret_val = -EIO;
goto out;
}
}
out:
kfree(eeprom_buff);
return ret_val;
}
static void atl1e_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
strncpy(drvinfo->driver, atl1e_driver_name, 32);
strncpy(drvinfo->version, atl1e_driver_version, 32);
strncpy(drvinfo->fw_version, "L1e", 32);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl1e_get_regs_len(netdev);
drvinfo->eedump_len = atl1e_get_eeprom_len(netdev);
}
static void atl1e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_MAGIC | WAKE_PHY;
wol->wolopts = 0;
if (adapter->wol & AT_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if (adapter->wol & AT_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
if (adapter->wol & AT_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & AT_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & AT_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
return;
}
static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
return -EOPNOTSUPP;
/* these settings will always override what we currently have */
adapter->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= AT_WUFC_MAG;
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
return 0;
}
static int atl1e_nway_reset(struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
atl1e_reinit_locked(adapter);
return 0;
}
static struct ethtool_ops atl1e_ethtool_ops = {
.get_settings = atl1e_get_settings,
.set_settings = atl1e_set_settings,
.get_drvinfo = atl1e_get_drvinfo,
.get_regs_len = atl1e_get_regs_len,
.get_regs = atl1e_get_regs,
.get_wol = atl1e_get_wol,
.set_wol = atl1e_set_wol,
.get_msglevel = atl1e_get_msglevel,
.set_msglevel = atl1e_set_msglevel,
.nway_reset = atl1e_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = atl1e_get_eeprom_len,
.get_eeprom = atl1e_get_eeprom,
.set_eeprom = atl1e_set_eeprom,
.get_tx_csum = atl1e_get_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
.get_tso = ethtool_op_get_tso,
#endif
};
void atl1e_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
}
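
atl1e_set_ethtool_ops() is the only symbol this file exports; the main driver (atl1e_main.c, which is not part of this excerpt) is expected to call it while the net_device is being set up. A minimal sketch of that wiring (the surrounding function is assumed for illustration, not taken from the driver):

	/* Assumed wiring during device setup; the real code lives in
	 * atl1e_main.c and is not shown here.
	 */
	static void atl1e_setup_netdev_sketch(struct net_device *netdev,
					      struct pci_dev *pdev)
	{
		SET_NETDEV_DEV(netdev, &pdev->dev);
		/* ... net_device callbacks and feature flags ... */
		atl1e_set_ethtool_ops(netdev);	/* installs atl1e_ethtool_ops */
	}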


@ -0,0 +1,664 @@
/*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include "atl1e.h"
/*
* atl1e_check_eeprom_exist
* return 0 if the eeprom exists
*/
int atl1e_check_eeprom_exist(struct atl1e_hw *hw)
{
u32 value;
value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
if (value & SPI_FLASH_CTRL_EN_VPD) {
value &= ~SPI_FLASH_CTRL_EN_VPD;
AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
}
value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST);
return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}
void atl1e_hw_set_mac_addr(struct atl1e_hw *hw)
{
u32 value;
/*
* 00-0B-6A-F6-00-DC
* 0: 6AF600DC 1: 000B
* low dword
*/
value = (((u32)hw->mac_addr[2]) << 24) |
(((u32)hw->mac_addr[3]) << 16) |
(((u32)hw->mac_addr[4]) << 8) |
(((u32)hw->mac_addr[5])) ;
AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
/* high dword */
value = (((u32)hw->mac_addr[0]) << 8) |
(((u32)hw->mac_addr[1])) ;
AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
}
/*
* atl1e_get_permanent_address
* return 0 if a valid MAC address was obtained,
*/
static int atl1e_get_permanent_address(struct atl1e_hw *hw)
{
u32 addr[2];
u32 i;
u32 twsi_ctrl_data;
u8 eth_addr[ETH_ALEN];
if (is_valid_ether_addr(hw->perm_mac_addr))
return 0;
/* init */
addr[0] = addr[1] = 0;
if (!atl1e_check_eeprom_exist(hw)) {
/* eeprom exist */
twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
msleep(10);
twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
break;
}
if (i >= AT_TWSI_EEPROM_TIMEOUT)
return AT_ERR_TIMEOUT;
}
/* maybe MAC-address is from BIOS */
addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4);
*(u32 *) &eth_addr[2] = swab32(addr[0]);
*(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
if (is_valid_ether_addr(eth_addr)) {
memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
return 0;
}
return AT_ERR_EEPROM;
}
bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value)
{
return true;
}
bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value)
{
int i;
u32 control;
if (offset & 3)
return false; /* address is not dword aligned */
AT_WRITE_REG(hw, REG_VPD_DATA, 0);
control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
AT_WRITE_REG(hw, REG_VPD_CAP, control);
for (i = 0; i < 10; i++) {
msleep(2);
control = AT_READ_REG(hw, REG_VPD_CAP);
if (control & VPD_CAP_VPD_FLAG)
break;
}
if (control & VPD_CAP_VPD_FLAG) {
*p_value = AT_READ_REG(hw, REG_VPD_DATA);
return true;
}
return false; /* timeout */
}
void atl1e_force_ps(struct atl1e_hw *hw)
{
AT_WRITE_REGW(hw, REG_GPHY_CTRL,
GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
}
/*
* Reads the adapter's MAC address from the EEPROM
*
* hw - Struct containing variables accessed by shared code
*/
int atl1e_read_mac_addr(struct atl1e_hw *hw)
{
int err = 0;
err = atl1e_get_permanent_address(hw);
if (err)
return AT_ERR_EEPROM;
memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
return 0;
}
/*
* atl1e_hash_mc_addr
* purpose
* compute the hash value for a multicast address
* hash calculation:
* 1. calculate the 32-bit CRC of the multicast address
* 2. reverse the CRC, MSB to LSB
*/
u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
{
u32 crc32;
u32 value = 0;
int i;
crc32 = ether_crc_le(6, mc_addr);
crc32 = ~crc32;
for (i = 0; i < 32; i++)
value |= (((crc32 >> i) & 1) << (31 - i));
return value;
}
/*
* Sets the bit in the multicast table corresponding to the hash value.
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
/*
* The HASH Table is a register array of 2 32-bit registers.
* It is treated like an array of 64 bits. We want to set
* bit BitArray[hash_value]. So we figure out what register
* the bit is in, read it, OR in the new bit, then write
* back the new value. The register is determined by the
* upper 7 bits of the hash value and the bit within that
* register are determined by the lower 5 bits of the value.
*/
hash_reg = (hash_value >> 31) & 0x1;
hash_bit = (hash_value >> 26) & 0x1F;
mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
mta |= (1 << hash_bit);
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
}
/*
* Reads the value from a PHY register
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
wmb();
for (i = 0; i < MDIO_WAIT_TIMES; i++) {
udelay(2);
val = AT_READ_REG(hw, REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
wmb();
}
if (!(val & (MDIO_START | MDIO_BUSY))) {
*phy_data = (u16)val;
return 0;
}
return AT_ERR_PHY;
}
/*
* Writes a value to a PHY register
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to write
* data - data to write to the PHY
*/
int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data)
{
int i;
u32 val;
val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
(reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
MDIO_SUP_PREAMBLE |
MDIO_START |
MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
wmb();
for (i = 0; i < MDIO_WAIT_TIMES; i++) {
udelay(2);
val = AT_READ_REG(hw, REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
wmb();
}
if (!(val & (MDIO_START | MDIO_BUSY)))
return 0;
return AT_ERR_PHY;
}
/*
* atl1e_init_pcie - init PCIE module
*/
static void atl1e_init_pcie(struct atl1e_hw *hw)
{
u32 value;
/* the two lines below are commented out to save more power during suspend
value = LTSSM_TEST_MODE_DEF;
AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
*/
/* pcie flow control mode change */
value = AT_READ_REG(hw, 0x1008);
value |= 0x8000;
AT_WRITE_REG(hw, 0x1008, value);
}
/*
* Configures PHY autoneg and flow control advertisement settings
*
* hw - Struct containing variables accessed by shared code
*/
static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
{
s32 ret_val;
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
if (0 != hw->mii_autoneg_adv_reg)
return 0;
/* Read the MII Auto-Neg Advertisement Register (Address 4/9). */
mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
/*
* Need to parse autoneg_advertised and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
/*
* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T control Register (Address 9).
*/
mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
/*
* Need to parse MediaType and setup the
* appropriate PHY registers.
*/
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
MII_AR_10T_FD_CAPS |
MII_AR_100TX_HD_CAPS |
MII_AR_100TX_FD_CAPS);
hw->autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL |
ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
if (hw->nic_type == athr_l1e) {
mii_1000t_ctrl_reg |=
MII_AT001_CR_1000T_FD_CAPS;
hw->autoneg_advertised |= ADVERTISE_1000_FULL;
}
break;
case MEDIA_TYPE_100M_FULL:
mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
}
return 0;
}
/*
* Resets the PHY and makes all configuration take effect
*
* hw - Struct containing variables accessed by shared code
*
* Sets bits 15 and 12 of the MII control register (for the F001 bug)
*/
int atl1e_phy_commit(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 phy_data;
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
u32 val;
int i;
/**************************************
* pcie serdes link may be down !
**************************************/
for (i = 0; i < 25; i++) {
msleep(1);
val = AT_READ_REG(hw, REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
}
if (0 != (val & (MDIO_START | MDIO_BUSY))) {
dev_err(&pdev->dev,
"pcie linkdown at least for 25ms\n");
return ret_val;
}
dev_err(&pdev->dev, "pcie linkup after %d ms\n", i);
}
return 0;
}
int atl1e_phy_init(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
struct pci_dev *pdev = adapter->pdev;
s32 ret_val;
u16 phy_val;
if (hw->phy_configured) {
if (hw->re_autoneg) {
hw->re_autoneg = false;
return atl1e_restart_autoneg(hw);
}
return 0;
}
/* RESET GPHY Core */
AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
msleep(2);
AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
GPHY_CTRL_EXT_RESET);
msleep(2);
/* patches */
/* p1. enable hibernation mode */
ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
if (ret_val)
return ret_val;
ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
if (ret_val)
return ret_val;
/* p2. set Class A/B for all modes */
ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
if (ret_val)
return ret_val;
phy_val = 0x02ef;
/* remove Class AB */
/* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
if (ret_val)
return ret_val;
/* p3. 10B ??? */
ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
if (ret_val)
return ret_val;
ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
if (ret_val)
return ret_val;
/* p4. 1000T power */
ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
if (ret_val)
return ret_val;
ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
if (ret_val)
return ret_val;
ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
if (ret_val)
return ret_val;
ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
if (ret_val)
return ret_val;
msleep(1);
/*Enable PHY LinkChange Interrupt */
ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
if (ret_val) {
dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n");
return ret_val;
}
/* setup AutoNeg parameters */
ret_val = atl1e_phy_setup_autoneg_adv(hw);
if (ret_val) {
dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n");
return ret_val;
}
/* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
dev_dbg(&pdev->dev, "Restarting Auto-Neg");
ret_val = atl1e_phy_commit(hw);
if (ret_val) {
dev_err(&pdev->dev, "Error Resetting the phy");
return ret_val;
}
hw->phy_configured = true;
return 0;
}
/*
* Reset the transmit and receive units; mask and clear all interrupts.
* hw - Struct containing variables accessed by shared code
* return : 0 or idle status (if error)
*/
int atl1e_reset_hw(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 idle_status_data = 0;
u16 pci_cfg_cmd_word = 0;
int timeout = 0;
/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word);
if ((pci_cfg_cmd_word & (CMD_IO_SPACE |
CMD_MEMORY_SPACE | CMD_BUS_MASTER))
!= (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) {
pci_cfg_cmd_word |= (CMD_IO_SPACE |
CMD_MEMORY_SPACE | CMD_BUS_MASTER);
pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word);
}
/*
* Issue Soft Reset to the MAC. This will reset the chip's
* transmit, receive, DMA. It will not affect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
AT_WRITE_REG(hw, REG_MASTER_CTRL,
MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST);
wmb();
msleep(1);
/* Wait at least 10ms for all modules to be idle */
for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS);
if (idle_status_data == 0)
break;
msleep(1);
cpu_relax();
}
if (timeout >= AT_HW_MAX_IDLE_DELAY) {
dev_err(&pdev->dev,
"MAC state machine cann't be idle since"
" disabled for 10ms second\n");
return AT_ERR_TIMEOUT;
}
return 0;
}
/*
* Performs basic configuration of the adapter.
*
* hw - Struct containing variables accessed by shared code
* Assumes that the controller has previously been reset and is in a
* post-reset uninitialized state. Initializes multicast table,
* and Calls routines to setup link
* Leaves the transmit and receive units disabled and uninitialized.
*/
int atl1e_init_hw(struct atl1e_hw *hw)
{
s32 ret_val = 0;
atl1e_init_pcie(hw);
/* Zero out the Multicast HASH table */
/* clear the old settings from the multicast hash table */
AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
ret_val = atl1e_phy_init(hw);
return ret_val;
}
/*
* Detects the current speed and duplex settings of the hardware.
*
* hw - Struct containing variables accessed by shared code
* speed - Speed of the connection
* duplex - Duplex setting of the connection
*/
int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
{
int err;
u16 phy_data;
/* Read PHY Specific Status Register (17) */
err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
if (err)
return err;
if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
return AT_ERR_PHY_RES;
switch (phy_data & MII_AT001_PSSR_SPEED) {
case MII_AT001_PSSR_1000MBS:
*speed = SPEED_1000;
break;
case MII_AT001_PSSR_100MBS:
*speed = SPEED_100;
break;
case MII_AT001_PSSR_10MBS:
*speed = SPEED_10;
break;
default:
return AT_ERR_PHY_SPEED;
break;
}
if (phy_data & MII_AT001_PSSR_DPLX)
*duplex = FULL_DUPLEX;
else
*duplex = HALF_DUPLEX;
return 0;
}
int atl1e_restart_autoneg(struct atl1e_hw *hw)
{
int err = 0;
err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
if (err)
return err;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
err = atl1e_write_phy_reg(hw, MII_AT001_CR,
hw->mii_1000t_ctrl_reg);
if (err)
return err;
}
err = atl1e_write_phy_reg(hw, MII_BMCR,
MII_CR_RESET | MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
return err;
}
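
atl1e_hash_mc_addr() and atl1e_hash_set() together implement the 64-bit multicast hash filter described in the comment block above. A hedged sketch of how a set-multicast-list handler would drive them, using the dev->mc_list API of this kernel era (the function below is illustrative; the real handler lives in atl1e_main.c, not shown in this excerpt):

	/* Illustrative use of the hash helpers above: clear both 32-bit
	 * hash registers, then set one bit per multicast address.
	 */
	static void atl1e_set_multi_sketch(struct net_device *netdev)
	{
		struct atl1e_adapter *adapter = netdev_priv(netdev);
		struct atl1e_hw *hw = &adapter->hw;
		struct dev_mc_list *mc_ptr;

		AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
		AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

		for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next)
			atl1e_hash_set(hw, atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr));
	}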


@ -0,0 +1,793 @@
/*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _ATHL1E_HW_H_
#define _ATHL1E_HW_H_
#include <linux/types.h>
#include <linux/mii.h>
struct atl1e_adapter;
struct atl1e_hw;
/* function prototype */
s32 atl1e_reset_hw(struct atl1e_hw *hw);
s32 atl1e_read_mac_addr(struct atl1e_hw *hw);
s32 atl1e_init_hw(struct atl1e_hw *hw);
s32 atl1e_phy_commit(struct atl1e_hw *hw);
s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex);
u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex);
u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr);
void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value);
s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data);
s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data);
s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw);
void atl1e_hw_set_mac_addr(struct atl1e_hw *hw);
bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value);
bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value);
s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw);
s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw);
s32 atl1e_phy_init(struct atl1e_hw *hw);
int atl1e_check_eeprom_exist(struct atl1e_hw *hw);
void atl1e_force_ps(struct atl1e_hw *hw);
s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
/* register definition */
#define REG_PM_CTRLSTAT 0x44
#define REG_PCIE_CAP_LIST 0x58
#define REG_DEVICE_CAP 0x5C
#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
#define REG_DEVICE_CTRL 0x60
#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
#define REG_VPD_CAP 0x6C
#define VPD_CAP_ID_MASK 0xff
#define VPD_CAP_ID_SHIFT 0
#define VPD_CAP_NEXT_PTR_MASK 0xFF
#define VPD_CAP_NEXT_PTR_SHIFT 8
#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
#define VPD_CAP_VPD_ADDR_SHIFT 16
#define VPD_CAP_VPD_FLAG 0x80000000
#define REG_VPD_DATA 0x70
#define REG_SPI_FLASH_CTRL 0x200
#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
#define SPI_FLASH_CTRL_STS_WEN 0x2
#define SPI_FLASH_CTRL_STS_WPEN 0x80
#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
#define SPI_FLASH_CTRL_INS_MASK 0x7
#define SPI_FLASH_CTRL_INS_SHIFT 8
#define SPI_FLASH_CTRL_START 0x800
#define SPI_FLASH_CTRL_EN_VPD 0x2000
#define SPI_FLASH_CTRL_LDSTART 0x8000
#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
#define REG_SPI_ADDR 0x204
#define REG_SPI_DATA 0x208
#define REG_SPI_FLASH_CONFIG 0x20C
#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
#define REG_SPI_FLASH_OP_PROGRAM 0x210
#define REG_SPI_FLASH_OP_SC_ERASE 0x211
#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
#define REG_SPI_FLASH_OP_RDID 0x213
#define REG_SPI_FLASH_OP_WREN 0x214
#define REG_SPI_FLASH_OP_RDSR 0x215
#define REG_SPI_FLASH_OP_WRSR 0x216
#define REG_SPI_FLASH_OP_READ 0x217
#define REG_TWSI_CTRL 0x218
#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
#define TWSI_CTRL_LD_OFFSET_SHIFT 0
#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
#define TWSI_CTRL_SW_LDSTART 0x800
#define TWSI_CTRL_HW_LDSTART 0x1000
#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
#define TWSI_CTRL_LD_EXIST 0x400000
#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
#define TWSI_CTRL_FREQ_SEL_100K 0
#define TWSI_CTRL_FREQ_SEL_200K 1
#define TWSI_CTRL_FREQ_SEL_300K 2
#define TWSI_CTRL_FREQ_SEL_400K 3
#define TWSI_CTRL_SMB_SLV_ADDR
#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
#define REG_PCIE_DEV_MISC_CTRL 0x21C
#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
#define REG_PCIE_PHYMISC 0x1000
#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
#define REG_LTSSM_TEST_MODE 0x12FC
#define LTSSM_TEST_MODE_DEF 0xE000
/* Selene Master Control Register */
#define REG_MASTER_CTRL 0x1400
#define MASTER_CTRL_SOFT_RST 0x1
#define MASTER_CTRL_MTIMER_EN 0x2
#define MASTER_CTRL_ITIMER_EN 0x4
#define MASTER_CTRL_MANUAL_INT 0x8
#define MASTER_CTRL_ITIMER2_EN 0x20
#define MASTER_CTRL_INT_RDCLR 0x40
#define MASTER_CTRL_LED_MODE 0x200
#define MASTER_CTRL_REV_NUM_SHIFT 16
#define MASTER_CTRL_REV_NUM_MASK 0xff
#define MASTER_CTRL_DEV_ID_SHIFT 24
#define MASTER_CTRL_DEV_ID_MASK 0xff
/* Timer Initial Value Register */
#define REG_MANUAL_TIMER_INIT 0x1404
/* IRQ ModeratorTimer Initial Value Register */
#define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */
#define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */
#define REG_GPHY_CTRL 0x140C
#define GPHY_CTRL_EXT_RESET 1
#define GPHY_CTRL_PIPE_MOD 2
#define GPHY_CTRL_TEST_MODE_MASK 3
#define GPHY_CTRL_TEST_MODE_SHIFT 2
#define GPHY_CTRL_BERT_START 0x10
#define GPHY_CTRL_GATE_25M_EN 0x20
#define GPHY_CTRL_LPW_EXIT 0x40
#define GPHY_CTRL_PHY_IDDQ 0x80
#define GPHY_CTRL_PHY_IDDQ_DIS 0x100
#define GPHY_CTRL_PCLK_SEL_DIS 0x200
#define GPHY_CTRL_HIB_EN 0x400
#define GPHY_CTRL_HIB_PULSE 0x800
#define GPHY_CTRL_SEL_ANA_RST 0x1000
#define GPHY_CTRL_PHY_PLL_ON 0x2000
#define GPHY_CTRL_PWDOWN_HW 0x4000
#define GPHY_CTRL_DEFAULT (\
GPHY_CTRL_PHY_PLL_ON |\
GPHY_CTRL_SEL_ANA_RST |\
GPHY_CTRL_HIB_PULSE |\
GPHY_CTRL_HIB_EN)
#define GPHY_CTRL_PW_WOL_DIS (\
GPHY_CTRL_PHY_PLL_ON |\
GPHY_CTRL_SEL_ANA_RST |\
GPHY_CTRL_HIB_PULSE |\
GPHY_CTRL_HIB_EN |\
GPHY_CTRL_PWDOWN_HW |\
GPHY_CTRL_PCLK_SEL_DIS |\
GPHY_CTRL_PHY_IDDQ)
/* IRQ Anti-Lost Timer Initial Value Register */
#define REG_CMBDISDMA_TIMER 0x140E
/* Block IDLE Status Register */
#define REG_IDLE_STATUS 0x1410
#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 0: RXMAC is idling */
#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 0: TXMAC is idling */
#define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */
#define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */
#define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */
#define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state. 0: DMAW is idling */
#define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */
#define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */
/* MDIO Control Register */
#define REG_MDIO_CTRL 0x1414
#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */
#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/
#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */
#define MDIO_REG_ADDR_SHIFT 16
#define MDIO_RW 0x200000 /* 1: read, 0: write */
#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */
#define MDIO_START 0x800000 /* Write 1 to start an MDIO cycle; the bit self-clears after one cycle */
#define MDIO_CLK_SEL_SHIFT 24
#define MDIO_CLK_25_4 0
#define MDIO_CLK_25_6 2
#define MDIO_CLK_25_8 3
#define MDIO_CLK_25_10 4
#define MDIO_CLK_25_14 5
#define MDIO_CLK_25_20 6
#define MDIO_CLK_25_28 7
#define MDIO_BUSY 0x8000000
#define MDIO_AP_EN 0x10000000
#define MDIO_WAIT_TIMES 10
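/*
 * Illustrative only: the fields above combine into a single command
 * word written to REG_MDIO_CTRL. A hedged sketch of a read cycle,
 * assuming the AT_READ_REG/AT_WRITE_REG accessors used elsewhere in
 * this driver and <linux/delay.h> for udelay(); the function name is
 * hypothetical.
 */
static inline int example_mdio_read(struct atl1e_hw *hw, u16 reg, u16 *data)
{
        u32 val = ((u32)(reg & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT) |
                  MDIO_RW | MDIO_SUP_PREAMBLE | MDIO_START |
                  ((u32)MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT);
        int i;

        AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
        for (i = 0; i < MDIO_WAIT_TIMES; i++) {
                udelay(2);
                val = AT_READ_REG(hw, REG_MDIO_CTRL);
                if (!(val & (MDIO_START | MDIO_BUSY))) {
                        *data = (u16)(val & MDIO_DATA_MASK);
                        return 0;
                }
        }
        return AT_ERR_TIMEOUT; /* MDIO master never went idle */
}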
/* MII PHY Status Register */
#define REG_PHY_STATUS 0x1418
#define PHY_STATUS_100M 0x20000
#define PHY_STATUS_EMI_CA 0x40000
/* BIST Control and Status Register0 (for the Packet Memory) */
#define REG_BIST0_CTRL 0x141c
#define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */
/* BIST process and reset to zero when BIST is done */
#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */
/* decoder failure or more than 1 cell stuck-to-x failure */
#define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */
/* BIST Control and Status Register1(for the retry buffer of PCI Express) */
#define REG_BIST1_CTRL 0x1420
#define BIST1_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */
/* BIST process and reset to zero when BIST is done */
#define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */
/* decoder failure or more than 1 cell stuck-to-x failure.*/
#define BIST1_FUSE_FLAG 0x4
/* SerDes Lock Detect Control and Status Register */
#define REG_SERDES_LOCK 0x1424
#define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */
#define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */
/* MAC Control Register */
#define REG_MAC_CTRL 0x1480
#define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */
#define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */
#define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */
#define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */
#define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */
#define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */
#define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */
#define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. This bit has higher priority over CRC_EN */
#define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */
#define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */
#define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */
#define MAC_CTRL_PRMLEN_MASK 0xf
#define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */
#define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */
#define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */
#define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */
#define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */
#define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */
#define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */
#define MAC_CTRL_SPEED_MASK 0x300000
#define MAC_CTRL_SPEED_1000 2
#define MAC_CTRL_SPEED_10_100 1
#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */
#define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */
#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */
#define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */
#define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */
#define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */
/* MAC IPG/IFG Control Register */
#define REG_MAC_IPG_IFG 0x1484
#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */
#define MAC_IPG_IFG_IPGT_MASK 0x7f
#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */
#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frames with a gap below this IFG are dropped */
#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */
#define MAC_IPG_IFG_IPGR1_MASK 0x7f
#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */
#define MAC_IPG_IFG_IPGR2_MASK 0x7f
/* MAC STATION ADDRESS */
#define REG_MAC_STA_ADDR 0x1488
/* Hash table for multicast address */
#define REG_RX_HASH_TABLE 0x1490
/* MAC Half-Duplex Control Register */
#define REG_MAC_HALF_DUPLX_CTRL 0x1498
#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */
#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */
#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */
#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */
#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */
#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */
#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */
#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */
#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. In unit of 8-bit time */
/* Maximum Frame Length Control Register */
#define REG_MTU 0x149c
/* Wake-On-Lan control register */
#define REG_WOL_CTRL 0x14a0
#define WOL_PATTERN_EN 0x00000001
#define WOL_PATTERN_PME_EN 0x00000002
#define WOL_MAGIC_EN 0x00000004
#define WOL_MAGIC_PME_EN 0x00000008
#define WOL_LINK_CHG_EN 0x00000010
#define WOL_LINK_CHG_PME_EN 0x00000020
#define WOL_PATTERN_ST 0x00000100
#define WOL_MAGIC_ST 0x00000200
#define WOL_LINKCHG_ST 0x00000400
#define WOL_CLK_SWITCH_EN 0x00008000
#define WOL_PT0_EN 0x00010000
#define WOL_PT1_EN 0x00020000
#define WOL_PT2_EN 0x00040000
#define WOL_PT3_EN 0x00080000
#define WOL_PT4_EN 0x00100000
#define WOL_PT5_EN 0x00200000
#define WOL_PT6_EN 0x00400000
/* WOL Length ( 2 DWORD ) */
#define REG_WOL_PATTERN_LEN 0x14a4
#define WOL_PT_LEN_MASK 0x7f
#define WOL_PT0_LEN_SHIFT 0
#define WOL_PT1_LEN_SHIFT 8
#define WOL_PT2_LEN_SHIFT 16
#define WOL_PT3_LEN_SHIFT 24
#define WOL_PT4_LEN_SHIFT 0
#define WOL_PT5_LEN_SHIFT 8
#define WOL_PT6_LEN_SHIFT 16
/* Internal SRAM Partition Register */
#define REG_SRAM_TRD_ADDR 0x1518
#define REG_SRAM_TRD_LEN 0x151C
#define REG_SRAM_RXF_ADDR 0x1520
#define REG_SRAM_RXF_LEN 0x1524
#define REG_SRAM_TXF_ADDR 0x1528
#define REG_SRAM_TXF_LEN 0x152C
#define REG_SRAM_TCPH_ADDR 0x1530
#define REG_SRAM_PKTH_ADDR 0x1532
/* Load Ptr Register */
#define REG_LOAD_PTR 0x1534 /* Software sets this bit after the initialization of the head and tail */
/*
* addresses of all descriptors, as well as the following descriptor
* control register, which triggers each function block to load the head
* pointer to prepare for the operation. This bit is then self-cleared
* after one cycle.
*/
/* Descriptor Control register */
#define REG_RXF3_BASE_ADDR_HI 0x153C
#define REG_DESC_BASE_ADDR_HI 0x1540
#define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */
#define REG_HOST_RXF0_PAGE0_LO 0x1544
#define REG_HOST_RXF0_PAGE1_LO 0x1548
#define REG_TPD_BASE_ADDR_LO 0x154C
#define REG_RXF1_BASE_ADDR_HI 0x1550
#define REG_RXF2_BASE_ADDR_HI 0x1554
#define REG_HOST_RXFPAGE_SIZE 0x1558
#define REG_TPD_RING_SIZE 0x155C
/* RSS about */
#define REG_RSS_KEY0 0x14B0
#define REG_RSS_KEY1 0x14B4
#define REG_RSS_KEY2 0x14B8
#define REG_RSS_KEY3 0x14BC
#define REG_RSS_KEY4 0x14C0
#define REG_RSS_KEY5 0x14C4
#define REG_RSS_KEY6 0x14C8
#define REG_RSS_KEY7 0x14CC
#define REG_RSS_KEY8 0x14D0
#define REG_RSS_KEY9 0x14D4
#define REG_IDT_TABLE4 0x14E0
#define REG_IDT_TABLE5 0x14E4
#define REG_IDT_TABLE6 0x14E8
#define REG_IDT_TABLE7 0x14EC
#define REG_IDT_TABLE0 0x1560
#define REG_IDT_TABLE1 0x1564
#define REG_IDT_TABLE2 0x1568
#define REG_IDT_TABLE3 0x156C
#define REG_IDT_TABLE REG_IDT_TABLE0
#define REG_RSS_HASH_VALUE 0x1570
#define REG_RSS_HASH_FLAG 0x1574
#define REG_BASE_CPU_NUMBER 0x157C
/* TXQ Control Register */
#define REG_TXQ_CTRL 0x1580
#define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF
#define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0
#define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */
#define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */
#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */
#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff
/* Jumbo packet Threshold for task offload */
#define REG_TX_EARLY_TH 0x1584 /* Jumbo frame threshold in QWORD unit. Packet greater than */
/* JUMBO_TASK_OFFLOAD_THRESHOLD will not be task offloaded. */
#define TX_TX_EARLY_TH_MASK 0x7ff
#define TX_TX_EARLY_TH_SHIFT 0
/* RXQ Control Register */
#define REG_RXQ_CTRL 0x15A0
#define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */
#define RXQ_CTRL_PBA_ALIGN_64 1
#define RXQ_CTRL_PBA_ALIGN_128 2
#define RXQ_CTRL_PBA_ALIGN_256 3
#define RXQ_CTRL_Q1_EN 0x10
#define RXQ_CTRL_Q2_EN 0x20
#define RXQ_CTRL_Q3_EN 0x40
#define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80
#define RXQ_CTRL_HASH_TLEN_SHIFT 8
#define RXQ_CTRL_HASH_TLEN_MASK 0xFF
#define RXQ_CTRL_HASH_TYPE_IPV4 0x10000
#define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000
#define RXQ_CTRL_HASH_TYPE_IPV6 0x40000
#define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000
#define RXQ_CTRL_RSS_MODE_DISABLE 0
#define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000
#define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000
#define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000
#define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000
#define RXQ_CTRL_HASH_ENABLE 0x20000000
#define RXQ_CTRL_CUT_THRU_EN 0x40000000
#define RXQ_CTRL_EN 0x80000000
/* Rx jumbo packet threshold and rrd retirement timer */
#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4
/*
* Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit.
* When the packet length greater than or equal to this value, RXQ
* shall start cut-through forwarding of the received packet.
*/
#define RXQ_JMBOSZ_TH_MASK 0x7ff
#define RXQ_JMBOSZ_TH_SHIFT 0 /* RRD retirement timer. Decrement by 1 after every 512ns passes*/
#define RXQ_JMBO_LKAH_MASK 0xf
#define RXQ_JMBO_LKAH_SHIFT 11
/* RXF flow control register */
#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0
#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff
#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16
#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff
/* DMA Engine Control Register */
#define REG_DMA_CTRL 0x15C0
#define DMA_CTRL_DMAR_IN_ORDER 0x1
#define DMA_CTRL_DMAR_ENH_ORDER 0x2
#define DMA_CTRL_DMAR_OUT_ORDER 0x4
#define DMA_CTRL_RCB_VALUE 0x8
#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
#define DMA_CTRL_DMAR_REQ_PRI 0x400
#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F
#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11
#define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF
#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16
#define DMA_CTRL_TXCMB_EN 0x100000
#define DMA_CTRL_RXCMB_EN 0x200000
/* CMB/SMB Control Register */
#define REG_SMB_STAT_TIMER 0x15C4
#define REG_TRIG_RRD_THRESH 0x15CA
#define REG_TRIG_TPD_THRESH 0x15C8
#define REG_TRIG_TXTIMER 0x15CC
#define REG_TRIG_RXTIMER 0x15CE
/* HOST RXF Page 1,2,3 address */
#define REG_HOST_RXF1_PAGE0_LO 0x15D0
#define REG_HOST_RXF1_PAGE1_LO 0x15D4
#define REG_HOST_RXF2_PAGE0_LO 0x15D8
#define REG_HOST_RXF2_PAGE1_LO 0x15DC
#define REG_HOST_RXF3_PAGE0_LO 0x15E0
#define REG_HOST_RXF3_PAGE1_LO 0x15E4
/* Mail box */
#define REG_MB_RXF1_RADDR 0x15B4
#define REG_MB_RXF2_RADDR 0x15B8
#define REG_MB_RXF3_RADDR 0x15BC
#define REG_MB_TPD_PROD_IDX 0x15F0
/* RXF-Page 0-3 PageNo & Valid bit */
#define REG_HOST_RXF0_PAGE0_VLD 0x15F4
#define HOST_RXF_VALID 1
#define HOST_RXF_PAGENO_SHIFT 1
#define HOST_RXF_PAGENO_MASK 0x7F
#define REG_HOST_RXF0_PAGE1_VLD 0x15F5
#define REG_HOST_RXF1_PAGE0_VLD 0x15F6
#define REG_HOST_RXF1_PAGE1_VLD 0x15F7
#define REG_HOST_RXF2_PAGE0_VLD 0x15F8
#define REG_HOST_RXF2_PAGE1_VLD 0x15F9
#define REG_HOST_RXF3_PAGE0_VLD 0x15FA
#define REG_HOST_RXF3_PAGE1_VLD 0x15FB
/* Interrupt Status Register */
#define REG_ISR 0x1600
#define ISR_SMB 1
#define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */
/*
* Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set
* in Table 51 Selene Master Control Register (Offset 0x1400).
*/
#define ISR_MANUAL 4
#define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */
#define ISR_HOST_RXF0_OV 0x10
#define ISR_HOST_RXF1_OV 0x20
#define ISR_HOST_RXF2_OV 0x40
#define ISR_HOST_RXF3_OV 0x80
#define ISR_TXF_UN 0x100
#define ISR_RX0_PAGE_FULL 0x200
#define ISR_DMAR_TO_RST 0x400
#define ISR_DMAW_TO_RST 0x800
#define ISR_GPHY 0x1000
#define ISR_TX_CREDIT 0x2000
#define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */
#define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */
#define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */
#define ISR_TX_DMA 0x40000
#define ISR_RX_PKT_1 0x80000
#define ISR_RX_PKT_2 0x100000
#define ISR_RX_PKT_3 0x200000
#define ISR_MAC_RX 0x400000
#define ISR_MAC_TX 0x800000
#define ISR_UR_DETECTED 0x1000000
#define ISR_FERR_DETECTED 0x2000000
#define ISR_NFERR_DETECTED 0x4000000
#define ISR_CERR_DETECTED 0x8000000
#define ISR_PHY_LINKDOWN 0x10000000
#define ISR_DIS_INT 0x80000000
/* Interrupt Mask Register */
#define REG_IMR 0x1604
#define IMR_NORMAL_MASK (\
ISR_SMB |\
ISR_TXF_UN |\
ISR_HW_RXF_OV |\
ISR_HOST_RXF0_OV|\
ISR_MANUAL |\
ISR_GPHY |\
ISR_GPHY_LPW |\
ISR_DMAR_TO_RST |\
ISR_DMAW_TO_RST |\
ISR_PHY_LINKDOWN|\
ISR_RX_PKT |\
ISR_TX_PKT)
#define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT)
#define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT)
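/*
 * Illustrative only: how an interrupt handler might classify a status
 * word read from REG_ISR using the grouped masks above. The function
 * and variable names are hypothetical.
 */
static inline void example_classify_isr(u32 status)
{
        bool tx_event  = status & ISR_TX_EVENT;     /* TPD done / TX FIFO underrun */
        bool rx_event  = status & ISR_RX_EVENT;     /* packet received / RX FIFO overflow */
        bool link_down = status & ISR_PHY_LINKDOWN; /* PHY lost link */

        if (link_down)
                printk(KERN_WARNING "atl1e: link-down interrupt\n");
        if (tx_event || rx_event)
                printk(KERN_DEBUG "atl1e: datapath interrupt, status 0x%08x\n",
                       status);
}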
#define REG_MAC_RX_STATUS_BIN 0x1700
#define REG_MAC_RX_STATUS_END 0x175c
#define REG_MAC_TX_STATUS_BIN 0x1760
#define REG_MAC_TX_STATUS_END 0x17c0
/* Hardware Offset Register */
#define REG_HOST_RXF0_PAGEOFF 0x1800
#define REG_TPD_CONS_IDX 0x1804
#define REG_HOST_RXF1_PAGEOFF 0x1808
#define REG_HOST_RXF2_PAGEOFF 0x180C
#define REG_HOST_RXF3_PAGEOFF 0x1810
/* RXF-Page 0-3 Offset DMA Address */
#define REG_HOST_RXF0_MB0_LO 0x1820
#define REG_HOST_RXF0_MB1_LO 0x1824
#define REG_HOST_RXF1_MB0_LO 0x1828
#define REG_HOST_RXF1_MB1_LO 0x182C
#define REG_HOST_RXF2_MB0_LO 0x1830
#define REG_HOST_RXF2_MB1_LO 0x1834
#define REG_HOST_RXF3_MB0_LO 0x1838
#define REG_HOST_RXF3_MB1_LO 0x183C
/* Tpd CMB DMA Address */
#define REG_HOST_TX_CMB_LO 0x1840
#define REG_HOST_SMB_ADDR_LO 0x1844
/* DEBUG ADDR */
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
/***************************** MII definition ***************************************/
/* PHY Common Register */
#define MII_BMCR 0x00
#define MII_BMSR 0x01
#define MII_PHYSID1 0x02
#define MII_PHYSID2 0x03
#define MII_ADVERTISE 0x04
#define MII_LPA 0x05
#define MII_EXPANSION 0x06
#define MII_AT001_CR 0x09
#define MII_AT001_SR 0x0A
#define MII_AT001_ESR 0x0F
#define MII_AT001_PSCR 0x10
#define MII_AT001_PSSR 0x11
#define MII_INT_CTRL 0x12
#define MII_INT_STATUS 0x13
#define MII_SMARTSPEED 0x14
#define MII_RERRCOUNTER 0x15
#define MII_SREVISION 0x16
#define MII_RESV1 0x17
#define MII_LBRERROR 0x18
#define MII_PHYADDR 0x19
#define MII_RESV2 0x1a
#define MII_TPISTATUS 0x1b
#define MII_NCONFIG 0x1c
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
/* PHY Control Register */
#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_MASK 0x2040
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* Link partner ability register. */
#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
#define MII_LPA_PAUSE 0x0400 /* PAUSE */
#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
#define MII_LPA_NPAGE 0x8000 /* Next page bit */
/* Autoneg Advertisement Register */
#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
#define MII_AR_SPEED_MASK 0x01E0
#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
/* 1000BASE-T Control Register */
#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
/* 0=DTE device */
#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
/* 0=Configure PHY as Slave */
#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
/* 0=Automatic Master/Slave config */
#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
/* 1000BASE-T Status Register */
#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
/* Extended Status Register */
#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
/* AT001 PHY Specific Control Register */
#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008
#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
* 0=CLK125 toggling
*/
#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
/* Manual MDI configuration */
#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
* 100BASE-TX/10BASE-T:
* MDI Mode
*/
#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
* all speeds.
*/
#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080
/* 1=Enable Extended 10BASE-T distance
* (Lower 10BASE-T RX Threshold)
* 0=Normal 10BASE-T RX Threshold */
#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100
/* 1=5-Bit interface in 100BASE-TX
* 0=MII interface in 100BASE-TX */
#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1
#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5
#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
/* AT001 PHY Specific Status Register */
#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */
#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */
#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
#endif /*_ATHL1E_HW_H_*/

File diff suppressed because it is too large

View File

@ -0,0 +1,263 @@
/*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/netdevice.h>
#include "atl1e.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define ATL1E_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET }
#define ATL1E_PARAM(x, desc) \
static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
static int num_##x; \
module_param_array_named(x, x, int, &num_##x, 0); \
MODULE_PARM_DESC(x, desc);
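/*
 * For illustration only: roughly what one ATL1E_PARAM() invocation
 * expands to. The parameter name foo_cnt is hypothetical and the
 * expansion is hand-written from the macro body, not compiler output.
 *
 *      ATL1E_PARAM(foo_cnt, "Hypothetical example parameter");
 *
 * becomes approximately:
 *
 *      static int __devinitdata foo_cnt[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT;
 *      static int num_foo_cnt;
 *      module_param_array_named(foo_cnt, foo_cnt, int, &num_foo_cnt, 0);
 *      MODULE_PARM_DESC(foo_cnt, "Hypothetical example parameter");
 *
 * Since module_param_array_named() accepts comma-separated values, a
 * load-time setting such as "modprobe atl1e tx_desc_cnt=256,128" would
 * apply 256 to the first board and 128 to the second, with
 * num_tx_desc_cnt recording how many values were supplied.
 */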
/* Transmit descriptor count
*
* Valid Range: 32-1020 (see ATL1E_MIN/MAX_TX_DESC_CNT below)
*
* Default Value: 128
*/
#define ATL1E_MIN_TX_DESC_CNT 32
#define ATL1E_MAX_TX_DESC_CNT 1020
#define ATL1E_DEFAULT_TX_DESC_CNT 128
ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor count");
/* Receive memory (rx buffer) size in KB
*
* Valid Range: 8-1024
*
* Default Value: 256
*/
#define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */
#define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */
#define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 256KB */
ATL1E_PARAM(rx_mem_size, "memory size of rx buffer (KB)");
/* User Specified MediaType Override
*
* Valid Range: 0-4
* - 0 - auto-negotiate at all supported speeds
* - 1 - only link at 100Mbps Full Duplex
* - 2 - only link at 100Mbps Half Duplex
* - 3 - only link at 10Mbps Full Duplex
* - 4 - only link at 10Mbps Half Duplex
* Default Value: 0
*/
ATL1E_PARAM(media_type, "MediaType Select");
/* Interrupt Moderation Timer in units of 2 us
*
* Valid Range: 50-65000
*
* Default Value: 100 (200us)
*/
#define INT_MOD_DEFAULT_CNT 100 /* 200us */
#define INT_MOD_MAX_CNT 65000
#define INT_MOD_MIN_CNT 50
ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer");
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
#define FLASH_VENDOR_DEFAULT 0
#define FLASH_VENDOR_MIN 0
#define FLASH_VENDOR_MAX 2
struct atl1e_option {
enum { enable_option, range_option, list_option } type;
char *name;
char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
struct atl1e_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
dev_info(&pdev->dev, "%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
dev_info(&pdev->dev, "%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option:{
int i;
struct atl1e_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
dev_info(&pdev->dev, "%s\n",
ent->str);
return 0;
}
}
break;
}
default:
BUG();
}
dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
/*
* atl1e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
if (bd >= ATL1E_MAX_NIC) {
dev_notice(&pdev->dev, "no configuration for board #%i\n", bd);
dev_notice(&pdev->dev, "Using defaults for all values\n");
}
{ /* Transmit Ring Size */
struct atl1e_option opt = {
.type = range_option,
.name = "Transmit Descriptor Count",
.err = "using default of "
__MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT),
.def = ATL1E_DEFAULT_TX_DESC_CNT,
.arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT,
.max = ATL1E_MAX_TX_DESC_CNT} }
};
int val;
if (num_tx_desc_cnt > bd) {
val = tx_desc_cnt[bd];
atl1e_validate_option(&val, &opt, pdev);
adapter->tx_ring.count = (u16) val & 0xFFFC;
} else
adapter->tx_ring.count = (u16)opt.def;
}
{ /* Receive Memory Block Count */
struct atl1e_option opt = {
.type = range_option,
.name = "Memory size of rx buffer (KB)",
.err = "using default of "
__MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE),
.def = ATL1E_DEFAULT_RX_MEM_SIZE,
.arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE,
.max = ATL1E_MAX_RX_MEM_SIZE} }
};
int val;
if (num_rx_mem_size > bd) {
val = rx_mem_size[bd];
atl1e_validate_option(&val, &opt, pdev);
adapter->rx_ring.page_size = (u32)val * 1024;
} else {
adapter->rx_ring.page_size = (u32)opt.def * 1024;
}
}
{ /* Interrupt Moderate Timer */
struct atl1e_option opt = {
.type = range_option,
.name = "Interrupt Moderate Timer",
.err = "using default of "
__MODULE_STRING(INT_MOD_DEFAULT_CNT),
.def = INT_MOD_DEFAULT_CNT,
.arg = { .r = { .min = INT_MOD_MIN_CNT,
.max = INT_MOD_MAX_CNT} }
};
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
atl1e_validate_option(&val, &opt, pdev);
adapter->hw.imt = (u16) val;
} else
adapter->hw.imt = (u16)(opt.def);
}
{ /* MediaType */
struct atl1e_option opt = {
.type = range_option,
.name = "Speed/Duplex Selection",
.err = "using default of "
__MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR),
.def = MEDIA_TYPE_AUTO_SENSOR,
.arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR,
.max = MEDIA_TYPE_10M_HALF} }
};
int val;
if (num_media_type > bd) {
val = media_type[bd];
atl1e_validate_option(&val, &opt, pdev);
adapter->hw.media_type = (u16) val;
} else
adapter->hw.media_type = (u16)(opt.def);
}
}

View File

@ -888,19 +888,22 @@ dm9000_rx(struct net_device *dev)
dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
}
if (rxhdr.RxStatus & 0xbf) {
/* rxhdr.RxStatus is identical to RSR register. */
if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
RSR_PLE | RSR_RWTO |
RSR_LCS | RSR_RF)) {
GoodPacket = false;
if (rxhdr.RxStatus & 0x01) {
if (rxhdr.RxStatus & RSR_FOE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "fifo error\n");
dev->stats.rx_fifo_errors++;
}
if (rxhdr.RxStatus & 0x02) {
if (rxhdr.RxStatus & RSR_CE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "crc error\n");
dev->stats.rx_crc_errors++;
}
if (rxhdr.RxStatus & 0x80) {
if (rxhdr.RxStatus & RSR_RF) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "length error\n");
dev->stats.rx_length_errors++;
@ -1067,7 +1070,7 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
iow(db, DM9000_EPCR, 0xc); /* Issue phyxcer read command */
iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock,flags);
@ -1118,7 +1121,7 @@ dm9000_phy_write(struct net_device *dev,
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */
iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);

View File

@ -90,10 +90,13 @@ struct e1000_adapter;
#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
#define PFX "e1000: "
#define DPRINTK(nlevel, klevel, fmt, args...) \
(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
__FUNCTION__ , ## args))
#define DPRINTK(nlevel, klevel, fmt, args...) \
do { \
if (NETIF_MSG_##nlevel & adapter->msg_enable) \
printk(KERN_##klevel PFX "%s: %s: " fmt, \
adapter->netdev->name, __func__, ##args); \
} while (0)
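/*
 * Illustrative usage (hedged): because the macro body is wrapped in
 * do { } while (0), it can sit in an unbraced if/else without
 * breaking, e.g. in a driver .c file:
 *
 *      if (link_up)
 *              DPRINTK(PROBE, INFO, "NIC Link is Up, %u Mbps\n", speed);
 *      else
 *              DPRINTK(PROBE, INFO, "NIC Link is Down\n");
 *
 * PROBE and INFO expand to NETIF_MSG_PROBE and KERN_INFO; the message
 * is emitted only when that class is enabled in adapter->msg_enable.
 */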
#define E1000_MAX_INTR 10
@ -151,9 +154,9 @@ struct e1000_adapter;
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
#define E1000_MNG_VLAN_NONE -1
#define E1000_MNG_VLAN_NONE (-1)
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
@ -165,9 +168,13 @@ struct e1000_buffer {
u16 next_to_watch;
};
struct e1000_ps_page {
struct page *ps_page[PS_PAGE_BUFFERS];
};
struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
struct e1000_ps_page_dma {
u64 ps_page_dma[PS_PAGE_BUFFERS];
};
struct e1000_tx_ring {
/* pointer to the descriptor ring memory */
@ -217,13 +224,13 @@ struct e1000_rx_ring {
u16 rdt;
};
#define E1000_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
#define E1000_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) \
? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
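/*
 * Worked example (numbers are illustrative): with count = 256,
 * next_to_use = 10 and next_to_clean = 4, the ternary picks count
 * (clean is not ahead of use), so 256 + 4 - 10 - 1 = 249 descriptors
 * are free. With next_to_clean = 20 instead, the ternary picks 0 and
 * 0 + 20 - 10 - 1 = 9 are free. The "- 1" keeps one slot unused so a
 * full ring is never mistaken for an empty one.
 */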
#define E1000_RX_DESC_PS(R, i) \
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_RX_DESC_EXT(R, i) \
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
@ -246,9 +253,7 @@ struct e1000_adapter {
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
#ifdef CONFIG_E1000_NAPI
spinlock_t tx_queue_lock;
#endif
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
@ -286,22 +291,16 @@ struct e1000_adapter {
bool detect_tx_hung;
/* RX */
#ifdef CONFIG_E1000_NAPI
bool (*clean_rx) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
#else
bool (*clean_rx) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
#endif
void (*alloc_rx_buf) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
bool (*clean_rx)(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
void (*alloc_rx_buf)(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
struct e1000_rx_ring *rx_ring; /* One per active queue */
#ifdef CONFIG_E1000_NAPI
struct napi_struct napi;
struct net_device *polling_netdev; /* One per active queue */
#endif
int num_tx_queues;
int num_rx_queues;
@ -317,7 +316,6 @@ struct e1000_adapter {
u64 gorcl_old;
u16 rx_ps_bsize0;
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@ -342,6 +340,10 @@ struct e1000_adapter {
bool quad_port_a;
unsigned long flags;
u32 eeprom_wol;
/* for ioport free */
int bars;
int need_ioport;
};
enum e1000_state_t {
@ -353,9 +355,18 @@ enum e1000_state_t {
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
extern int e1000_up(struct e1000_adapter *adapter);
extern void e1000_down(struct e1000_adapter *adapter);
extern void e1000_reinit_locked(struct e1000_adapter *adapter);
extern void e1000_reset(struct e1000_adapter *adapter);
extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
#endif /* _E1000_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -55,13 +55,13 @@
#define DEBUGOUT7 DEBUGOUT3
#define E1000_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))))
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg)))
#define E1000_READ_REG(a, reg) ( \
readl((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))
#define ew32(reg, value) \
(writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg))))
#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), ((a)->hw_addr + \
@ -96,7 +96,7 @@
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset)))
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
#define E1000_WRITE_FLUSH() er32(STATUS)
#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
writel((value), ((a)->flash_address + reg)))
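/*
 * Illustrative only: the shorter accessors in use. er32()/ew32()
 * expect a local variable named hw in scope; the function below and
 * the choice of the CTRL register are hypothetical.
 */
static inline void example_set_link_up(struct e1000_hw *hw)
{
        u32 ctrl = er32(CTRL);                  /* read Device Control */

        ew32(CTRL, ctrl | E1000_CTRL_SLU);      /* request "set link up" */
        E1000_WRITE_FLUSH();                    /* post the write */
}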

View File

@ -213,10 +213,9 @@ struct e1000_option {
} arg;
};
static int __devinit
e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
static int __devinit e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@ -278,8 +277,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* in a variable in the adapter structure.
**/
void __devinit
e1000_check_options(struct e1000_adapter *adapter)
void __devinit e1000_check_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
@ -551,8 +549,7 @@ e1000_check_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on fiber adapters
**/
static void __devinit
e1000_check_fiber_options(struct e1000_adapter *adapter)
static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
@ -579,8 +576,7 @@ e1000_check_fiber_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on copper adapters
**/
static void __devinit
e1000_check_copper_options(struct e1000_adapter *adapter)
static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
{
unsigned int speed, dplx, an;
int bd = adapter->bd_number;

View File

@ -262,7 +262,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
}
outw(Perf_Page, ioaddr + HP_PAGING);
NS8390_init(dev, 0);
NS8390p_init(dev, 0);
/* Leave the 8390 and HP chip reset. */
outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);

View File

@ -389,7 +389,7 @@ static void __init
hp_init_card(struct net_device *dev)
{
int irq = dev->irq;
NS8390_init(dev, 0);
NS8390p_init(dev, 0);
outb_p(irqmap[irq&0x0f] | HP_RUN,
dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
return;

View File

@ -385,7 +385,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *rx_ring = &adapter->rx_ring[i];
rx_ring->buddy = 0;
rx_ring->buddy = NULL;
igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
adapter->eims_enable_mask |= rx_ring->eims_value;
if (rx_ring->itr_val)

View File

@ -70,8 +70,6 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
board_82598 },

View File

@ -287,7 +287,7 @@ int meth_reset(struct net_device *dev)
/* Initial mode: 10 | Half-duplex | Accept normal packets */
priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
if (dev->flags | IFF_PROMISC)
if (dev->flags & IFF_PROMISC)
priv->mac_ctrl |= METH_PROMISC;
mace->eth.mac_ctrl = priv->mac_ctrl;

View File

@ -2112,7 +2112,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
mv643xx_eth_irq(dev->irq, dev);
wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_CAUSE_EXT);
wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
}
#endif

View File

@ -125,7 +125,6 @@ struct myri10ge_cmd {
struct myri10ge_rx_buf {
struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */
struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
struct myri10ge_rx_buffer_state *info;
struct page *page;
@ -140,7 +139,6 @@ struct myri10ge_rx_buf {
struct myri10ge_tx_buf {
struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
u8 __iomem *wc_fifo; /* w/c send fifo address */
struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
char *req_bytes;
struct myri10ge_tx_buffer_state *info;
@ -332,10 +330,6 @@ MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
static int myri10ge_reset_recover = 1;
static int myri10ge_wcfifo = 0;
module_param(myri10ge_wcfifo, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
@ -1218,14 +1212,8 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
/* copy 8 descriptors to the firmware at a time */
if ((idx & 7) == 7) {
if (rx->wc_fifo == NULL)
myri10ge_submit_8rx(&rx->lanai[idx - 7],
&rx->shadow[idx - 7]);
else {
mb();
myri10ge_pio_copy(rx->wc_fifo,
&rx->shadow[idx - 7], 64);
}
myri10ge_submit_8rx(&rx->lanai[idx - 7],
&rx->shadow[idx - 7]);
}
}
}
@ -2229,18 +2217,6 @@ static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
(mgp->sram + cmd.data0);
if (myri10ge_wcfifo && mgp->wc_enabled) {
ss->tx.wc_fifo = (u8 __iomem *)
mgp->sram + MXGEFW_ETH_SEND_4 + 64 * slice;
ss->rx_small.wc_fifo = (u8 __iomem *)
mgp->sram + MXGEFW_ETH_RECV_SMALL + 64 * slice;
ss->rx_big.wc_fifo = (u8 __iomem *)
mgp->sram + MXGEFW_ETH_RECV_BIG + 64 * slice;
} else {
ss->tx.wc_fifo = NULL;
ss->rx_small.wc_fifo = NULL;
ss->rx_big.wc_fifo = NULL;
}
return status;
}
@ -2573,27 +2549,6 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
mb();
}
static inline void
myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
struct mcp_kreq_ether_send *src, int cnt)
{
tx->req += cnt;
mb();
while (cnt >= 4) {
myri10ge_pio_copy(tx->wc_fifo, src, 64);
mb();
src += 4;
cnt -= 4;
}
if (cnt > 0) {
/* pad it to 64 bytes. The src is 64 bytes bigger than it
* needs to be so that we don't overrun it */
myri10ge_pio_copy(tx->wc_fifo + MXGEFW_ETH_SEND_OFFSET(cnt),
src, 64);
mb();
}
}
/*
* Transmit a packet. We need to split the packet so that a single
* segment does not cross myri10ge->tx_boundary, so this makes segment
@ -2830,10 +2785,7 @@ again:
MXGEFW_FLAGS_FIRST)));
idx = ((count - 1) + tx->req) & tx->mask;
tx->info[idx].last = 1;
if (tx->wc_fifo == NULL)
myri10ge_submit_req(tx, tx->req_list, count);
else
myri10ge_submit_req_wc(tx, tx->req_list, count);
myri10ge_submit_req(tx, tx->req_list, count);
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
tx->stop_queue++;
@ -3768,14 +3720,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (mgp->sram_size > mgp->board_span) {
dev_err(&pdev->dev, "board span %ld bytes too small\n",
mgp->board_span);
goto abort_with_wc;
goto abort_with_mtrr;
}
mgp->sram = ioremap(mgp->iomem_base, mgp->board_span);
mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
if (mgp->sram == NULL) {
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
mgp->board_span, mgp->iomem_base);
status = -ENXIO;
goto abort_with_wc;
goto abort_with_mtrr;
}
memcpy_fromio(mgp->eeprom_strings,
mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE,
@ -3876,7 +3828,7 @@ abort_with_firmware:
abort_with_ioremap:
iounmap(mgp->sram);
abort_with_wc:
abort_with_mtrr:
#ifdef CONFIG_MTRR
if (mgp->mtrr >= 0)
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);

View File

@ -355,7 +355,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
/* Read the 16 bytes of station address PROM.
We must first initialize registers, similar to NS8390_init(eifdev, 0).
We must first initialize registers, similar to NS8390p_init(eifdev, 0).
We can't reliably read the SAPROM address without this.
(I learned the hard way!). */
{

View File

@ -404,7 +404,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
/* Read the 16 bytes of station address PROM.
We must first initialize registers, similar to
NS8390_init(eifdev, 0).
NS8390p_init(eifdev, 0).
We can't reliably read the SAPROM address without this.
(I learned the hard way!). */
{

View File

@ -32,4 +32,4 @@
obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
netxen_nic_isr.o netxen_nic_ethtool.o netxen_nic_niu.o
netxen_nic_ethtool.o netxen_nic_niu.o netxen_nic_ctx.o

View File

@ -63,10 +63,12 @@
#include "netxen_nic_hw.h"
#define _NETXEN_NIC_LINUX_MAJOR 3
#define _NETXEN_NIC_LINUX_MINOR 4
#define _NETXEN_NIC_LINUX_SUBVERSION 18
#define NETXEN_NIC_LINUX_VERSIONID "3.4.18"
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
#define _NETXEN_NIC_LINUX_SUBVERSION 0
#define NETXEN_NIC_LINUX_VERSIONID "4.0.0"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define NETXEN_NUM_FLASH_SECTORS (64)
#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
@ -84,7 +86,7 @@
#define TX_RINGSIZE \
(sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
#define RCV_BUFFSIZE \
(sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
(sizeof(struct netxen_rx_buffer) * rds_ring->max_rx_desc_count)
#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
#define NETXEN_NETDEV_STATUS 0x1
@ -111,6 +113,13 @@
#define NX_P2_C0 0x24
#define NX_P2_C1 0x25
#define NX_P3_A0 0x30
#define NX_P3_A2 0x30
#define NX_P3_B0 0x40
#define NX_P3_B1 0x41
#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
#define FIRST_PAGE_GROUP_START 0
#define FIRST_PAGE_GROUP_END 0x100000
@ -125,6 +134,16 @@
#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
#define P2_MAX_MTU (8000)
#define P3_MAX_MTU (9600)
#define NX_ETHERMTU 1500
#define NX_MAX_ETHERHDR 32 /* This contains some padding */
#define NX_RX_NORMAL_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU)
#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU)
#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU)
#define NX_CT_DEFAULT_RX_BUF_LEN 2048
#define MAX_RX_BUFFER_LENGTH 1760
#define MAX_RX_JUMBO_BUFFER_LENGTH 8062
#define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512)
@ -132,7 +151,6 @@
#define RX_JUMBO_DMA_MAP_LEN \
(MAX_RX_JUMBO_BUFFER_LENGTH - 2)
#define RX_LRO_DMA_MAP_LEN (MAX_RX_LRO_BUFFER_LENGTH - 2)
#define NETXEN_ROM_ROUNDUP 0x80000000ULL
/*
* Maximum number of ring contexts
@ -140,16 +158,16 @@
#define MAX_RING_CTX 1
/* Opcodes to be used with the commands */
enum {
TX_ETHER_PKT = 0x01,
/* The following opcodes are for IP checksum */
TX_TCP_PKT,
TX_UDP_PKT,
TX_IP_PKT,
TX_TCP_LSO,
TX_IPSEC,
TX_IPSEC_CMD
};
#define TX_ETHER_PKT 0x01
#define TX_TCP_PKT 0x02
#define TX_UDP_PKT 0x03
#define TX_IP_PKT 0x04
#define TX_TCP_LSO 0x05
#define TX_TCP_LSO6 0x06
#define TX_IPSEC 0x07
#define TX_IPSEC_CMD 0x0a
#define TX_TCPV6_PKT 0x0b
#define TX_UDPV6_PKT 0x0c
/* The following opcodes are for internal consumption. */
#define NETXEN_CONTROL_OP 0x10
@ -191,6 +209,7 @@ enum {
#define MAX_RCV_DESCRIPTORS 16384
#define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4)
#define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4)
#define MAX_RCV_DESCRIPTORS_10G 8192
#define MAX_JUMBO_RCV_DESCRIPTORS 1024
#define MAX_LRO_RCV_DESCRIPTORS 64
#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
@ -219,8 +238,6 @@ enum {
#define MPORT_MULTI_FUNCTION_MODE 0x2222
#include "netxen_nic_phan_reg.h"
extern unsigned long long netxen_dma_mask;
extern unsigned long last_schedule_time;
/*
* NetXen host-peg signal message structure
@ -289,7 +306,7 @@ struct netxen_ring_ctx {
#define netxen_set_cmd_desc_port(cmd_desc, var) \
((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \
((cmd_desc)->port_ctxid |= ((var) & 0xF0))
((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
#define netxen_set_cmd_desc_flags(cmd_desc, val) \
(cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \
@ -377,8 +394,8 @@ struct rcv_desc {
};
/* opcode field in status_desc */
#define RCV_NIC_PKT (0xA)
#define STATUS_NIC_PKT ((RCV_NIC_PKT) << 12)
#define NETXEN_NIC_RXPKT_DESC 0x04
#define NETXEN_OLD_RXPKT_DESC 0x3f
/* for status field in status_desc */
#define STATUS_NEED_CKSUM (1)
@ -410,6 +427,8 @@ struct rcv_desc {
(((sts_data) >> 28) & 0xFFFF)
#define netxen_get_sts_prot(sts_data) \
(((sts_data) >> 44) & 0x0F)
#define netxen_get_sts_pkt_offset(sts_data) \
(((sts_data) >> 48) & 0x1F)
#define netxen_get_sts_opcode(sts_data) \
(((sts_data) >> 58) & 0x03F)
@ -424,17 +443,30 @@ struct rcv_desc {
struct status_desc {
/* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
28-43 reference_handle, 44-47 protocol, 48-52 unused
28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
53-55 desc_cnt, 56-57 owner, 58-63 opcode
*/
__le64 status_desc_data;
__le32 hash_value;
u8 hash_type;
u8 msg_type;
u8 unused;
/* Bit pattern: 0-6 lro_count indicates frag sequence,
7 last_frag indicates last frag */
u8 lro;
union {
struct {
__le32 hash_value;
u8 hash_type;
u8 msg_type;
u8 unused;
union {
/* Bit pattern: 0-6 lro_count indicates frag
* sequence, 7 last_frag indicates last frag
*/
u8 lro;
/* chained buffers */
u8 nr_frags;
};
};
struct {
__le16 frag_handles[4];
};
};
} __attribute__ ((aligned(16)));
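/*
 * Decoding sketch (illustrative): the netxen_get_sts_* accessors above
 * shift and mask fields out of a host-order copy of the status word,
 * e.g. on the receive path:
 *
 *   u64 sts_data = le64_to_cpu(desc->status_desc_data);
 *   int opcode   = netxen_get_sts_opcode(sts_data);      bits 58-63
 *   int prot     = netxen_get_sts_prot(sts_data);        bits 44-47
 *   int offset   = netxen_get_sts_pkt_offset(sts_data);  bits 48-52
 */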
enum {
@ -464,7 +496,20 @@ typedef enum {
NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d,
NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e,
NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f
NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f,
NETXEN_BRDTYPE_P3_REF_QG = 0x0021,
NETXEN_BRDTYPE_P3_HMEZ = 0x0022,
NETXEN_BRDTYPE_P3_10G_CX4_LP = 0x0023,
NETXEN_BRDTYPE_P3_4_GB = 0x0024,
NETXEN_BRDTYPE_P3_IMEZ = 0x0025,
NETXEN_BRDTYPE_P3_10G_SFP_PLUS = 0x0026,
NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,
NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,
NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,
NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
} netxen_brdtype_t;
typedef enum {
@ -747,6 +792,7 @@ struct netxen_cmd_buffer {
/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
struct netxen_rx_buffer {
struct list_head list;
struct sk_buff *skb;
u64 dma;
u16 ref_handle;
@ -765,7 +811,6 @@ struct netxen_rx_buffer {
* contains interrupt info as well shared hardware info.
*/
struct netxen_hardware_context {
struct pci_dev *pdev;
void __iomem *pci_base0;
void __iomem *pci_base1;
void __iomem *pci_base2;
@ -773,15 +818,20 @@ struct netxen_hardware_context {
unsigned long first_page_group_start;
void __iomem *db_base;
unsigned long db_len;
unsigned long pci_len0;
u8 cut_through;
int qdr_sn_window;
int ddr_mn_window;
unsigned long mn_win_crb;
unsigned long ms_win_crb;
u8 revision_id;
u16 board_type;
struct netxen_board_info boardcfg;
u32 xg_linkup;
u32 qg_linksup;
u32 linkup;
/* Address of cmd ring in Phantom */
struct cmd_desc_type0 *cmd_desc_head;
struct pci_dev *cmd_desc_pdev;
dma_addr_t cmd_desc_phys_addr;
struct netxen_adapter *adapter;
int pci_func;
@ -813,17 +863,17 @@ struct netxen_adapter_stats {
* Rcv Descriptor Context. One such per Rcv Descriptor. There may
* be one Rcv Descriptor for normal packets, one for jumbo and may be others.
*/
struct netxen_rcv_desc_ctx {
struct nx_host_rds_ring {
u32 flags;
u32 producer;
u32 rcv_pending; /* Num of bufs posted in phantom */
dma_addr_t phys_addr;
struct pci_dev *phys_pdev;
u32 crb_rcv_producer; /* reg offset */
struct rcv_desc *desc_head; /* address of rx ring in Phantom */
u32 max_rx_desc_count;
u32 dma_size;
u32 skb_size;
struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */
struct list_head free_list;
int begin_alloc;
};
@ -834,17 +884,319 @@ struct netxen_rcv_desc_ctx {
* present elsewhere.
*/
struct netxen_recv_context {
struct netxen_rcv_desc_ctx rcv_desc[NUM_RCV_DESC_RINGS];
u32 status_rx_producer;
u32 state;
u16 context_id;
u16 virt_port;
struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS];
u32 status_rx_consumer;
u32 crb_sts_consumer; /* reg offset */
dma_addr_t rcv_status_desc_phys_addr;
struct pci_dev *rcv_status_desc_pdev;
struct status_desc *rcv_status_desc_head;
};
#define NETXEN_NIC_MSI_ENABLED 0x02
#define NETXEN_DMA_MASK 0xfffffffe
#define NETXEN_DB_MAPSIZE_BYTES 0x1000
/* New HW context creation */
#define NX_OS_CRB_RETRY_COUNT 4000
#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \
(((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
#define NX_CDRP_CLEAR 0x00000000
#define NX_CDRP_CMD_BIT 0x80000000
/*
* All responses must have the NX_CDRP_CMD_BIT cleared
* in the crb NX_CDRP_CRB_OFFSET.
*/
#define NX_CDRP_FORM_RSP(rsp) (rsp)
#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0)
#define NX_CDRP_RSP_OK 0x00000001
#define NX_CDRP_RSP_FAIL 0x00000002
#define NX_CDRP_RSP_TIMEOUT 0x00000003
/*
* All commands must have the NX_CDRP_CMD_BIT set in
* the crb NX_CDRP_CRB_OFFSET.
*/
#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd))
#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0)
#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007
#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008
#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009
#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e
#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f
#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010
#define NX_CDRP_CMD_SET_MTU 0x00000012
#define NX_CDRP_CMD_MAX 0x00000013
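/*
 * Worked example (values follow from the definitions above): posting a
 * SET_MTU request means writing
 *
 *   NX_CDRP_FORM_CMD(NX_CDRP_CMD_SET_MTU) == 0x80000012
 *
 * to the CDRP CRB; the firmware clears NX_CDRP_CMD_BIT in its answer, so
 * any value read back with bit 31 clear satisfies NX_CDRP_IS_RSP().
 */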
#define NX_RCODE_SUCCESS 0
#define NX_RCODE_NO_HOST_MEM 1
#define NX_RCODE_NO_HOST_RESOURCE 2
#define NX_RCODE_NO_CARD_CRB 3
#define NX_RCODE_NO_CARD_MEM 4
#define NX_RCODE_NO_CARD_RESOURCE 5
#define NX_RCODE_INVALID_ARGS 6
#define NX_RCODE_INVALID_ACTION 7
#define NX_RCODE_INVALID_STATE 8
#define NX_RCODE_NOT_SUPPORTED 9
#define NX_RCODE_NOT_PERMITTED 10
#define NX_RCODE_NOT_READY 11
#define NX_RCODE_DOES_NOT_EXIST 12
#define NX_RCODE_ALREADY_EXISTS 13
#define NX_RCODE_BAD_SIGNATURE 14
#define NX_RCODE_CMD_NOT_IMPL 15
#define NX_RCODE_CMD_INVALID 16
#define NX_RCODE_TIMEOUT 17
#define NX_RCODE_CMD_FAILED 18
#define NX_RCODE_MAX_EXCEEDED 19
#define NX_RCODE_MAX 20
#define NX_DESTROY_CTX_RESET 0
#define NX_DESTROY_CTX_D3_RESET 1
#define NX_DESTROY_CTX_MAX 2
/*
* Capabilities
*/
#define NX_CAP_BIT(class, bit) (1 << bit)
#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0)
#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1)
#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2)
#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3)
#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4)
#define NX_CAP0_LRO NX_CAP_BIT(0, 5)
#define NX_CAP0_LSO NX_CAP_BIT(0, 6)
#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7)
#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8)
/*
* Context state
*/
#define NX_HOST_CTX_STATE_FREED 0
#define NX_HOST_CTX_STATE_ALLOCATED 1
#define NX_HOST_CTX_STATE_ACTIVE 2
#define NX_HOST_CTX_STATE_DISABLED 3
#define NX_HOST_CTX_STATE_QUIESCED 4
#define NX_HOST_CTX_STATE_MAX 5
/*
* Rx context
*/
typedef struct {
u64 host_phys_addr; /* Ring base addr */
u32 ring_size; /* Ring entries */
u16 msi_index;
u16 rsvd; /* Padding */
} nx_hostrq_sds_ring_t;
typedef struct {
u64 host_phys_addr; /* Ring base addr */
u64 buff_size; /* Packet buffer size */
u32 ring_size; /* Ring entries */
u32 ring_kind; /* Class of ring */
} nx_hostrq_rds_ring_t;
typedef struct {
u64 host_rsp_dma_addr; /* Response dma'd here */
u32 capabilities[4]; /* Flag bit vector */
u32 host_int_crb_mode; /* Interrupt crb usage */
u32 host_rds_crb_mode; /* RDS crb usage */
/* These ring offsets are relative to data[0] below */
u32 rds_ring_offset; /* Offset to RDS config */
u32 sds_ring_offset; /* Offset to SDS config */
u16 num_rds_rings; /* Count of RDS rings */
u16 num_sds_rings; /* Count of SDS rings */
u16 rsvd1; /* Padding */
u16 rsvd2; /* Padding */
u8 reserved[128]; /* reserve space for future expansion*/
/* MUST BE 64-bit aligned.
The following is packed:
- N hostrq_rds_rings
- N hostrq_sds_rings */
char data[0];
} nx_hostrq_rx_ctx_t;
typedef struct {
u32 host_producer_crb; /* Crb to use */
u32 rsvd1; /* Padding */
} nx_cardrsp_rds_ring_t;
typedef struct {
u32 host_consumer_crb; /* Crb to use */
u32 interrupt_crb; /* Crb to use */
} nx_cardrsp_sds_ring_t;
typedef struct {
/* These ring offsets are relative to data[0] below */
u32 rds_ring_offset; /* Offset to RDS config */
u32 sds_ring_offset; /* Offset to SDS config */
u32 host_ctx_state; /* Starting State */
u32 num_fn_per_port; /* How many PCI fn share the port */
u16 num_rds_rings; /* Count of RDS rings */
u16 num_sds_rings; /* Count of SDS rings */
u16 context_id; /* Handle for context */
u8 phys_port; /* Physical id of port */
u8 virt_port; /* Virtual/Logical id of port */
u8 reserved[128]; /* save space for future expansion */
/* MUST BE 64-bit aligned.
The following is packed:
- N cardrsp_rds_rings
- N cardrs_sds_rings */
char data[0];
} nx_cardrsp_rx_ctx_t;
#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
(sizeof(HOSTRQ_RX) + \
(rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \
(sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
(sizeof(CARDRSP_RX) + \
(rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \
(sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
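/*
 * Sizing sketch (ring counts chosen for illustration): a request for
 * 3 RDS rings (normal, jumbo, LRO) and 1 SDS ring occupies
 *
 *   SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, 3, 1)
 *     == sizeof(nx_hostrq_rx_ctx_t)
 *        + 3 * sizeof(nx_hostrq_rds_ring_t)
 *        + 1 * sizeof(nx_hostrq_sds_ring_t)
 *
 * which is the rq_size handed to pci_alloc_consistent() when the rx
 * context is created.
 */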
/*
* Tx context
*/
typedef struct {
u64 host_phys_addr; /* Ring base addr */
u32 ring_size; /* Ring entries */
u32 rsvd; /* Padding */
} nx_hostrq_cds_ring_t;
typedef struct {
u64 host_rsp_dma_addr; /* Response dma'd here */
u64 cmd_cons_dma_addr; /* */
u64 dummy_dma_addr; /* */
u32 capabilities[4]; /* Flag bit vector */
u32 host_int_crb_mode; /* Interrupt crb usage */
u32 rsvd1; /* Padding */
u16 rsvd2; /* Padding */
u16 interrupt_ctl;
u16 msi_index;
u16 rsvd3; /* Padding */
nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */
u8 reserved[128]; /* future expansion */
} nx_hostrq_tx_ctx_t;
typedef struct {
u32 host_producer_crb; /* Crb to use */
u32 interrupt_crb; /* Crb to use */
} nx_cardrsp_cds_ring_t;
typedef struct {
u32 host_ctx_state; /* Starting state */
u16 context_id; /* Handle for context */
u8 phys_port; /* Physical id of port */
u8 virt_port; /* Virtual/Logical id of port */
nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
u8 reserved[128]; /* future expansion */
} nx_cardrsp_tx_ctx_t;
#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
/* CRB */
#define NX_HOST_RDS_CRB_MODE_UNIQUE 0
#define NX_HOST_RDS_CRB_MODE_SHARED 1
#define NX_HOST_RDS_CRB_MODE_CUSTOM 2
#define NX_HOST_RDS_CRB_MODE_MAX 3
#define NX_HOST_INT_CRB_MODE_UNIQUE 0
#define NX_HOST_INT_CRB_MODE_SHARED 1
#define NX_HOST_INT_CRB_MODE_NORX 2
#define NX_HOST_INT_CRB_MODE_NOTX 3
#define NX_HOST_INT_CRB_MODE_NORXTX 4
/* MAC */
#define MC_COUNT_P2 16
#define MC_COUNT_P3 38
#define NETXEN_MAC_NOOP 0
#define NETXEN_MAC_ADD 1
#define NETXEN_MAC_DEL 2
typedef struct nx_mac_list_s {
struct nx_mac_list_s *next;
uint8_t mac_addr[MAX_ADDR_LEN];
} nx_mac_list_t;
/*
* Interrupt coalescing defaults. The defaults are for 1500 MTU and are
* adjusted based on the configured MTU.
*/
#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3
#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256
#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64
#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4
#define NETXEN_NIC_INTR_DEFAULT 0x04
typedef union {
struct {
uint16_t rx_packets;
uint16_t rx_time_us;
uint16_t tx_packets;
uint16_t tx_time_us;
} data;
uint64_t word;
} nx_nic_intr_coalesce_data_t;
typedef struct {
uint16_t stats_time_us;
uint16_t rate_sample_time;
uint16_t flags;
uint16_t rsvd_1;
uint32_t low_threshold;
uint32_t high_threshold;
nx_nic_intr_coalesce_data_t normal;
nx_nic_intr_coalesce_data_t low;
nx_nic_intr_coalesce_data_t high;
nx_nic_intr_coalesce_data_t irq;
} nx_nic_intr_coalesce_t;
typedef struct {
u64 qhdr;
u64 req_hdr;
u64 words[6];
} nx_nic_req_t;
typedef struct {
u8 op;
u8 tag;
u8 mac_addr[6];
} nx_mac_req_t;
#define MAX_PENDING_DESC_BLOCK_SIZE 64
#define NETXEN_NIC_MSI_ENABLED 0x02
#define NETXEN_NIC_MSIX_ENABLED 0x04
#define NETXEN_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
#define MSIX_ENTRIES_PER_ADAPTER 8
#define NETXEN_MSIX_TBL_SPACE 8192
#define NETXEN_PCI_REG_MSIX_TBL 0x44
#define NETXEN_DB_MAPSIZE_BYTES 0x1000
#define NETXEN_NETDEV_WEIGHT 120
#define NETXEN_ADAPTER_UP_MAGIC 777
#define NETXEN_NIC_PEG_TUNE 0
struct netxen_dummy_dma {
void *addr;
@ -854,46 +1206,65 @@ struct netxen_dummy_dma {
struct netxen_adapter {
struct netxen_hardware_context ahw;
struct netxen_adapter *master;
struct net_device *netdev;
struct pci_dev *pdev;
int pci_using_dac;
struct napi_struct napi;
struct net_device_stats net_stats;
unsigned char mac_addr[ETH_ALEN];
int mtu;
int portnum;
u8 physical_port;
u16 tx_context_id;
uint8_t mc_enabled;
uint8_t max_mc_count;
nx_mac_list_t *mac_list;
struct netxen_legacy_intr_set legacy_intr;
u32 crb_intr_mask;
struct work_struct watchdog_task;
struct timer_list watchdog_timer;
struct work_struct tx_timeout_task;
u32 curr_window;
u32 crb_win;
rwlock_t adapter_lock;
uint64_t dma_mask;
u32 cmd_producer;
__le32 *cmd_consumer;
u32 last_cmd_consumer;
u32 crb_addr_cmd_producer;
u32 crb_addr_cmd_consumer;
u32 max_tx_desc_count;
u32 max_rx_desc_count;
u32 max_jumbo_rx_desc_count;
u32 max_lro_rx_desc_count;
int max_rds_rings;
u32 flags;
u32 irq;
int driver_mismatch;
u32 temp;
u32 fw_major;
u8 msix_supported;
u8 max_possible_rss_rings;
struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
struct netxen_adapter_stats stats;
u16 portno;
u16 link_speed;
u16 link_duplex;
u16 state;
u16 link_autoneg;
int rx_csum;
int status;
spinlock_t stats_lock;
struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */
@ -905,25 +1276,33 @@ struct netxen_adapter {
int is_up;
struct netxen_dummy_dma dummy_dma;
nx_nic_intr_coalesce_t coal;
/* Context interface shared between card and host */
struct netxen_ring_ctx *ctx_desc;
struct pci_dev *ctx_desc_pdev;
dma_addr_t ctx_desc_phys_addr;
int intr_scheme;
int msi_mode;
int (*enable_phy_interrupts) (struct netxen_adapter *);
int (*disable_phy_interrupts) (struct netxen_adapter *);
void (*handle_phy_intr) (struct netxen_adapter *);
int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
int (*set_mtu) (struct netxen_adapter *, int);
int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t);
int (*unset_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t);
int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
int (*init_port) (struct netxen_adapter *, int);
void (*init_niu) (struct netxen_adapter *);
int (*stop_port) (struct netxen_adapter *);
int (*hw_read_wx)(struct netxen_adapter *, ulong, void *, int);
int (*hw_write_wx)(struct netxen_adapter *, ulong, void *, int);
int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int);
int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int);
int (*pci_write_immediate)(struct netxen_adapter *, u64, u32);
u32 (*pci_read_immediate)(struct netxen_adapter *, u64);
void (*pci_write_normalize)(struct netxen_adapter *, u64, u32);
u32 (*pci_read_normalize)(struct netxen_adapter *, u64);
unsigned long (*pci_set_window)(struct netxen_adapter *,
unsigned long long);
}; /* netxen_adapter structure */
/*
@ -988,8 +1367,6 @@ int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter);
void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter);
void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter);
int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
__u32 * readval);
int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
@ -998,27 +1375,61 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
/* Functions available from netxen_nic_hw.c */
int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu);
void netxen_nic_init_niu_gb(struct netxen_adapter *adapter);
void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw);
void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val);
int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off);
void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value);
void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value);
void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value);
void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value);
void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value);
int netxen_nic_get_board_info(struct netxen_adapter *adapter);
int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
int len);
int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
int len);
int netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter,
ulong off, void *data, int len);
int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
ulong off, void *data, int len);
int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
u64 off, void *data, int size);
int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
u64 off, void *data, int size);
int netxen_nic_pci_write_immediate_128M(struct netxen_adapter *adapter,
u64 off, u32 data);
u32 netxen_nic_pci_read_immediate_128M(struct netxen_adapter *adapter, u64 off);
void netxen_nic_pci_write_normalize_128M(struct netxen_adapter *adapter,
u64 off, u32 data);
u32 netxen_nic_pci_read_normalize_128M(struct netxen_adapter *adapter, u64 off);
unsigned long netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
unsigned long long addr);
void netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter,
u32 wndw);
int netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter,
ulong off, void *data, int len);
int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
ulong off, void *data, int len);
int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
u64 off, void *data, int size);
int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
u64 off, void *data, int size);
void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
unsigned long off, int data);
int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter,
u64 off, u32 data);
u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off);
void netxen_nic_pci_write_normalize_2M(struct netxen_adapter *adapter,
u64 off, u32 data);
u32 netxen_nic_pci_read_normalize_2M(struct netxen_adapter *adapter, u64 off);
unsigned long netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
unsigned long long addr);
/* Functions from netxen_nic_init.c */
void netxen_free_adapter_offload(struct netxen_adapter *adapter);
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
int netxen_receive_peg_ready(struct netxen_adapter *adapter);
int netxen_load_firmware(struct netxen_adapter *adapter);
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
u8 *bytes, size_t size);
@ -1032,33 +1443,43 @@ void netxen_halt_pegs(struct netxen_adapter *adapter);
int netxen_rom_se(struct netxen_adapter *adapter, int addr);
/* Functions from netxen_nic_isr.c */
void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
struct pci_dev **used_dev);
int netxen_alloc_sw_resources(struct netxen_adapter *adapter);
void netxen_free_sw_resources(struct netxen_adapter *adapter);
int netxen_alloc_hw_resources(struct netxen_adapter *adapter);
void netxen_free_hw_resources(struct netxen_adapter *adapter);
void netxen_release_rx_buffers(struct netxen_adapter *adapter);
void netxen_release_tx_buffers(struct netxen_adapter *adapter);
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
int netxen_init_firmware(struct netxen_adapter *adapter);
void netxen_free_hw_resources(struct netxen_adapter *adapter);
void netxen_tso_check(struct netxen_adapter *adapter,
struct cmd_desc_type0 *desc, struct sk_buff *skb);
int netxen_nic_hw_resources(struct netxen_adapter *adapter);
void netxen_nic_clear_stats(struct netxen_adapter *adapter);
void netxen_watchdog_task(struct work_struct *work);
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
u32 ringid);
int netxen_process_cmd_ring(struct netxen_adapter *adapter);
u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
void netxen_nic_set_multi(struct net_device *netdev);
void netxen_p2_nic_set_multi(struct net_device *netdev);
void netxen_p3_nic_set_multi(struct net_device *netdev);
int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
int netxen_nic_set_mac(struct net_device *netdev, void *p);
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
uint32_t crb_producer);
/*
* NetXen Board information
*/
#define NETXEN_MAX_SHORT_NAME 16
#define NETXEN_MAX_SHORT_NAME 32
struct netxen_brdinfo {
netxen_brdtype_t brdtype; /* type of board */
long ports; /* max no of physical ports */
@ -1072,6 +1493,17 @@ static const struct netxen_brdinfo netxen_boards[] = {
{NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
{NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
{NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
{NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig "},
{NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"},
{NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"},
{NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"},
{NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"},
{NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
{NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
{NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
{NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"},
{NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
{NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
@ -1097,7 +1529,7 @@ dma_watchdog_shutdown_request(struct netxen_adapter *adapter)
u32 ctrl;
/* check if already inactive */
if (netxen_nic_hw_read_wx(adapter,
if (adapter->hw_read_wx(adapter,
NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
printk(KERN_ERR "failed to read dma watchdog status\n");
@ -1117,7 +1549,7 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter)
{
u32 ctrl;
if (netxen_nic_hw_read_wx(adapter,
if (adapter->hw_read_wx(adapter,
NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
printk(KERN_ERR "failed to read dma watchdog status\n");
@ -1129,7 +1561,7 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
{
u32 ctrl;
if (netxen_nic_hw_read_wx(adapter,
if (adapter->hw_read_wx(adapter,
NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
printk(KERN_ERR "failed to read dma watchdog status\n");

View File

@ -0,0 +1,710 @@
/*
* Copyright (C) 2003 - 2008 NetXen, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston,
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.
*
* Contact Information:
* info@netxen.com
* NetXen,
* 3965 Freedom Circle, Fourth floor,
* Santa Clara, CA 95054
*
*/
#include "netxen_nic_hw.h"
#include "netxen_nic.h"
#include "netxen_nic_phan_reg.h"
#define NXHAL_VERSION 1
static int
netxen_api_lock(struct netxen_adapter *adapter)
{
u32 done = 0, timeout = 0;
for (;;) {
/* Acquire PCIE HW semaphore5 */
netxen_nic_read_w0(adapter,
NETXEN_PCIE_REG(PCIE_SEM5_LOCK), &done);
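/* the read of SEM5_LOCK returns 1 once the semaphore has been
 * granted to this function; netxen_api_unlock() releases it by
 * reading SEM5_UNLOCK */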
if (done == 1)
break;
if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
printk(KERN_ERR "%s: lock timeout.\n", __func__);
return -1;
}
msleep(1);
}
#if 0
netxen_nic_write_w1(adapter,
NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
#endif
return 0;
}
static int
netxen_api_unlock(struct netxen_adapter *adapter)
{
u32 val;
/* Release PCIE HW semaphore5 */
netxen_nic_read_w0(adapter,
NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK), &val);
return 0;
}
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
u32 raw_rsp, rsp = NX_CDRP_RSP_OK;
int timeout = 0;
do {
/* give at least 1ms for firmware to respond */
msleep(1);
if (++timeout > NX_OS_CRB_RETRY_COUNT)
return NX_CDRP_RSP_TIMEOUT;
netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET,
&raw_rsp);
rsp = le32_to_cpu(raw_rsp);
} while (!NX_CDRP_IS_RSP(rsp));
return rsp;
}
static u32
netxen_issue_cmd(struct netxen_adapter *adapter,
u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
u32 rsp;
u32 signature = 0;
u32 rcode = NX_RCODE_SUCCESS;
signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);
/* Acquire semaphore before accessing CRB */
if (netxen_api_lock(adapter))
return NX_RCODE_TIMEOUT;
netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET,
cpu_to_le32(signature));
netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET,
cpu_to_le32(arg1));
netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET,
cpu_to_le32(arg2));
netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET,
cpu_to_le32(arg3));
netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET,
cpu_to_le32(NX_CDRP_FORM_CMD(cmd)));
rsp = netxen_poll_rsp(adapter);
if (rsp == NX_CDRP_RSP_TIMEOUT) {
printk(KERN_ERR "%s: card response timeout.\n",
netxen_nic_driver_name);
rcode = NX_RCODE_TIMEOUT;
} else if (rsp == NX_CDRP_RSP_FAIL) {
netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode);
rcode = le32_to_cpu(rcode);
printk(KERN_ERR "%s: failed card response code:0x%x\n",
netxen_nic_driver_name, rcode);
}
/* Release semaphore */
netxen_api_unlock(adapter);
return rcode;
}
u32
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
{
u32 rcode = NX_RCODE_SUCCESS;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
rcode = netxen_issue_cmd(adapter,
adapter->ahw.pci_func,
NXHAL_VERSION,
recv_ctx->context_id,
mtu,
0,
NX_CDRP_CMD_SET_MTU);
return rcode;
}
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
void *addr;
nx_hostrq_rx_ctx_t *prq;
nx_cardrsp_rx_ctx_t *prsp;
nx_hostrq_rds_ring_t *prq_rds;
nx_hostrq_sds_ring_t *prq_sds;
nx_cardrsp_rds_ring_t *prsp_rds;
nx_cardrsp_sds_ring_t *prsp_sds;
struct nx_host_rds_ring *rds_ring;
dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
u64 phys_addr;
int i, nrds_rings, nsds_rings;
size_t rq_size, rsp_size;
u32 cap, reg;
int err;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
/* only one sds ring for now */
nrds_rings = adapter->max_rds_rings;
nsds_rings = 1;
rq_size =
SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
rsp_size =
SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
addr = pci_alloc_consistent(adapter->pdev,
rq_size, &hostrq_phys_addr);
if (addr == NULL)
return -ENOMEM;
prq = (nx_hostrq_rx_ctx_t *)addr;
addr = pci_alloc_consistent(adapter->pdev,
rsp_size, &cardrsp_phys_addr);
if (addr == NULL) {
err = -ENOMEM;
goto out_free_rq;
}
prsp = (nx_cardrsp_rx_ctx_t *)addr;
prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
prq->capabilities[0] = cpu_to_le32(cap);
prq->host_int_crb_mode =
cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
prq->host_rds_crb_mode =
cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
prq->num_rds_rings = cpu_to_le16(nrds_rings);
prq->num_sds_rings = cpu_to_le16(nsds_rings);
prq->rds_ring_offset = 0;
prq->sds_ring_offset = prq->rds_ring_offset +
(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + prq->rds_ring_offset);
for (i = 0; i < nrds_rings; i++) {
rds_ring = &recv_ctx->rds_rings[i];
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->max_rx_desc_count);
prq_rds[i].ring_kind = cpu_to_le32(i);
prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
}
prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + prq->sds_ring_offset);
prq_sds[0].host_phys_addr =
cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count);
/* only one msix vector for now */
prq_sds[0].msi_index = cpu_to_le32(0);
/* now byteswap offsets */
prq->rds_ring_offset = cpu_to_le32(prq->rds_ring_offset);
prq->sds_ring_offset = cpu_to_le32(prq->sds_ring_offset);
phys_addr = hostrq_phys_addr;
err = netxen_issue_cmd(adapter,
adapter->ahw.pci_func,
NXHAL_VERSION,
(u32)(phys_addr >> 32),
(u32)(phys_addr & 0xffffffff),
rq_size,
NX_CDRP_CMD_CREATE_RX_CTX);
if (err) {
printk(KERN_WARNING
"Failed to create rx ctx in firmware%d\n", err);
goto out_free_rsp;
}
prsp_rds = ((nx_cardrsp_rds_ring_t *)
&prsp->data[prsp->rds_ring_offset]);
for (i = 0; i < le32_to_cpu(prsp->num_rds_rings); i++) {
rds_ring = &recv_ctx->rds_rings[i];
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
}
prsp_sds = ((nx_cardrsp_sds_ring_t *)
&prsp->data[prsp->sds_ring_offset]);
reg = le32_to_cpu(prsp_sds[0].host_consumer_crb);
recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);
reg = le32_to_cpu(prsp_sds[0].interrupt_crb);
adapter->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
recv_ctx->context_id = le16_to_cpu(prsp->context_id);
recv_ctx->virt_port = le16_to_cpu(prsp->virt_port);
out_free_rsp:
pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
return err;
}
static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
if (netxen_issue_cmd(adapter,
adapter->ahw.pci_func,
NXHAL_VERSION,
recv_ctx->context_id,
NX_DESTROY_CTX_RESET,
0,
NX_CDRP_CMD_DESTROY_RX_CTX)) {
printk(KERN_WARNING
"%s: Failed to destroy rx ctx in firmware\n",
netxen_nic_driver_name);
}
}
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
nx_hostrq_tx_ctx_t *prq;
nx_hostrq_cds_ring_t *prq_cds;
nx_cardrsp_tx_ctx_t *prsp;
void *rq_addr, *rsp_addr;
size_t rq_size, rsp_size;
u32 temp;
int err = 0;
u64 offset, phys_addr;
dma_addr_t rq_phys_addr, rsp_phys_addr;
rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
rq_addr = pci_alloc_consistent(adapter->pdev,
rq_size, &rq_phys_addr);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
rsp_addr = pci_alloc_consistent(adapter->pdev,
rsp_size, &rsp_phys_addr);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
memset(rq_addr, 0, rq_size);
prq = (nx_hostrq_tx_ctx_t *)rq_addr;
memset(rsp_addr, 0, rsp_size);
prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
prq->capabilities[0] = cpu_to_le32(temp);
prq->host_int_crb_mode =
cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
prq->interrupt_ctl = 0;
prq->msi_index = 0;
prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
offset = adapter->ctx_desc_phys_addr+sizeof(struct netxen_ring_ctx);
prq->cmd_cons_dma_addr = cpu_to_le64(offset);
prq_cds = &prq->cds_ring;
prq_cds->host_phys_addr =
cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
prq_cds->ring_size = cpu_to_le32(adapter->max_tx_desc_count);
phys_addr = rq_phys_addr;
err = netxen_issue_cmd(adapter,
adapter->ahw.pci_func,
NXHAL_VERSION,
(u32)(phys_addr >> 32),
((u32)phys_addr & 0xffffffff),
rq_size,
NX_CDRP_CMD_CREATE_TX_CTX);
if (err == NX_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
adapter->crb_addr_cmd_producer =
NETXEN_NIC_REG(temp - 0x200);
#if 0
adapter->tx_state =
le32_to_cpu(prsp->host_ctx_state);
#endif
adapter->tx_context_id =
le16_to_cpu(prsp->context_id);
} else {
printk(KERN_WARNING
"Failed to create tx ctx in firmware%d\n", err);
err = -EIO;
}
pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
out_free_rq:
pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
return err;
}
static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
if (netxen_issue_cmd(adapter,
adapter->ahw.pci_func,
NXHAL_VERSION,
adapter->tx_context_id,
NX_DESTROY_CTX_RESET,
0,
NX_CDRP_CMD_DESTROY_TX_CTX)) {
printk(KERN_WARNING
"%s: Failed to destroy tx ctx in firmware\n",
netxen_nic_driver_name);
}
}
static u64 ctx_addr_sig_regs[][3] = {
{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};
#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
#define lower32(x) ((u32)((x) & 0xffffffff))
#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
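/*
 * Usage sketch (address chosen for illustration): a 64-bit context
 * descriptor address is handed to the card as two 32-bit CRB writes,
 * e.g. for 0x0000000123456780:
 *
 *   lower32(0x0000000123456780ULL) == 0x23456780
 *   upper32(0x0000000123456780ULL) == 0x00000001
 */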
static struct netxen_recv_crb recv_crb_registers[] = {
/* Instance 0 */
{
/* crb_rcv_producer: */
{
NETXEN_NIC_REG(0x100),
/* Jumbo frames */
NETXEN_NIC_REG(0x110),
/* LRO */
NETXEN_NIC_REG(0x120)
},
/* crb_sts_consumer: */
NETXEN_NIC_REG(0x138),
},
/* Instance 1 */
{
/* crb_rcv_producer: */
{
NETXEN_NIC_REG(0x144),
/* Jumbo frames */
NETXEN_NIC_REG(0x154),
/* LRO */
NETXEN_NIC_REG(0x164)
},
/* crb_sts_consumer: */
NETXEN_NIC_REG(0x17c),
},
/* Instance 2 */
{
/* crb_rcv_producer: */
{
NETXEN_NIC_REG(0x1d8),
/* Jumbo frames */
NETXEN_NIC_REG(0x1f8),
/* LRO */
NETXEN_NIC_REG(0x208)
},
/* crb_sts_consumer: */
NETXEN_NIC_REG(0x220),
},
/* Instance 3 */
{
/* crb_rcv_producer: */
{
NETXEN_NIC_REG(0x22c),
/* Jumbo frames */
NETXEN_NIC_REG(0x23c),
/* LRO */
NETXEN_NIC_REG(0x24c)
},
/* crb_sts_consumer: */
NETXEN_NIC_REG(0x264),
},
};
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
int ctx, ring;
int func_id = adapter->portnum;
adapter->ctx_desc->cmd_ring_addr =
cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
adapter->ctx_desc->cmd_ring_size =
cpu_to_le32(adapter->max_tx_desc_count);
for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
recv_ctx = &adapter->recv_ctx[ctx];
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
cpu_to_le64(rds_ring->phys_addr);
adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
cpu_to_le32(rds_ring->max_rx_desc_count);
}
adapter->ctx_desc->sts_ring_addr =
cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
adapter->ctx_desc->sts_ring_size =
cpu_to_le32(adapter->max_rx_desc_count);
}
adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id),
lower32(adapter->ctx_desc_phys_addr));
adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_HI(func_id),
upper32(adapter->ctx_desc_phys_addr));
adapter->pci_write_normalize(adapter, CRB_CTX_SIGNATURE_REG(func_id),
NETXEN_CTX_SIGNATURE | func_id);
return 0;
}
static uint32_t sw_int_mask[4] = {
CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
};
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
struct netxen_hardware_context *hw = &adapter->ahw;
u32 state = 0;
void *addr;
int err = 0;
int ctx, ring;
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
err = netxen_receive_peg_ready(adapter);
if (err) {
printk(KERN_ERR "Rcv Peg initialization not complete:%x.\n",
state);
return err;
}
addr = pci_alloc_consistent(adapter->pdev,
sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
&adapter->ctx_desc_phys_addr);
if (addr == NULL) {
DPRINTK(ERR, "failed to allocate hw context\n");
return -ENOMEM;
}
memset(addr, 0, sizeof(struct netxen_ring_ctx));
adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum);
adapter->ctx_desc->cmd_consumer_offset =
cpu_to_le64(adapter->ctx_desc_phys_addr +
sizeof(struct netxen_ring_ctx));
adapter->cmd_consumer =
(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
/* cmd desc ring */
addr = pci_alloc_consistent(adapter->pdev,
sizeof(struct cmd_desc_type0) *
adapter->max_tx_desc_count,
&hw->cmd_desc_phys_addr);
if (addr == NULL) {
printk(KERN_ERR "%s failed to allocate tx desc ring\n",
netxen_nic_driver_name);
return -ENOMEM;
}
hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
recv_ctx = &adapter->recv_ctx[ctx];
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
/* rx desc ring */
rds_ring = &recv_ctx->rds_rings[ring];
addr = pci_alloc_consistent(adapter->pdev,
RCV_DESC_RINGSIZE,
&rds_ring->phys_addr);
if (addr == NULL) {
printk(KERN_ERR "%s failed to allocate rx "
"desc ring[%d]\n",
netxen_nic_driver_name, ring);
err = -ENOMEM;
goto err_out_free;
}
rds_ring->desc_head = (struct rcv_desc *)addr;
if (adapter->fw_major < 4)
rds_ring->crb_rcv_producer =
recv_crb_registers[adapter->portnum].
crb_rcv_producer[ring];
}
/* status desc ring */
addr = pci_alloc_consistent(adapter->pdev,
STATUS_DESC_RINGSIZE,
&recv_ctx->rcv_status_desc_phys_addr);
if (addr == NULL) {
printk(KERN_ERR "%s failed to allocate sts desc ring\n",
netxen_nic_driver_name);
err = -ENOMEM;
goto err_out_free;
}
recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
if (adapter->fw_major < 4)
recv_ctx->crb_sts_consumer =
recv_crb_registers[adapter->portnum].
crb_sts_consumer;
}
if (adapter->fw_major >= 4) {
adapter->intr_scheme = INTR_SCHEME_PERPORT;
adapter->msi_mode = MSI_MODE_MULTIFUNC;
err = nx_fw_cmd_create_rx_ctx(adapter);
if (err)
goto err_out_free;
err = nx_fw_cmd_create_tx_ctx(adapter);
if (err)
goto err_out_free;
} else {
adapter->intr_scheme = adapter->pci_read_normalize(adapter,
CRB_NIC_CAPABILITIES_FW);
adapter->msi_mode = adapter->pci_read_normalize(adapter,
CRB_NIC_MSI_MODE_FW);
adapter->crb_intr_mask = sw_int_mask[adapter->portnum];
err = netxen_init_old_ctx(adapter);
if (err) {
netxen_free_hw_resources(adapter);
return err;
}
}
return 0;
err_out_free:
netxen_free_hw_resources(adapter);
return err;
}
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
int ctx, ring;
if (adapter->fw_major >= 4) {
nx_fw_cmd_destroy_tx_ctx(adapter);
nx_fw_cmd_destroy_rx_ctx(adapter);
}
if (adapter->ctx_desc != NULL) {
pci_free_consistent(adapter->pdev,
sizeof(struct netxen_ring_ctx) +
sizeof(uint32_t),
adapter->ctx_desc,
adapter->ctx_desc_phys_addr);
adapter->ctx_desc = NULL;
}
if (adapter->ahw.cmd_desc_head != NULL) {
pci_free_consistent(adapter->pdev,
sizeof(struct cmd_desc_type0) *
adapter->max_tx_desc_count,
adapter->ahw.cmd_desc_head,
adapter->ahw.cmd_desc_phys_addr);
adapter->ahw.cmd_desc_head = NULL;
}
for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
recv_ctx = &adapter->recv_ctx[ctx];
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
if (rds_ring->desc_head != NULL) {
pci_free_consistent(adapter->pdev,
RCV_DESC_RINGSIZE,
rds_ring->desc_head,
rds_ring->phys_addr);
rds_ring->desc_head = NULL;
}
}
if (recv_ctx->rcv_status_desc_head != NULL) {
pci_free_consistent(adapter->pdev,
STATUS_DESC_RINGSIZE,
recv_ctx->rcv_status_desc_head,
recv_ctx->rcv_status_desc_phys_addr);
recv_ctx->rcv_status_desc_head = NULL;
}
}
}

View File

@ -93,17 +93,21 @@ static void
netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct netxen_adapter *adapter = netdev_priv(dev);
unsigned long flags;
u32 fw_major = 0;
u32 fw_minor = 0;
u32 fw_build = 0;
strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
NETXEN_FW_VERSION_MAJOR));
fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter,
NETXEN_FW_VERSION_MINOR));
fw_build = readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB));
write_lock_irqsave(&adapter->adapter_lock, flags);
fw_major = adapter->pci_read_normalize(adapter,
NETXEN_FW_VERSION_MAJOR);
fw_minor = adapter->pci_read_normalize(adapter,
NETXEN_FW_VERSION_MINOR);
fw_build = adapter->pci_read_normalize(adapter,
NETXEN_FW_VERSION_SUB);
write_unlock_irqrestore(&adapter->adapter_lock, flags);
sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
@ -159,9 +163,16 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
switch ((netxen_brdtype_t) boardinfo->board_type) {
case NETXEN_BRDTYPE_P2_SB35_4G:
case NETXEN_BRDTYPE_P2_SB31_2G:
case NETXEN_BRDTYPE_P3_REF_QG:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
case NETXEN_BRDTYPE_P3_10000_BASE_T:
ecmd->supported |= SUPPORTED_Autoneg;
ecmd->advertising |= ADVERTISED_Autoneg;
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
@ -171,12 +182,17 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
break;
case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
case NETXEN_BRDTYPE_P3_IMEZ:
case NETXEN_BRDTYPE_P3_XG_LOM:
case NETXEN_BRDTYPE_P3_HMEZ:
ecmd->supported |= SUPPORTED_MII;
ecmd->advertising |= ADVERTISED_MII;
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
break;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
case NETXEN_BRDTYPE_P3_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
@ -349,19 +365,18 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct netxen_adapter *adapter = netdev_priv(dev);
__u32 mode, *regs_buff = p;
void __iomem *addr;
int i, window;
memset(p, 0, NETXEN_NIC_REGS_LEN);
regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
(adapter->pdev)->device;
/* which mode */
NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_MODE, &regs_buff[0]);
adapter->hw_read_wx(adapter, NETXEN_NIU_MODE, &regs_buff[0], 4);
mode = regs_buff[0];
/* Common registers to all the modes */
NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER,
&regs_buff[2]);
adapter->hw_read_wx(adapter,
NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER, &regs_buff[2], 4);
/* GB/XGB Mode */
mode = (mode / 2) - 1;
window = 0;
@ -372,9 +387,9 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
window = adapter->physical_port *
NETXEN_NIC_PORT_WINDOW;
NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode].
reg[i - 3] + window,
&regs_buff[i]);
adapter->hw_read_wx(adapter,
niu_registers[mode].reg[i - 3] + window,
&regs_buff[i], 4);
}
}
@ -398,7 +413,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
return !val;
}
} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
return (val == XG_LINK_UP) ? 0 : 1;
}
return -EIO;
@ -427,6 +442,7 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return 0;
}
#if 0
static int
netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 * bytes)
@ -447,7 +463,6 @@ netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
}
printk(KERN_INFO "%s: flash unlocked. \n",
netxen_nic_driver_name);
last_schedule_time = jiffies;
ret = netxen_flash_erase_secondary(adapter);
if (ret != FLASH_SUCCESS) {
printk(KERN_ERR "%s: Flash erase failed.\n",
@ -497,6 +512,7 @@ netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len);
}
#endif /* 0 */
static void
netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
@ -508,9 +524,9 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
ring->rx_jumbo_pending = 0;
for (i = 0; i < MAX_RCV_CTX; ++i) {
ring->rx_pending += adapter->recv_ctx[i].
rcv_desc[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
rds_rings[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
ring->rx_jumbo_pending += adapter->recv_ctx[i].
rcv_desc[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
rds_rings[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
}
ring->tx_pending = adapter->max_tx_desc_count;
@ -655,7 +671,7 @@ static int netxen_nic_reg_test(struct net_device *dev)
data_written = (u32)0xa5a5a5a5;
netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written);
data_read = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_SCRATCHPAD_TEST));
data_read = adapter->pci_read_normalize(adapter, CRB_SCRATCHPAD_TEST);
if (data_written != data_read)
return 1;
@ -736,6 +752,117 @@ static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data)
return 0;
}
static u32 netxen_nic_get_tso(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
return (dev->features & NETIF_F_TSO) != 0;
}
static int netxen_nic_set_tso(struct net_device *dev, u32 data)
{
if (data) {
struct netxen_adapter *adapter = netdev_priv(dev);
dev->features |= NETIF_F_TSO;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
dev->features |= NETIF_F_TSO6;
} else
dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
return 0;
}
/*
* Set the coalescing parameters. Currently only normal is supported.
* If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
* firmware coalescing to default.
*/
static int netxen_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
return -EINVAL;
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
return -EINVAL;
/*
* Return an error if unsupported values or
* unsupported parameters are set.
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
ethcoal->tx_coalesce_usecs > 0xffff ||
ethcoal->tx_max_coalesced_frames > 0xffff ||
ethcoal->rx_coalesce_usecs_irq ||
ethcoal->rx_max_coalesced_frames_irq ||
ethcoal->tx_coalesce_usecs_irq ||
ethcoal->tx_max_coalesced_frames_irq ||
ethcoal->stats_block_coalesce_usecs ||
ethcoal->use_adaptive_rx_coalesce ||
ethcoal->use_adaptive_tx_coalesce ||
ethcoal->pkt_rate_low ||
ethcoal->rx_coalesce_usecs_low ||
ethcoal->rx_max_coalesced_frames_low ||
ethcoal->tx_coalesce_usecs_low ||
ethcoal->tx_max_coalesced_frames_low ||
ethcoal->pkt_rate_high ||
ethcoal->rx_coalesce_usecs_high ||
ethcoal->rx_max_coalesced_frames_high ||
ethcoal->tx_coalesce_usecs_high ||
ethcoal->tx_max_coalesced_frames_high)
return -EINVAL;
if (!ethcoal->rx_coalesce_usecs ||
!ethcoal->rx_max_coalesced_frames) {
adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
adapter->coal.normal.data.rx_time_us =
NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
adapter->coal.normal.data.rx_packets =
NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
} else {
adapter->coal.flags = 0;
adapter->coal.normal.data.rx_time_us =
ethcoal->rx_coalesce_usecs;
adapter->coal.normal.data.rx_packets =
ethcoal->rx_max_coalesced_frames;
}
adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
adapter->coal.normal.data.tx_packets =
ethcoal->tx_max_coalesced_frames;
netxen_config_intr_coalesce(adapter);
return 0;
}
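/*
 * Reaching this path from user space (illustrative; the interface name
 * is an example only):
 *
 *   ethtool -C eth0 rx-usecs 3 rx-frames 256
 *
 * ethtool's ETHTOOL_SCOALESCE ioctl lands in .set_coalesce, i.e. the
 * function above; passing rx-usecs 0 or rx-frames 0 restores the
 * firmware defaults.
 */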
static int netxen_get_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
return -EINVAL;
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
return -EINVAL;
ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
ethcoal->rx_max_coalesced_frames =
adapter->coal.normal.data.rx_packets;
ethcoal->tx_max_coalesced_frames =
adapter->coal.normal.data.tx_packets;
return 0;
}
struct ethtool_ops netxen_nic_ethtool_ops = {
.get_settings = netxen_nic_get_settings,
.set_settings = netxen_nic_set_settings,
@ -745,17 +872,22 @@ struct ethtool_ops netxen_nic_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_eeprom_len = netxen_nic_get_eeprom_len,
.get_eeprom = netxen_nic_get_eeprom,
#if 0
.set_eeprom = netxen_nic_set_eeprom,
#endif
.get_ringparam = netxen_nic_get_ringparam,
.get_pauseparam = netxen_nic_get_pauseparam,
.set_pauseparam = netxen_nic_set_pauseparam,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
.get_tso = netxen_nic_get_tso,
.set_tso = netxen_nic_set_tso,
.self_test = netxen_nic_diag_test,
.get_strings = netxen_nic_get_strings,
.get_ethtool_stats = netxen_nic_get_ethtool_stats,
.get_sset_count = netxen_get_sset_count,
.get_rx_csum = netxen_nic_get_rx_csum,
.set_rx_csum = netxen_nic_set_rx_csum,
.get_coalesce = netxen_get_intr_coalesce,
.set_coalesce = netxen_set_intr_coalesce,
};

View File

@ -126,7 +126,8 @@ enum {
NETXEN_HW_PEGR0_CRB_AGT_ADR,
NETXEN_HW_PEGR1_CRB_AGT_ADR,
NETXEN_HW_PEGR2_CRB_AGT_ADR,
NETXEN_HW_PEGR3_CRB_AGT_ADR
NETXEN_HW_PEGR3_CRB_AGT_ADR,
NETXEN_HW_PEGN4_CRB_AGT_ADR
};
/* Hub 5 */
@ -316,6 +317,8 @@ enum {
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR)
#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \
@ -435,6 +438,7 @@ enum {
#define NETXEN_CRB_ROMUSB \
NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
@ -446,6 +450,7 @@ enum {
#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN)
#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD
@ -461,11 +466,20 @@ enum {
#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
#define ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
#define NETXEN_PCI_MAPSIZE 128
#define NETXEN_PCI_DDR_NET (0x00000000UL)
#define NETXEN_PCI_QDR_NET (0x04000000UL)
#define NETXEN_PCI_DIRECT_CRB (0x04400000UL)
#define NETXEN_PCI_CAMQM (0x04800000UL)
#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL)
#define NETXEN_PCI_OCM0 (0x05000000UL)
#define NETXEN_PCI_OCM0_MAX (0x050fffffUL)
@ -474,6 +488,13 @@ enum {
#define NETXEN_PCI_CRBSPACE (0x06000000UL)
#define NETXEN_PCI_128MB_SIZE (0x08000000UL)
#define NETXEN_PCI_32MB_SIZE (0x02000000UL)
#define NETXEN_PCI_2MB_SIZE (0x00200000UL)
#define NETXEN_PCI_MN_2M (0)
#define NETXEN_PCI_MS_2M (0x80000)
#define NETXEN_PCI_OCM0_2M (0x000c0000UL)
#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL)
#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL)
#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
@ -484,7 +505,14 @@ enum {
#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL)
#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL)
#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL)
#define NETXEN_ADDR_QDR_NET_MAX (0x00000003003fffffULL)
#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL)
#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
/*
* Register offsets for MN
*/
#define NETXEN_MIU_CONTROL (0x000)
#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL)
/* 200ms delay in each loop */
#define NETXEN_NIU_PHY_WAITLEN 200000
@ -550,6 +578,9 @@ enum {
#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018)
#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c)
#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080)
#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100)
#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \
(NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \
@ -630,16 +661,76 @@ enum {
#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054)
#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058)
/* P3 802.3ap */
#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000)
#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000)
#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000)
#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000)
#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000)
#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000)
#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000)
#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000)
#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000)
#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000)
#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000)
#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000)
#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000)
#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000)
#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
/*
* Register offsets for MN
*/
#define MIU_CONTROL (0x000)
#define MIU_TEST_AGT_CTRL (0x090)
#define MIU_TEST_AGT_ADDR_LO (0x094)
#define MIU_TEST_AGT_ADDR_HI (0x098)
#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
#define MIU_TA_CTL_START 1
#define MIU_TA_CTL_ENABLE 2
#define MIU_TA_CTL_WRITE 4
#define MIU_TA_CTL_BUSY 8
#define SIU_TEST_AGT_CTRL (0x060)
#define SIU_TEST_AGT_ADDR_LO (0x064)
#define SIU_TEST_AGT_ADDR_HI (0x078)
#define SIU_TEST_AGT_WRDATA_LO (0x068)
#define SIU_TEST_AGT_WRDATA_HI (0x06c)
#define SIU_TEST_AGT_WRDATA(i) (0x068+(4*(i)))
#define SIU_TEST_AGT_RDDATA_LO (0x070)
#define SIU_TEST_AGT_RDDATA_HI (0x074)
#define SIU_TEST_AGT_RDDATA(i) (0x070+(4*(i)))
#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
/* XG Link status */
#define XG_LINK_UP 0x10
#define XG_LINK_DOWN 0x20
#define XG_LINK_UP_P3 0x01
#define XG_LINK_DOWN_P3 0x02
#define XG_LINK_STATE_P3_MASK 0xf
#define XG_LINK_STATE_P3(pcifn,val) \
(((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
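/*
 * Decoding sketch (register value chosen for illustration): the status
 * word packs one 4-bit link state per PCI function, so for a raw value
 * of 0x21:
 *
 *   XG_LINK_STATE_P3(0, 0x21) == 0x1   ->  XG_LINK_UP_P3 for function 0
 *   XG_LINK_STATE_P3(1, 0x21) == 0x2   ->  XG_LINK_DOWN_P3 for function 1
 */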
#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158))
#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100))
#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124))
#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120))
@ -654,30 +745,71 @@ enum {
#define PCIX_INT_VECTOR (0x10100)
#define PCIX_INT_MASK (0x10104)
#define PCIX_MN_WINDOW_F0 (0x10200)
#define PCIX_MN_WINDOW(_f) (PCIX_MN_WINDOW_F0 + (0x20 * (_f)))
#define PCIX_MS_WINDOW (0x10204)
#define PCIX_SN_WINDOW_F0 (0x10208)
#define PCIX_SN_WINDOW(_f) (PCIX_SN_WINDOW_F0 + (0x20 * (_f)))
#define PCIX_CRB_WINDOW (0x10210)
#define PCIX_CRB_WINDOW_F0 (0x10210)
#define PCIX_CRB_WINDOW_F1 (0x10230)
#define PCIX_CRB_WINDOW_F2 (0x10250)
#define PCIX_CRB_WINDOW_F3 (0x10270)
#define PCIX_CRB_WINDOW_F4 (0x102ac)
#define PCIX_CRB_WINDOW_F5 (0x102bc)
#define PCIX_CRB_WINDOW_F6 (0x102cc)
#define PCIX_CRB_WINDOW_F7 (0x102dc)
#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \
(PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\
(PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4))))
#define PCIX_MN_WINDOW (0x10200)
#define PCIX_MN_WINDOW_F0 (0x10200)
#define PCIX_MN_WINDOW_F1 (0x10220)
#define PCIX_MN_WINDOW_F2 (0x10240)
#define PCIX_MN_WINDOW_F3 (0x10260)
#define PCIX_MN_WINDOW_F4 (0x102a0)
#define PCIX_MN_WINDOW_F5 (0x102b0)
#define PCIX_MN_WINDOW_F6 (0x102c0)
#define PCIX_MN_WINDOW_F7 (0x102d0)
#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \
(PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\
(PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4))))
#define PCIX_SN_WINDOW (0x10208)
#define PCIX_SN_WINDOW_F0 (0x10208)
#define PCIX_SN_WINDOW_F1 (0x10228)
#define PCIX_SN_WINDOW_F2 (0x10248)
#define PCIX_SN_WINDOW_F3 (0x10268)
#define PCIX_SN_WINDOW_F4 (0x102a8)
#define PCIX_SN_WINDOW_F5 (0x102b8)
#define PCIX_SN_WINDOW_F6 (0x102c8)
#define PCIX_SN_WINDOW_F7 (0x102d8)
#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? \
(PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
(PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
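For reference, the PCIE_*_WINDOW_REG() selectors above just re-derive the per-function addresses listed explicitly: 0x20 apart for functions 0-3 and 0x10 apart from a second base for functions 4-7. A standalone check of that arithmetic for the CRB window:
/* Standalone check that PCIE_CRB_WINDOW_REG(func) reproduces the
 * PCIX_CRB_WINDOW_F0..F7 addresses listed above. */
#include <stdio.h>

#define PCIX_CRB_WINDOW_F0	(0x10210)
#define PCIX_CRB_WINDOW_F4	(0x102ac)
#define PCIE_CRB_WINDOW_REG(func)	(((func) < 4) ? \
		(PCIX_CRB_WINDOW_F0 + (0x20 * (func))) : \
		(PCIX_CRB_WINDOW_F4 + (0x10 * ((func) - 4))))

int main(void)
{
	int func;

	for (func = 0; func < 8; func++)
		printf("func %d: CRB window register 0x%05x\n",
		       func, PCIE_CRB_WINDOW_REG(func));
	return 0;
}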
#define PCIX_TARGET_STATUS (0x10118)
#define PCIX_TARGET_STATUS_F1 (0x10160)
#define PCIX_TARGET_STATUS_F2 (0x10164)
#define PCIX_TARGET_STATUS_F3 (0x10168)
#define PCIX_TARGET_STATUS_F4 (0x10360)
#define PCIX_TARGET_STATUS_F5 (0x10364)
#define PCIX_TARGET_STATUS_F6 (0x10368)
#define PCIX_TARGET_STATUS_F7 (0x1036c)
#define PCIX_TARGET_MASK (0x10128)
#define PCIX_TARGET_STATUS_F1 (0x10160)
#define PCIX_TARGET_MASK_F1 (0x10170)
#define PCIX_TARGET_STATUS_F2 (0x10164)
#define PCIX_TARGET_MASK_F2 (0x10174)
#define PCIX_TARGET_STATUS_F3 (0x10168)
#define PCIX_TARGET_MASK_F3 (0x10178)
#define PCIX_TARGET_MASK_F1 (0x10170)
#define PCIX_TARGET_MASK_F2 (0x10174)
#define PCIX_TARGET_MASK_F3 (0x10178)
#define PCIX_TARGET_MASK_F4 (0x10370)
#define PCIX_TARGET_MASK_F5 (0x10374)
#define PCIX_TARGET_MASK_F6 (0x10378)
#define PCIX_TARGET_MASK_F7 (0x1037c)
#define PCIX_MSI_F0 (0x13000)
#define PCIX_MSI_F1 (0x13004)
#define PCIX_MSI_F2 (0x13008)
#define PCIX_MSI_F3 (0x1300c)
#define PCIX_MSI_F4 (0x13010)
#define PCIX_MSI_F5 (0x13014)
#define PCIX_MSI_F6 (0x13018)
#define PCIX_MSI_F7 (0x1301c)
#define PCIX_MSI_F(i) (0x13000+((i)*4))
#define PCIX_PS_MEM_SPACE (0x90000)
@ -695,11 +827,102 @@ enum {
#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */
#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */
#define PCIE_SEM5_LOCK (0x1c028) /* API lock */
#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */
#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */
#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */
#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/
#define PCIE_SETUP_FUNCTION (0x12040)
#define PCIE_SETUP_FUNCTION2 (0x12048)
#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
#define PCIE_CHICKEN3 (0x120c8)
#define PCIE_MAX_MASTER_SPLIT (0x14048)
#define NETXEN_PORT_MODE_NONE 0
#define NETXEN_PORT_MODE_XG 1
#define NETXEN_PORT_MODE_GB 2
#define NETXEN_PORT_MODE_802_3_AP 3
#define NETXEN_PORT_MODE_AUTO_NEG 4
#define NETXEN_PORT_MODE_AUTO_NEG_1G 5
#define NETXEN_PORT_MODE_AUTO_NEG_XG 6
#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24))
#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198))
#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
/*
* PCI Interrupt Vector Values.
*/
#define PCIX_INT_VECTOR_BIT_F0 0x0080
#define PCIX_INT_VECTOR_BIT_F1 0x0100
#define PCIX_INT_VECTOR_BIT_F2 0x0200
#define PCIX_INT_VECTOR_BIT_F3 0x0400
#define PCIX_INT_VECTOR_BIT_F4 0x0800
#define PCIX_INT_VECTOR_BIT_F5 0x1000
#define PCIX_INT_VECTOR_BIT_F6 0x2000
#define PCIX_INT_VECTOR_BIT_F7 0x4000
struct netxen_legacy_intr_set {
uint32_t int_vec_bit;
uint32_t tgt_status_reg;
uint32_t tgt_mask_reg;
uint32_t pci_int_reg;
};
#define NX_LEGACY_INTR_CONFIG \
{ \
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
.tgt_status_reg = ISR_INT_TARGET_STATUS, \
.tgt_mask_reg = ISR_INT_TARGET_MASK, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
.tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
.tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
.tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
.tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
.tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
.tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
.tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
.pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
}
#endif /* __NETXEN_NIC_HDR_H_ */
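For reference, a minimal sketch of how the NX_LEGACY_INTR_CONFIG table above is meant to be consumed: each PCI function indexes it to find its interrupt vector bit and the matching status/mask/trigger registers. This assumes the full netxen_nic_hdr.h (including the ISR_INT_TARGET_* and ISR_MSI_INT_TRIGGER definitions the macro references) is in scope; it is illustrative, not driver code.
/* Illustrative only; assumes netxen_nic_hdr.h and the ISR_* definitions
 * referenced by NX_LEGACY_INTR_CONFIG are available. */
static const struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;

static uint32_t legacy_vec_bit_for(int pci_func)
{
	/* Entry n carries PCIX_INT_VECTOR_BIT_Fn plus that function's
	 * target status, target mask and MSI trigger registers. */
	return legacy_intr[pci_func].int_vec_bit;
}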

File diff suppressed because it is too large Load Diff

View File

@ -82,19 +82,9 @@ struct netxen_adapter;
#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
#define NETXEN_NIC_LOCKED_READ_REG(X, Y) \
addr = pci_base_offset(adapter, X); \
*(u32 *)Y = readl((void __iomem*) addr);
struct netxen_port;
void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
void netxen_nic_flash_print(struct netxen_adapter *adapter);
int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off,
void *data, int len);
void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
unsigned long off, int data);
int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off,
void *data, int len);
typedef u8 netxen_ethernet_macaddr_t[6];
@ -432,7 +422,8 @@ typedef enum {
/* Promiscuous mode options (GbE mode only) */
typedef enum {
NETXEN_NIU_PROMISC_MODE = 0,
NETXEN_NIU_NON_PROMISC_MODE
NETXEN_NIU_NON_PROMISC_MODE,
NETXEN_NIU_ALLMULTI_MODE
} netxen_niu_prom_mode_t;
/*
@ -478,42 +469,6 @@ typedef enum {
#define netxen_xg_soft_reset(config_word) \
((config_word) |= 1 << 4)
/*
* MAC Control Register
*
* Bit 0-1 : id_pool0
* Bit 2 : enable_xtnd0
* Bit 4-5 : id_pool1
* Bit 6 : enable_xtnd1
* Bit 8-9 : id_pool2
* Bit 10 : enable_xtnd2
* Bit 12-13 : id_pool3
* Bit 14 : enable_xtnd3
* Bit 24-25 : mode_select
* Bit 28-31 : enable_pool
*/
#define netxen_nic_mcr_set_id_pool0(config, val) \
((config) |= ((val) &0x03))
#define netxen_nic_mcr_set_enable_xtnd0(config) \
((config) |= 1 << 3)
#define netxen_nic_mcr_set_id_pool1(config, val) \
((config) |= (((val) & 0x03) << 4))
#define netxen_nic_mcr_set_enable_xtnd1(config) \
((config) |= 1 << 6)
#define netxen_nic_mcr_set_id_pool2(config, val) \
((config) |= (((val) & 0x03) << 8))
#define netxen_nic_mcr_set_enable_xtnd2(config) \
((config) |= 1 << 10)
#define netxen_nic_mcr_set_id_pool3(config, val) \
((config) |= (((val) & 0x03) << 12))
#define netxen_nic_mcr_set_enable_xtnd3(config) \
((config) |= 1 << 14)
#define netxen_nic_mcr_set_mode_select(config, val) \
((config) |= (((val) & 0x03) << 24))
#define netxen_nic_mcr_set_enable_pool(config, val) \
((config) |= (((val) & 0x0f) << 28))
/* Set promiscuous mode for a GbE interface */
int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
netxen_niu_prom_mode_t mode);
@ -538,4 +493,15 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter);
int netxen_niu_disable_xg_port(struct netxen_adapter *adapter);
typedef struct {
unsigned valid;
unsigned start_128M;
unsigned end_128M;
unsigned start_2M;
} crb_128M_2M_sub_block_map_t;
typedef struct {
crb_128M_2M_sub_block_map_t sub_block[16];
} crb_128M_2M_block_map_t;
#endif /* __NETXEN_NIC_HW_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -1,220 +0,0 @@
/*
* Copyright (C) 2003 - 2006 NetXen, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston,
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.
*
* Contact Information:
* info@netxen.com
* NetXen,
* 3965 Freedom Circle, Fourth floor,
* Santa Clara, CA 95054
*/
#include <linux/netdevice.h>
#include <linux/delay.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
#include "netxen_nic_phan_reg.h"
/*
* netxen_nic_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*/
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &adapter->net_stats;
memset(stats, 0, sizeof(*stats));
/* total packets received */
stats->rx_packets = adapter->stats.no_rcv;
/* total packets transmitted */
stats->tx_packets = adapter->stats.xmitedframes +
adapter->stats.xmitfinished;
/* total bytes received */
stats->rx_bytes = adapter->stats.rxbytes;
/* total bytes transmitted */
stats->tx_bytes = adapter->stats.txbytes;
/* bad packets received */
stats->rx_errors = adapter->stats.rcvdbadskb;
/* packet transmit problems */
stats->tx_errors = adapter->stats.nocmddescriptor;
/* no space in linux buffers */
stats->rx_dropped = adapter->stats.rxdropped;
/* no space available in linux */
stats->tx_dropped = adapter->stats.txdropped;
return stats;
}
static void netxen_indicate_link_status(struct netxen_adapter *adapter,
u32 link)
{
struct net_device *netdev = adapter->netdev;
if (link)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
}
#if 0
void netxen_handle_port_int(struct netxen_adapter *adapter, u32 enable)
{
__u32 int_src;
/* This should clear the interrupt source */
if (adapter->phy_read)
adapter->phy_read(adapter,
NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
&int_src);
if (int_src == 0) {
DPRINTK(INFO, "No phy interrupts for port #%d\n", portno);
return;
}
if (adapter->disable_phy_interrupts)
adapter->disable_phy_interrupts(adapter);
if (netxen_get_phy_int_jabber(int_src))
DPRINTK(INFO, "Jabber interrupt \n");
if (netxen_get_phy_int_polarity_changed(int_src))
DPRINTK(INFO, "POLARITY CHANGED int \n");
if (netxen_get_phy_int_energy_detect(int_src))
DPRINTK(INFO, "ENERGY DETECT INT \n");
if (netxen_get_phy_int_downshift(int_src))
DPRINTK(INFO, "DOWNSHIFT INT \n");
/* write it down later.. */
if ((netxen_get_phy_int_speed_changed(int_src))
|| (netxen_get_phy_int_link_status_changed(int_src))) {
__u32 status;
DPRINTK(INFO, "SPEED CHANGED OR LINK STATUS CHANGED \n");
if (adapter->phy_read
&& adapter->phy_read(adapter,
NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
&status) == 0) {
if (netxen_get_phy_int_link_status_changed(int_src)) {
if (netxen_get_phy_link(status)) {
printk(KERN_INFO "%s: %s Link UP\n",
netxen_nic_driver_name,
adapter->netdev->name);
} else {
printk(KERN_INFO "%s: %s Link DOWN\n",
netxen_nic_driver_name,
adapter->netdev->name);
}
netxen_indicate_link_status(adapter,
netxen_get_phy_link
(status));
}
}
}
if (adapter->enable_phy_interrupts)
adapter->enable_phy_interrupts(adapter);
}
#endif /* 0 */
static void netxen_nic_isr_other(struct netxen_adapter *adapter)
{
int portno = adapter->portnum;
u32 val, linkup, qg_linksup;
/* verify the offset */
val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
val = val >> adapter->physical_port;
if (val == adapter->ahw.qg_linksup)
return;
qg_linksup = adapter->ahw.qg_linksup;
adapter->ahw.qg_linksup = val;
DPRINTK(INFO, "link update 0x%08x\n", val);
linkup = val & 1;
if (linkup != (qg_linksup & 1)) {
printk(KERN_INFO "%s: %s PORT %d link %s\n",
adapter->netdev->name,
netxen_nic_driver_name, portno,
((linkup == 0) ? "down" : "up"));
netxen_indicate_link_status(adapter, linkup);
if (linkup)
netxen_nic_set_link_parameters(adapter);
}
}
void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
{
netxen_nic_isr_other(adapter);
}
#if 0
int netxen_nic_link_ok(struct netxen_adapter *adapter)
{
switch (adapter->ahw.board_type) {
case NETXEN_NIC_GBE:
return ((adapter->ahw.qg_linksup) & 1);
case NETXEN_NIC_XGBE:
return ((adapter->ahw.xg_linkup) & 1);
default:
printk(KERN_ERR"%s: Function: %s, Unknown board type\n",
netxen_nic_driver_name, __FUNCTION__);
break;
}
return 0;
}
#endif /* 0 */
void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 val;
/* WINDOW = 1 */
val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
val >>= (adapter->physical_port * 8);
val &= 0xff;
if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) {
printk(KERN_INFO "%s: %s NIC Link is down\n",
netxen_nic_driver_name, netdev->name);
adapter->ahw.xg_linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
} else if (adapter->ahw.xg_linkup == 0 && val == XG_LINK_UP) {
printk(KERN_INFO "%s: %s NIC Link is up\n",
netxen_nic_driver_name, netdev->name);
adapter->ahw.xg_linkup = 1;
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -46,9 +46,8 @@ static int phy_lock(struct netxen_adapter *adapter)
int done = 0, timeout = 0;
while (!done) {
done =
readl(pci_base_offset
(adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK)));
done = netxen_nic_reg_read(adapter,
NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
if (done == 1)
break;
if (timeout >= phy_lock_timeout) {
@ -63,14 +62,14 @@ static int phy_lock(struct netxen_adapter *adapter)
}
}
writel(PHY_LOCK_DRIVER,
NETXEN_CRB_NORMALIZE(adapter, NETXEN_PHY_LOCK_ID));
netxen_crb_writelit_adapter(adapter,
NETXEN_PHY_LOCK_ID, PHY_LOCK_DRIVER);
return 0;
}
static int phy_unlock(struct netxen_adapter *adapter)
{
readl(pci_base_offset(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)));
adapter->pci_read_immediate(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK));
return 0;
}
@ -109,7 +108,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
* so it cannot be in reset
*/
if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
if (netxen_gb_get_soft_reset(mac_cfg0)) {
@ -119,7 +118,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
netxen_gb_rx_reset_pb(temp);
netxen_gb_tx_reset_mac(temp);
netxen_gb_rx_reset_mac(temp);
if (netxen_nic_hw_write_wx(adapter,
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&temp, 4))
return -EIO;
@ -129,22 +128,22 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
address = 0;
netxen_gb_mii_mgmt_reg_addr(address, reg);
netxen_gb_mii_mgmt_phy_addr(address, phy);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
&address, 4))
return -EIO;
command = 0; /* turn off any prior activity */
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
&command, 4))
return -EIO;
/* send read command */
netxen_gb_mii_mgmt_set_read_cycle(command);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
&command, 4))
return -EIO;
status = 0;
do {
if (netxen_nic_hw_read_wx(adapter,
if (adapter->hw_read_wx(adapter,
NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
&status, 4))
return -EIO;
@ -154,7 +153,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
&& (timeout++ < NETXEN_NIU_PHY_WAITMAX));
if (timeout < NETXEN_NIU_PHY_WAITMAX) {
if (netxen_nic_hw_read_wx(adapter,
if (adapter->hw_read_wx(adapter,
NETXEN_NIU_GB_MII_MGMT_STATUS(0),
readval, 4))
return -EIO;
@ -163,7 +162,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
result = -1;
if (restore)
if (netxen_nic_hw_write_wx(adapter,
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
@ -201,7 +200,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
* cannot be in reset
*/
if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
if (netxen_gb_get_soft_reset(mac_cfg0)) {
@ -212,7 +211,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
netxen_gb_tx_reset_mac(temp);
netxen_gb_rx_reset_mac(temp);
if (netxen_nic_hw_write_wx(adapter,
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&temp, 4))
return -EIO;
@ -220,24 +219,24 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
}
command = 0; /* turn off any prior activity */
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
&command, 4))
return -EIO;
address = 0;
netxen_gb_mii_mgmt_reg_addr(address, reg);
netxen_gb_mii_mgmt_phy_addr(address, phy);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
&address, 4))
return -EIO;
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0),
&val, 4))
return -EIO;
status = 0;
do {
if (netxen_nic_hw_read_wx(adapter,
if (adapter->hw_read_wx(adapter,
NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
&status, 4))
return -EIO;
@ -252,7 +251,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
/* restore the state of port 0 MAC in case we tampered with it */
if (restore)
if (netxen_nic_hw_write_wx(adapter,
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_0(0),
&mac_cfg0, 4))
return -EIO;
@ -401,14 +400,16 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
{
int result = 0;
__u32 status;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
return 0;
if (adapter->disable_phy_interrupts)
adapter->disable_phy_interrupts(adapter);
mdelay(2);
if (0 ==
netxen_niu_gbe_phy_read(adapter,
NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
&status)) {
if (0 == netxen_niu_gbe_phy_read(adapter,
NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status)) {
if (netxen_get_phy_link(status)) {
if (netxen_get_phy_speed(status) == 2) {
netxen_niu_gbe_set_gmii_mode(adapter, port, 1);
@ -456,12 +457,12 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
{
u32 portnum = adapter->physical_port;
netxen_crb_writelit_adapter(adapter,
NETXEN_NIU_XGE_CONFIG_1+(0x10000*portnum), 0x1447);
netxen_crb_writelit_adapter(adapter,
NETXEN_NIU_XGE_CONFIG_0+(0x10000*portnum), 0x5);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
netxen_crb_writelit_adapter(adapter,
NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
netxen_crb_writelit_adapter(adapter,
NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
}
return 0;
}
@ -581,10 +582,10 @@ static int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
if ((phy < 0) || (phy > 3))
return -EINVAL;
if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy),
if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy),
&stationhigh, 4))
return -EIO;
if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
&stationlow, 4))
return -EIO;
((__le32 *)val)[1] = cpu_to_le32(stationhigh);
@ -613,14 +614,14 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
temp[0] = temp[1] = 0;
memcpy(temp + 2, addr, 2);
val = le32_to_cpu(*(__le32 *)temp);
if (netxen_nic_hw_write_wx
(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4))
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4))
return -EIO;
memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32));
val = le32_to_cpu(*(__le32 *)temp);
if (netxen_nic_hw_write_wx
(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4))
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4))
return -2;
netxen_niu_macaddr_get(adapter,
@ -654,7 +655,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
mac_cfg0 = 0;
netxen_gb_soft_reset(mac_cfg0);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
mac_cfg0 = 0;
@ -666,7 +667,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
netxen_gb_tx_reset_mac(mac_cfg0);
netxen_gb_rx_reset_mac(mac_cfg0);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
mac_cfg1 = 0;
@ -679,7 +680,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
if (mode == NETXEN_NIU_10_100_MB) {
netxen_gb_set_intfmode(mac_cfg1, 1);
if (netxen_nic_hw_write_wx(adapter,
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_1(port),
&mac_cfg1, 4))
return -EIO;
@ -692,7 +693,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
} else if (mode == NETXEN_NIU_1000_MB) {
netxen_gb_set_intfmode(mac_cfg1, 2);
if (netxen_nic_hw_write_wx(adapter,
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_GB_MAC_CONFIG_1(port),
&mac_cfg1, 4))
return -EIO;
@ -704,7 +705,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
}
mii_cfg = 0;
netxen_gb_set_mii_mgmt_clockselect(mii_cfg, 7);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port),
&mii_cfg, 4))
return -EIO;
mac_cfg0 = 0;
@ -713,7 +714,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
netxen_gb_unset_rx_flowctl(mac_cfg0);
netxen_gb_unset_tx_flowctl(mac_cfg0);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
return 0;
@ -730,7 +731,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
return -EINVAL;
mac_cfg0 = 0;
netxen_gb_soft_reset(mac_cfg0);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
&mac_cfg0, 4))
return -EIO;
return 0;
@ -746,7 +747,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
return -EINVAL;
mac_cfg = 0;
if (netxen_nic_hw_write_wx(adapter,
if (adapter->hw_write_wx(adapter,
NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), &mac_cfg, 4))
return -EIO;
return 0;
@ -763,7 +764,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
return -EINVAL;
/* save previous contents */
if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
&reg, 4))
return -EIO;
if (mode == NETXEN_NIU_PROMISC_MODE) {
@ -801,7 +802,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
return -EIO;
}
}
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
&reg, 4))
return -EIO;
return 0;
@ -826,13 +827,13 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
case 0:
memcpy(temp + 2, addr, 2);
val = le32_to_cpu(*(__le32 *)temp);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
&val, 4))
return -EIO;
memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
val = le32_to_cpu(*(__le32 *)temp);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
&val, 4))
return -EIO;
break;
@ -840,13 +841,13 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
case 1:
memcpy(temp + 2, addr, 2);
val = le32_to_cpu(*(__le32 *)temp);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1,
if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1,
&val, 4))
return -EIO;
memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
val = le32_to_cpu(*(__le32 *)temp);
if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI,
if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI,
&val, 4))
return -EIO;
break;
@ -877,10 +878,10 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter,
if (phy != 0)
return -EINVAL;
if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
if (adapter->hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
&stationhigh, 4))
return -EIO;
if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
if (adapter->hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
&stationlow, 4))
return -EIO;
((__le32 *)val)[1] = cpu_to_le32(stationhigh);
@ -901,7 +902,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
if (port > NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
if (netxen_nic_hw_read_wx(adapter,
if (adapter->hw_read_wx(adapter,
NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), &reg, 4))
return -EIO;
if (mode == NETXEN_NIU_PROMISC_MODE)
@ -909,6 +910,11 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
else
reg = (reg & ~0x2000UL);
if (mode == NETXEN_NIU_ALLMULTI_MODE)
reg = (reg | 0x1000UL);
else
reg = (reg & ~0x1000UL);
netxen_crb_writelit_adapter(adapter,
NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);

View File

@ -42,8 +42,11 @@
#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c)
#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) /* C0 EPG BUG */
#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14)
#define CRB_HOST_CMD_ADDR_HI NETXEN_NIC_REG(0x18) /* host add:cmd ring */
#define CRB_HOST_CMD_ADDR_LO NETXEN_NIC_REG(0x1c)
#define NX_CDRP_CRB_OFFSET NETXEN_NIC_REG(0x18)
#define NX_ARG1_CRB_OFFSET NETXEN_NIC_REG(0x1c)
#define NX_ARG2_CRB_OFFSET NETXEN_NIC_REG(0x20)
#define NX_ARG3_CRB_OFFSET NETXEN_NIC_REG(0x24)
#define NX_SIGN_CRB_OFFSET NETXEN_NIC_REG(0x28)
#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20) /* 4 regs for perf */
#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24)
#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28)
@ -73,8 +76,8 @@
#define CRB_RX_LRO_MID_TIMER NETXEN_NIC_REG(0x88)
#define CRB_DMA_MAX_RCV_BUFS NETXEN_NIC_REG(0x8c)
#define CRB_MAX_DMA_ENTRIES NETXEN_NIC_REG(0x90)
#define CRB_XG_STATE NETXEN_NIC_REG(0x94) /* XG Link status */
#define CRB_AGENT_GO NETXEN_NIC_REG(0x98) /* NIC pkt gen agent */
#define CRB_XG_STATE NETXEN_NIC_REG(0x94) /* XG Link status */
#define CRB_XG_STATE_P3 NETXEN_NIC_REG(0x98) /* XG PF Link status */
#define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0x9c)
#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0)
#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4)
@ -97,7 +100,9 @@
#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0)
#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4)
#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
#define CRB_HOST_DUMMY_BUF NETXEN_NIC_REG(0xfc)
#define CRB_RCVPEG_STATE NETXEN_NIC_REG(0x13c)
#define CRB_CMD_PRODUCER_OFFSET_1 NETXEN_NIC_REG(0x1ac)
#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0)
#define CRB_CMD_PRODUCER_OFFSET_2 NETXEN_NIC_REG(0x1b8)
@ -147,29 +152,15 @@
#define nx_get_temp_state(x) ((x) & 0xffff)
#define nx_encode_temp(val, state) (((val) << 16) | (state))
/* CRB registers per Rcv Descriptor ring */
struct netxen_rcv_desc_crb {
u32 crb_rcv_producer_offset __attribute__ ((aligned(512)));
u32 crb_rcv_consumer_offset;
u32 crb_globalrcv_ring;
u32 crb_rcv_ring_size;
};
/*
* CRB registers used by the receive peg logic.
*/
struct netxen_recv_crb {
struct netxen_rcv_desc_crb rcv_desc_crb[NUM_RCV_DESC_RINGS];
u32 crb_rcvstatus_ring;
u32 crb_rcv_status_producer;
u32 crb_rcv_status_consumer;
u32 crb_rcvpeg_state;
u32 crb_status_ring_size;
u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
u32 crb_sts_consumer;
};
extern struct netxen_recv_crb recv_crb_registers[];
/*
* Temperature control.
*/

View File

@ -158,11 +158,10 @@ static int m88e1111_config_init(struct phy_device *phydev)
{
int err;
int temp;
int mode;
/* Enable Fiber/Copper auto selection */
temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO;
phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
temp = phy_read(phydev, MII_BMCR);
@ -198,9 +197,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
temp &= ~(MII_M1111_HWCFG_MODE_MASK);
mode = phy_read(phydev, MII_M1111_PHY_EXT_CR);
if (mode & MII_M1111_HWCFG_FIBER_COPPER_RES)
if (temp & MII_M1111_HWCFG_FIBER_COPPER_RES)
temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII;
else
temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII;

View File

@ -50,8 +50,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
#define DRV_VERSION "0.16"
#define DRV_RELDATE "10Nov2007"
#define DRV_VERSION "0.18"
#define DRV_RELDATE "13Jul2008"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@ -91,6 +91,14 @@
#define MISR 0x3C /* Status register */
#define MIER 0x40 /* INT enable register */
#define MSK_INT 0x0000 /* Mask off interrupts */
#define RX_FINISH 0x0001 /* RX finished */
#define RX_NO_DESC 0x0002 /* No RX descriptor available */
#define RX_FIFO_FULL 0x0004 /* RX FIFO full */
#define RX_EARLY 0x0008 /* RX early */
#define TX_FINISH 0x0010 /* TX finished */
#define TX_EARLY 0x0080 /* TX early */
#define EVENT_OVRFL 0x0100 /* Event counter overflow */
#define LINK_CHANGED 0x0200 /* PHY link changed */
#define ME_CISR 0x44 /* Event counter INT status */
#define ME_CIER 0x48 /* Event counter INT enable */
#define MR_CNT 0x50 /* Successfully received packet counter */
@ -130,6 +138,21 @@
#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
#define MCAST_MAX 4 /* Max number multicast addresses to filter */
/* Descriptor status */
#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
#define DSC_RX_OK 0x4000 /* RX was successful */
#define DSC_RX_ERR 0x0800 /* RX PHY error */
#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */
#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */
#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
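For reference, a standalone sketch of how the DSC_* bits above are read back from an RX descriptor: the MAC hands a descriptor to the CPU by clearing DSC_OWNER_MAC, and DSC_RX_ERR gates the per-error bits. The sample status words are invented.
/* Standalone sketch: decode an r6040 RX descriptor status word.
 * The sample values fed to decode_rx_status() are invented. */
#include <stdio.h>

#define DSC_OWNER_MAC	0x8000	/* MAC still owns this descriptor */
#define DSC_RX_OK	0x4000	/* RX was successful */
#define DSC_RX_ERR	0x0800	/* RX PHY error */
#define DSC_RX_ERR_CRC	0x0040	/* RX CRC error */

static void decode_rx_status(unsigned short status)
{
	if (status & DSC_OWNER_MAC) {
		printf("0x%04x: still owned by the MAC, nothing to do\n", status);
		return;
	}
	if (status & DSC_RX_ERR)
		printf("0x%04x: RX error%s\n", status,
		       (status & DSC_RX_ERR_CRC) ? " (CRC)" : "");
	else
		printf("0x%04x: good frame%s\n", status,
		       (status & DSC_RX_OK) ? " (DSC_RX_OK)" : "");
}

int main(void)
{
	decode_rx_status(0x8000);	/* owned by MAC */
	decode_rx_status(0x4020);	/* good broadcast frame */
	decode_rx_status(0x0840);	/* CRC error */
	return 0;
}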
/* PHY settings */
#define ICPLUS_PHY_ID 0x0243
@ -139,10 +162,10 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
#define RX_INT 0x0001
#define TX_INT 0x0010
#define RX_NO_DESC_INT 0x0002
#define INT_MASK (RX_INT | TX_INT)
/* RX and TX interrupts that we handle */
#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
#define TX_INTS (TX_FINISH)
#define INT_MASK (RX_INTS | TX_INTS)
struct r6040_descriptor {
u16 status, len; /* 0-3 */
@ -167,7 +190,7 @@ struct r6040_private {
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
u16 tx_free_desc, rx_free_desc, phy_addr, phy_mode;
u16 tx_free_desc, phy_addr, phy_mode;
u16 mcr0, mcr1;
u16 switch_sig;
struct net_device *dev;
@ -183,7 +206,7 @@ static char version[] __devinitdata = KERN_INFO DRV_NAME
static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
/* Read a word data from PHY Chip */
static int phy_read(void __iomem *ioaddr, int phy_addr, int reg)
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
int limit = 2048;
u16 cmd;
@ -200,7 +223,7 @@ static int phy_read(void __iomem *ioaddr, int phy_addr, int reg)
}
/* Write a word data from PHY Chip */
static void phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
{
int limit = 2048;
u16 cmd;
@ -216,20 +239,20 @@ static void phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
}
}
static int mdio_read(struct net_device *dev, int mii_id, int reg)
static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
return (phy_read(ioaddr, lp->phy_addr, reg));
return (r6040_phy_read(ioaddr, lp->phy_addr, reg));
}
static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
phy_write(ioaddr, lp->phy_addr, reg, val);
r6040_phy_write(ioaddr, lp->phy_addr, reg, val);
}
static void r6040_free_txbufs(struct net_device *dev)
@ -283,58 +306,101 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
desc->vndescp = desc_ring;
}
/* Allocate skb buffer for rx descriptor */
static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
{
struct r6040_descriptor *descptr;
void __iomem *ioaddr = lp->base;
descptr = lp->rx_insert_ptr;
while (lp->rx_free_desc < RX_DCNT) {
descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!descptr->skb_ptr)
break;
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
descptr->skb_ptr->data,
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
descptr->status = 0x8000;
descptr = descptr->vndescp;
lp->rx_free_desc++;
/* Trigger RX DMA */
iowrite16(lp->mcr0 | 0x0002, ioaddr);
}
lp->rx_insert_ptr = descptr;
}
static void r6040_alloc_txbufs(struct net_device *dev)
static void r6040_init_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
lp->tx_free_desc = TX_DCNT;
lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
}
static void r6040_alloc_rxbufs(struct net_device *dev)
static int r6040_alloc_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
lp->rx_free_desc = 0;
struct r6040_descriptor *desc;
struct sk_buff *skb;
int rc;
lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
rx_buf_alloc(lp, dev);
/* Allocate skbs for the rx descriptors */
desc = lp->rx_ring;
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
printk(KERN_ERR "%s: failed to alloc skb for rx\n", dev->name);
rc = -ENOMEM;
goto err_exit;
}
desc->skb_ptr = skb;
desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
desc->skb_ptr->data,
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
desc->status = DSC_OWNER_MAC;
desc = desc->vndescp;
} while (desc != lp->rx_ring);
return 0;
err_exit:
/* Deallocate all previously allocated skbs */
r6040_free_rxbufs(dev);
return rc;
}
static void r6040_init_mac_regs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int limit = 2048;
u16 cmd;
/* Mask Off Interrupt */
iowrite16(MSK_INT, ioaddr + MIER);
/* Reset RDC MAC */
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
if (cmd & 0x1)
break;
}
/* Reset internal state machine */
iowrite16(2, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
udelay(5000);
/* MAC Bus Control Register */
iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
/* Buffer Size Register */
iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
/* Write TX ring start address */
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
/* Write RX ring start address */
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
/* Set interrupt waiting time and packet numbers */
iowrite16(0, ioaddr + MT_ICR);
iowrite16(0, ioaddr + MR_ICR);
/* Enable interrupts */
iowrite16(INT_MASK, ioaddr + MIER);
/* Enable TX and RX */
iowrite16(lp->mcr0 | 0x0002, ioaddr);
/* Let TX poll the descriptors
* we may have been called by r6040_tx_timeout, which has left
* some unsent tx buffers */
iowrite16(0x01, ioaddr + MTPR);
}
static void r6040_tx_timeout(struct net_device *dev)
@ -342,27 +408,16 @@ static void r6040_tx_timeout(struct net_device *dev)
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status "
"%4.4x\n",
printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x "
"status %4.4x, PHY status %4.4x\n",
dev->name, ioread16(ioaddr + MIER),
mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
disable_irq(dev->irq);
napi_disable(&priv->napi);
spin_lock(&priv->lock);
/* Clear all descriptors */
r6040_free_txbufs(dev);
r6040_free_rxbufs(dev);
r6040_alloc_txbufs(dev);
r6040_alloc_rxbufs(dev);
/* Reset MAC */
iowrite16(MAC_RST, ioaddr + MCR1);
spin_unlock(&priv->lock);
enable_irq(dev->irq);
ioread16(ioaddr + MISR),
r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
dev->stats.tx_errors++;
netif_wake_queue(dev);
/* Reset MAC and re-init all registers */
r6040_init_mac_regs(dev);
}
static struct net_device_stats *r6040_get_stats(struct net_device *dev)
@ -424,6 +479,7 @@ static int r6040_close(struct net_device *dev)
del_timer_sync(&lp->timer);
spin_lock_irq(&lp->lock);
napi_disable(&lp->napi);
netif_stop_queue(dev);
r6040_down(dev);
spin_unlock_irq(&lp->lock);
@ -432,23 +488,23 @@ static int r6040_close(struct net_device *dev)
}
/* Status of PHY CHIP */
static int phy_mode_chk(struct net_device *dev)
static int r6040_phy_mode_chk(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int phy_dat;
/* PHY Link Status Check */
phy_dat = phy_read(ioaddr, lp->phy_addr, 1);
phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
if (!(phy_dat & 0x4))
phy_dat = 0x8000; /* Link Failed, full duplex */
/* PHY Chip Auto-Negotiation Status */
phy_dat = phy_read(ioaddr, lp->phy_addr, 1);
phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
if (phy_dat & 0x0020) {
/* Auto Negotiation Mode */
phy_dat = phy_read(ioaddr, lp->phy_addr, 5);
phy_dat &= phy_read(ioaddr, lp->phy_addr, 4);
phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5);
phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4);
if (phy_dat & 0x140)
/* Force full duplex */
phy_dat = 0x8000;
@ -456,7 +512,7 @@ static int phy_mode_chk(struct net_device *dev)
phy_dat = 0;
} else {
/* Force Mode */
phy_dat = phy_read(ioaddr, lp->phy_addr, 0);
phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0);
if (phy_dat & 0x100)
phy_dat = 0x8000;
else
@ -468,12 +524,12 @@ static int phy_mode_chk(struct net_device *dev)
static void r6040_set_carrier(struct mii_if_info *mii)
{
if (phy_mode_chk(mii->dev)) {
if (r6040_phy_mode_chk(mii->dev)) {
/* autoneg is off: Link is always assumed to be up */
if (!netif_carrier_ok(mii->dev))
netif_carrier_on(mii->dev);
} else
phy_mode_chk(mii->dev);
r6040_phy_mode_chk(mii->dev);
}
static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@ -494,73 +550,72 @@ static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int r6040_rx(struct net_device *dev, int limit)
{
struct r6040_private *priv = netdev_priv(dev);
int count;
void __iomem *ioaddr = priv->base;
struct r6040_descriptor *descptr = priv->rx_remove_ptr;
struct sk_buff *skb_ptr, *new_skb;
int count = 0;
u16 err;
for (count = 0; count < limit; ++count) {
struct r6040_descriptor *descptr = priv->rx_remove_ptr;
struct sk_buff *skb_ptr;
/* Disable RX interrupt */
iowrite16(ioread16(ioaddr + MIER) & (~RX_INT), ioaddr + MIER);
descptr = priv->rx_remove_ptr;
/* Check for errors */
err = ioread16(ioaddr + MLSR);
if (err & 0x0400)
dev->stats.rx_errors++;
/* RX FIFO over-run */
if (err & 0x8000)
dev->stats.rx_fifo_errors++;
/* RX descriptor unavailable */
if (err & 0x0080)
dev->stats.rx_frame_errors++;
/* Received packet with length exceeding the buffer length */
if (err & 0x0020)
dev->stats.rx_over_errors++;
/* Received packet too long or too short */
if (err & (0x0010 | 0x0008))
dev->stats.rx_length_errors++;
/* Received packet with CRC errors */
if (err & 0x0004) {
spin_lock(&priv->lock);
dev->stats.rx_crc_errors++;
spin_unlock(&priv->lock);
}
while (priv->rx_free_desc) {
/* No RX packet */
if (descptr->status & 0x8000)
break;
skb_ptr = descptr->skb_ptr;
if (!skb_ptr) {
printk(KERN_ERR "%s: Inconsistent RX"
"descriptor chain\n",
dev->name);
break;
/* Limit not reached and the descriptor belongs to the CPU */
while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
/* Read the descriptor status */
err = descptr->status;
/* Global error status set */
if (err & DSC_RX_ERR) {
/* RX dribble */
if (err & DSC_RX_ERR_DRI)
dev->stats.rx_frame_errors++;
/* Buffer length exceeded */
if (err & DSC_RX_ERR_BUF)
dev->stats.rx_length_errors++;
/* Packet too long */
if (err & DSC_RX_ERR_LONG)
dev->stats.rx_length_errors++;
/* Packet < 64 bytes */
if (err & DSC_RX_ERR_RUNT)
dev->stats.rx_length_errors++;
/* CRC error */
if (err & DSC_RX_ERR_CRC) {
spin_lock(&priv->lock);
dev->stats.rx_crc_errors++;
spin_unlock(&priv->lock);
}
descptr->skb_ptr = NULL;
skb_ptr->dev = priv->dev;
/* Do not count the CRC */
skb_put(skb_ptr, descptr->len - 4);
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += descptr->len;
/* To next descriptor */
descptr = descptr->vndescp;
priv->rx_free_desc--;
goto next_descr;
}
priv->rx_remove_ptr = descptr;
/* Packet successfully received */
new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!new_skb) {
dev->stats.rx_dropped++;
goto next_descr;
}
skb_ptr = descptr->skb_ptr;
skb_ptr->dev = priv->dev;
/* Do not count the CRC */
skb_put(skb_ptr, descptr->len - 4);
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += descptr->len - 4;
/* put new skb into descriptor */
descptr->skb_ptr = new_skb;
descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
descptr->skb_ptr->data,
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
next_descr:
/* put the descriptor back to the MAC */
descptr->status = DSC_OWNER_MAC;
descptr = descptr->vndescp;
count++;
}
/* Allocate new RX buffer */
if (priv->rx_free_desc < RX_DCNT)
rx_buf_alloc(priv, priv->dev);
priv->rx_remove_ptr = descptr;
return count;
}
@ -584,7 +639,7 @@ static void r6040_tx(struct net_device *dev)
if (err & (0x2000 | 0x4000))
dev->stats.tx_carrier_errors++;
if (descptr->status & 0x8000)
if (descptr->status & DSC_OWNER_MAC)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
@ -616,7 +671,7 @@ static int r6040_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
netif_rx_complete(dev, napi);
/* Enable RX interrupt */
iowrite16(ioread16(ioaddr + MIER) | RX_INT, ioaddr + MIER);
iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
}
return work_done;
}
@ -638,13 +693,22 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
return IRQ_NONE;
/* RX interrupt request */
if (status & 0x01) {
if (status & RX_INTS) {
if (status & RX_NO_DESC) {
/* RX descriptor unavailable */
dev->stats.rx_dropped++;
dev->stats.rx_missed_errors++;
}
if (status & RX_FIFO_FULL)
dev->stats.rx_fifo_errors++;
/* Mask off RX interrupt */
iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER);
netif_rx_schedule(dev, &lp->napi);
iowrite16(TX_INT, ioaddr + MIER);
}
/* TX interrupt request */
if (status & 0x10)
if (status & TX_INTS)
r6040_tx(dev);
return IRQ_HANDLED;
@ -660,52 +724,48 @@ static void r6040_poll_controller(struct net_device *dev)
#endif
/* Init RDC MAC */
static void r6040_up(struct net_device *dev)
static int r6040_up(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int ret;
/* Initialise and alloc RX/TX buffers */
r6040_alloc_txbufs(dev);
r6040_alloc_rxbufs(dev);
r6040_init_txbufs(dev);
ret = r6040_alloc_rxbufs(dev);
if (ret)
return ret;
/* Buffer Size Register */
iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
/* Read the PHY ID */
lp->switch_sig = phy_read(ioaddr, 0, 2);
lp->switch_sig = r6040_phy_read(ioaddr, 0, 2);
if (lp->switch_sig == ICPLUS_PHY_ID) {
phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
lp->phy_mode = 0x8000;
} else {
/* PHY Mode Check */
phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
if (PHY_MODE == 0x3100)
lp->phy_mode = phy_mode_chk(dev);
lp->phy_mode = r6040_phy_mode_chk(dev);
else
lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
}
/* MAC Bus Control Register */
iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
/* MAC TX/RX Enable */
/* Set duplex mode */
lp->mcr0 |= lp->phy_mode;
iowrite16(lp->mcr0, ioaddr);
/* set interrupt waiting time and packet numbers */
iowrite16(0x0F06, ioaddr + MT_ICR);
iowrite16(0x0F06, ioaddr + MR_ICR);
/* improve performance (by RDC guys) */
phy_write(ioaddr, 30, 17, (phy_read(ioaddr, 30, 17) | 0x4000));
phy_write(ioaddr, 30, 17, ~((~phy_read(ioaddr, 30, 17)) | 0x2000));
phy_write(ioaddr, 0, 19, 0x0000);
phy_write(ioaddr, 0, 30, 0x01F0);
r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
r6040_phy_write(ioaddr, 0, 19, 0x0000);
r6040_phy_write(ioaddr, 0, 30, 0x01F0);
/* Interrupt Mask Register */
iowrite16(INT_MASK, ioaddr + MIER);
/* Initialize all MAC registers */
r6040_init_mac_regs(dev);
return 0;
}
/*
@ -721,7 +781,7 @@ static void r6040_timer(unsigned long data)
/* Polling PHY Chip Status */
if (PHY_MODE == 0x3100)
phy_mode = phy_mode_chk(dev);
phy_mode = r6040_phy_mode_chk(dev);
else
phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
@ -784,7 +844,14 @@ static int r6040_open(struct net_device *dev)
return -ENOMEM;
}
r6040_up(dev);
ret = r6040_up(dev);
if (ret) {
pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
lp->tx_ring_dma);
pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
lp->rx_ring_dma);
return ret;
}
napi_enable(&lp->napi);
netif_start_queue(dev);
@ -830,7 +897,7 @@ static int r6040_start_xmit(struct sk_buff *skb, struct net_device *dev)
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
descptr->status = 0x8000;
descptr->status = DSC_OWNER_MAC;
/* Trigger the MAC to check the TX descriptor */
iowrite16(0x01, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
@ -987,24 +1054,27 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err)
return err;
goto err_out;
/* this should always be supported */
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses "
"not supported by the card\n");
return -ENODEV;
goto err_out;
}
if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses "
"not supported by the card\n");
return -ENODEV;
goto err_out;
}
/* IO Size check */
if (pci_resource_len(pdev, 0) < io_size) {
printk(KERN_ERR "Insufficient PCI resources, aborting\n");
return -EIO;
printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
err = -EIO;
goto err_out;
}
pioaddr = pci_resource_start(pdev, 0); /* IO map base address */
@ -1012,24 +1082,26 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
printk(KERN_ERR "Failed to allocate etherdev\n");
return -ENOMEM;
printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
err = -ENOMEM;
goto err_out;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
lp->pdev = pdev;
if (pci_request_regions(pdev, DRV_NAME)) {
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
printk(KERN_ERR "ioremap failed for device %s\n",
pci_name(pdev));
return -EIO;
err = -EIO;
goto err_out_free_res;
}
/* Init system & device */
@ -1049,6 +1121,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Link new device into r6040_root_dev */
lp->pdev = pdev;
lp->dev = dev;
/* Init RDC private data */
lp->mcr0 = 0x1002;
@ -1070,8 +1143,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
#endif
netif_napi_add(dev, &lp->napi, r6040_poll, 64);
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = mdio_read;
lp->mii_if.mdio_write = mdio_write;
lp->mii_if.mdio_read = r6040_mdio_read;
lp->mii_if.mdio_write = r6040_mdio_write;
lp->mii_if.phy_id = lp->phy_addr;
lp->mii_if.phy_id_mask = 0x1f;
lp->mii_if.reg_num_mask = 0x1f;
@ -1080,17 +1153,17 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
printk(KERN_ERR DRV_NAME ": Failed to register net device\n");
goto err_out_res;
goto err_out_unmap;
}
return 0;
err_out_res:
err_out_unmap:
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_disable:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
err_out_free_dev:
free_netdev(dev);
err_out:
return err;
}

View File

@ -1418,8 +1418,10 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl_hw_phy_config(dev);
dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(0x82, 0x01);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(0x82, 0x01);
}
pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
@ -3032,13 +3034,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
tmp = rtl8169_rx_config | rx_mode |
(RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
(tp->mac_version == RTL_GIGA_MAC_VER_12) ||
(tp->mac_version == RTL_GIGA_MAC_VER_13) ||
(tp->mac_version == RTL_GIGA_MAC_VER_14) ||
(tp->mac_version == RTL_GIGA_MAC_VER_15) ||
(tp->mac_version == RTL_GIGA_MAC_VER_16) ||
(tp->mac_version == RTL_GIGA_MAC_VER_17)) {
if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
u32 data = mc_filter[0];
mc_filter[0] = swab32(mc_filter[1]);

View File

@ -19,6 +19,7 @@
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "gmii.h"
#include "ethtool.h"
@ -832,7 +833,23 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
if (rss_cpus == 0) {
cpumask_t core_mask;
int cpu;
cpus_clear(core_mask);
efx->rss_queues = 0;
for_each_online_cpu(cpu) {
if (!cpu_isset(cpu, core_mask)) {
++efx->rss_queues;
cpus_or(core_mask, core_mask,
topology_core_siblings(cpu));
}
}
} else {
efx->rss_queues = rss_cpus;
}
efx->rss_queues = min(efx->rss_queues, max_channel + 1);
efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
@ -1762,7 +1779,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
efx->reset_pending = method;
queue_work(efx->workqueue, &efx->reset_work);
queue_work(efx->reset_workqueue, &efx->reset_work);
}
/**************************************************************************
@ -1907,14 +1924,28 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
goto fail1;
}
efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!efx->reset_workqueue) {
rc = -ENOMEM;
goto fail2;
}
return 0;
fail2:
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
fail1:
return rc;
}
static void efx_fini_struct(struct efx_nic *efx)
{
if (efx->reset_workqueue) {
destroy_workqueue(efx->reset_workqueue);
efx->reset_workqueue = NULL;
}
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@ -1977,7 +2008,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* scheduled from this point because efx_stop_all() has been
* called, we are no longer registered with driverlink, and
* the net_device's have been removed. */
flush_workqueue(efx->workqueue);
flush_workqueue(efx->reset_workqueue);
efx_pci_remove_main(efx);
@ -2098,7 +2129,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
* scheduled since efx_stop_all() has been called, and we
* have not and never have been registered with either
* the rtnetlink or driverlink layers. */
cancel_work_sync(&efx->reset_work);
flush_workqueue(efx->reset_workqueue);
/* Retry if a recoverably reset event has been scheduled */
if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&

View File

@ -223,13 +223,8 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
.getsda = falcon_getsda,
.getscl = falcon_getscl,
.udelay = 5,
/*
* This is the number of system clock ticks after which
* i2c-algo-bit gives up waiting for SCL to become high.
* It must be at least 2 since the first tick can happen
* immediately after it starts waiting.
*/
.timeout = 2,
/* Wait up to 50 ms for slave to let us pull SCL high */
.timeout = DIV_ROUND_UP(HZ, 20),
};
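For reference, DIV_ROUND_UP(HZ, 20) is just ceil(HZ / 20) jiffies, i.e. roughly the 50 ms named in the comment regardless of the kernel's tick rate: 5 jiffies at HZ=100 (50 ms), 13 at HZ=250 (52 ms), 50 at HZ=1000 (50 ms).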
/**************************************************************************
@ -2479,12 +2474,11 @@ int falcon_probe_nic(struct efx_nic *efx)
/* Initialise I2C adapter */
efx->i2c_adap.owner = THIS_MODULE;
efx->i2c_adap.class = I2C_CLASS_HWMON;
nic_data->i2c_data = falcon_i2c_bit_operations;
nic_data->i2c_data.data = efx;
efx->i2c_adap.algo_data = &nic_data->i2c_data;
efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
strcpy(efx->i2c_adap.name, "SFC4000 GPIO");
strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
rc = i2c_bit_add_bus(&efx->i2c_adap);
if (rc)
goto fail5;

View File

@ -616,7 +616,9 @@ union efx_multicast_hash {
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for resets, port reconfigures and the HW monitor
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold the RTNL lock and must not acquire it.
* @reset_workqueue: Workqueue for resets. Work item will acquire RTNL.
* @reset_work: Scheduled reset workitem
* @monitor_work: Hardware monitor workitem
* @membase_phys: Memory BAR value as physical address
@ -684,6 +686,7 @@ struct efx_nic {
const struct efx_nic_type *type;
int legacy_irq;
struct workqueue_struct *workqueue;
struct workqueue_struct *reset_workqueue;
struct work_struct reset_work;
struct delayed_work monitor_work;
resource_size_t membase_phys;

View File

@ -5514,22 +5514,6 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
netif_wake_queue(dev); /* Unlock the TX ring */
break;
case DE4X5_SET_PROM: /* Set Promiscuous Mode */
if (!capable(CAP_NET_ADMIN)) return -EPERM;
omr = inl(DE4X5_OMR);
omr |= OMR_PR;
outl(omr, DE4X5_OMR);
dev->flags |= IFF_PROMISC;
break;
case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */
if (!capable(CAP_NET_ADMIN)) return -EPERM;
omr = inl(DE4X5_OMR);
omr &= ~OMR_PR;
outl(omr, DE4X5_OMR);
dev->flags &= ~IFF_PROMISC;
break;
case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
if (!capable(CAP_NET_ADMIN)) return -EPERM;
printk("%s: Boo!\n", dev->name);

View File

@ -1004,8 +1004,7 @@ struct de4x5_ioctl {
*/
#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
#define DE4X5_SET_PROM 0x03 /* Set Promiscuous Mode */
#define DE4X5_CLR_PROM 0x04 /* Clear Promiscuous Mode */
/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */
#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
#define DE4X5_SET_MCA 0x07 /* Set a multicast address */

View File

@ -900,7 +900,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
return -EINVAL;
rtnl_lock();
ret = update_filter(&tun->txflt, (void *) __user arg);
ret = update_filter(&tun->txflt, (void __user *)arg);
rtnl_unlock();
return ret;
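
The tun fix puts the sparse annotation where it belongs, inside the pointer type: (void __user *)arg marks a user-space pointer, whereas (void *) __user arg is not even valid C. __user expands to nothing in a normal build and, under sparse (__CHECKER__), to roughly __attribute__((noderef, address_space(1))), so the pointer can only be reached through copy_from_user()/copy_to_user(). A hedged sketch of the usual ioctl pattern; the demo_* names and the struct are invented:

#include <linux/compiler.h>
#include <linux/uaccess.h>

/* Illustrative only: demo_filter and demo_ioctl are not real tun code. */
struct demo_filter {
	unsigned int count;
};

static int demo_ioctl(unsigned long arg)
{
	struct demo_filter filter;
	void __user *argp = (void __user *)arg;	/* annotation inside the type */

	if (copy_from_user(&filter, argp, sizeof(filter)))
		return -EFAULT;
	/* ... apply the filter ... */
	return 0;
}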

View File

@ -50,10 +50,18 @@ static int is_activesync(struct usb_interface_descriptor *desc)
&& desc->bInterfaceProtocol == 1;
}
static int is_wireless_rndis(struct usb_interface_descriptor *desc)
{
return desc->bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER
&& desc->bInterfaceSubClass == 1
&& desc->bInterfaceProtocol == 3;
}
#else
#define is_rndis(desc) 0
#define is_activesync(desc) 0
#define is_wireless_rndis(desc) 0
#endif
@ -110,7 +118,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* of cdc-acm, it'll fail RNDIS requests cleanly.
*/
rndis = is_rndis(&intf->cur_altsetting->desc)
|| is_activesync(&intf->cur_altsetting->desc);
|| is_activesync(&intf->cur_altsetting->desc)
|| is_wireless_rndis(&intf->cur_altsetting->desc);
memset(info, 0, sizeof *info);
info->control = intf;

View File

@ -576,6 +576,10 @@ static const struct usb_device_id products [] = {
/* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
.driver_info = (unsigned long) &rndis_info,
}, {
/* RNDIS for tethering */
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
.driver_info = (unsigned long) &rndis_info,
},
{ }, // END
};
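
The new entry matches on the interface triple bInterfaceClass/bInterfaceSubClass/bInterfaceProtocol (0xe0/1/3 for wireless-controller RNDIS) rather than on vendor and product IDs, so any tethering device exposing that triple binds without a per-device table entry; is_wireless_rndis() above checks the same triple at bind time. A minimal, hypothetical match table using the same macro (demo_ids is not part of rndis_host):

/* Hypothetical table: matches on interface class/subclass/protocol,
 * not on vendor/product IDs. USB_CLASS_WIRELESS_CONTROLLER is 0xe0. */
#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3) },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(usb, demo_ids);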

File diff suppressed because it is too large
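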

View File

@ -7,6 +7,7 @@
*/
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
@ -22,15 +23,13 @@
* Debug Facility Stuff
*/
DEFINE_PER_CPU(char[256], ctcm_dbf_txt_buf);
struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
[CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, 5, NULL},
[CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, 3, NULL},
[CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, 3, NULL},
[CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 64, 5, NULL},
[CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 64, 3, NULL},
[CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 64, 3, NULL},
[CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
[CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
[CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
[CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
[CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
[CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
};
void ctcm_unregister_dbf_views(void)
@ -65,3 +64,17 @@ int ctcm_register_dbf_views(void)
return 0;
}
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
{
char dbf_txt_buf[64];
va_list args;
if (level > (ctcm_dbf[dbf_nix].id)->level)
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
}
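
ctcm_dbf_longtext() is what the slimmed-down CTCM_DBF_TEXT_ macro (see the header change below) now expands to: the level test comes first, so arguments are only formatted when the message will actually be recorded, and vsnprintf() into a bounded stack buffer replaces the old unbounded sprintf() into a per-CPU buffer. The same shape in plain, runnable C; the 64-byte limit mirrors the driver, everything else is illustrative:

#include <stdarg.h>
#include <stdio.h>

static int demo_level = 3;	/* stands in for ctcm_dbf[...].id->level */

static void demo_dbg(int level, const char *fmt, ...)
{
	char buf[64];
	va_list args;

	if (level > demo_level)	/* cheap check before any formatting */
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);	/* bounded, NUL-terminated */
	va_end(args);

	fputs(buf, stdout);	/* the driver hands this to debug_text_event() */
	fputc('\n', stdout);
}

int main(void)
{
	demo_dbg(2, "%s(%s): rc=%d", "demo_func", "ch-0.0.1000", -5);
	demo_dbg(6, "suppressed: level %d > %d", 6, demo_level);
	return 0;
}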

View File

@ -20,16 +20,17 @@
#else
#define do_debug 0
#endif
#ifdef DEBUGCCW
#define do_debug_ccw 1
#define DEBUGDATA 1
#else
#define do_debug_ccw 0
#endif
#ifdef DEBUGDATA
#define do_debug_data 1
#else
#define do_debug_data 0
#endif
#ifdef DEBUGCCW
#define do_debug_ccw 1
#else
#define do_debug_ccw 0
#endif
/* define dbf debug levels similar to kernel msg levels */
#define CTC_DBF_ALWAYS 0 /* always print this */
@ -42,8 +43,6 @@
#define CTC_DBF_INFO 5 /* informational */
#define CTC_DBF_DEBUG 6 /* debug-level messages */
DECLARE_PER_CPU(char[256], ctcm_dbf_txt_buf);
enum ctcm_dbf_names {
CTCM_DBF_SETUP,
CTCM_DBF_ERROR,
@ -67,6 +66,7 @@ extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
int ctcm_register_dbf_views(void);
void ctcm_unregister_dbf_views(void);
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...);
static inline const char *strtail(const char *s, int n)
{
@ -74,12 +74,6 @@ static inline const char *strtail(const char *s, int n)
return (l > n) ? s + (l - n) : s;
}
/* sort out levels early to avoid unnecessary sprintfs */
static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level)
{
return (dbf_grp->level >= level);
}
#define CTCM_FUNTAIL strtail((char *)__func__, 16)
#define CTCM_DBF_TEXT(name, level, text) \
@ -94,16 +88,7 @@ static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level)
} while (0)
#define CTCM_DBF_TEXT_(name, level, text...) \
do { \
if (ctcm_dbf_passes(ctcm_dbf[CTCM_DBF_##name].id, level)) { \
char *ctcm_dbf_txt_buf = \
get_cpu_var(ctcm_dbf_txt_buf); \
sprintf(ctcm_dbf_txt_buf, text); \
debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, \
level, ctcm_dbf_txt_buf); \
put_cpu_var(ctcm_dbf_txt_buf); \
} \
} while (0)
ctcm_dbf_longtext(CTCM_DBF_##name, level, text)
/*
* cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
@ -112,13 +97,13 @@ static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level)
*/
#define CTCM_DBF_DEV_NAME(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) : %s", \
CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \
CTCM_FUNTAIL, dev->name, text); \
} while (0)
#define MPC_DBF_DEV_NAME(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) : %s", \
CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \
CTCM_FUNTAIL, dev->name, text); \
} while (0)
@ -137,13 +122,13 @@ static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level)
*/
#define CTCM_DBF_DEV(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) : %s", \
CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \
CTCM_FUNTAIL, dev, text); \
} while (0)
#define MPC_DBF_DEV(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) : %s", \
CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \
CTCM_FUNTAIL, dev, text); \
} while (0)
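
All of these logging macros keep the do { ... } while (0) wrapper, the standard way to make a multi-statement macro behave as a single statement inside an unbraced if/else. A small standalone illustration (LOG_GOOD and LOG_BAD are invented names):

#include <stdio.h>

/* Without the wrapper, a two-statement macro breaks unbraced if/else. */
#define LOG_BAD(msg)	printf("dbg: "); printf("%s\n", msg)

#define LOG_GOOD(msg)				\
	do {					\
		printf("dbg: ");		\
		printf("%s\n", msg);		\
	} while (0)

int main(void)
{
	int verbose = 0;

	if (verbose)
		LOG_GOOD("only when verbose");	/* expands to one statement */
	else
		printf("quiet\n");

	/* Using LOG_BAD in the same position would leave the 'else' with
	 * nothing to attach to, and the code would not even compile. */
	return 0;
}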

View File

@ -190,7 +190,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"ccw error %s (%s): %04x\n", ch->id, msg, rc);
"%s(%s): %s: %04x\n",
CTCM_FUNTAIL, ch->id, msg, rc);
switch (rc) {
case -EBUSY:
ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg);
@ -212,7 +213,7 @@ void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
struct sk_buff *skb;
CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
while ((skb = skb_dequeue(q))) {
atomic_dec(&skb->users);
@ -251,6 +252,8 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
unsigned long duration;
struct timespec done_stamp = current_kernel_time(); /* xtime */
CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
duration =
(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
@ -258,8 +261,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
ch->prof.tx_time = duration;
if (ch->irb->scsw.cmd.count != 0)
ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
dev->name, ch->irb->scsw.cmd.count);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): TX not complete, remaining %d bytes",
CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
fsm_deltimer(&ch->timer);
while ((skb = skb_dequeue(&ch->io_queue))) {
priv->stats.tx_packets++;
@ -334,7 +338,8 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CTC_STATE_TXIDLE);
fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
@ -361,15 +366,17 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
fsm_deltimer(&ch->timer);
if (len < 8) {
ctcm_pr_debug("%s: got packet with length %d < 8\n",
dev->name, len);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s): got packet with length %d < 8\n",
CTCM_FUNTAIL, dev->name, len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
goto again;
}
if (len > ch->max_bufsize) {
ctcm_pr_debug("%s: got packet with length %d > %d\n",
dev->name, len, ch->max_bufsize);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s): got packet with length %d > %d\n",
CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
goto again;
@ -388,8 +395,9 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
break;
}
if ((len < block_len) || (len > check_len)) {
ctcm_pr_debug("%s: got block length %d != rx length %d\n",
dev->name, block_len, len);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s): got block length %d != rx length %d\n",
CTCM_FUNTAIL, dev->name, block_len, len);
if (do_debug)
ctcmpc_dump_skb(skb, 0);
@ -425,17 +433,23 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
*/
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
int rc;
struct channel *ch = arg;
int fsmstate = fsm_getstate(fi);
CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s) : %02x",
CTCM_FUNTAIL, ch->id, fsmstate);
if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
ctcm_pr_debug("%s: remote side issued READ?, init.\n", ch->id);
ch->sense_rc = 0; /* reset unit check report control */
if (fsmstate == CTC_STATE_TXIDLE)
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): remote side issued READ?, init.\n",
CTCM_FUNTAIL, ch->id);
fsm_deltimer(&ch->timer);
if (ctcm_checkalloc_buffer(ch))
return;
if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
if ((fsmstate == CTC_STATE_SETUPWAIT) &&
(ch->protocol == CTCM_PROTO_OS390)) {
/* OS/390 resp. z/OS */
if (CHANNEL_DIRECTION(ch->flags) == READ) {
@ -451,7 +465,6 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
}
return;
}
/*
* Don't setup a timer for receiving the initial RX frame
* if in compatibility mode, since VM TCP delays the initial
@ -505,11 +518,10 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg)
__u16 buflen;
int rc;
CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
fsm_deltimer(&ch->timer);
buflen = *((__u16 *)ch->trans_skb->data);
if (do_debug)
ctcm_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
__func__, dev->name, buflen);
if (buflen >= CTCM_INITIAL_BLOCKLEN) {
if (ctcm_checkalloc_buffer(ch))
@ -524,9 +536,9 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg)
} else
fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
} else {
if (do_debug)
ctcm_pr_debug("%s: Initial RX count %d not %d\n",
dev->name, buflen, CTCM_INITIAL_BLOCKLEN);
CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
__func__, dev->name,
buflen, CTCM_INITIAL_BLOCKLEN);
chx_firstio(fi, event, arg);
}
}
@ -548,14 +560,12 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
fsm_deltimer(&ch->timer);
if (IS_MPC(ch)) {
timeout = 1500;
if (do_debug)
ctcm_pr_debug("ctcm enter: %s(): cp=%i ch=0x%p id=%s\n",
__FUNCTION__, smp_processor_id(), ch, ch->id);
CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
}
fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
fsm_newstate(fi, CTC_STATE_SETUPWAIT);
if (do_debug_ccw && IS_MPC(ch))
ctcmpc_dumpit((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
@ -583,24 +593,12 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
*/
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
int rc;
struct net_device *dev;
struct channel *ch = arg;
unsigned long saveflags;
int rc;
CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
if (ch == NULL) {
ctcm_pr_warn("chx_start ch=NULL\n");
return;
}
if (ch->netdev == NULL) {
ctcm_pr_warn("chx_start dev=NULL, id=%s\n", ch->id);
return;
}
dev = ch->netdev;
if (do_debug)
ctcm_pr_debug("%s: %s channel start\n", dev->name,
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
if (ch->trans_skb != NULL) {
@ -618,11 +616,12 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
ch->ccw[1].count = 0;
}
if (ctcm_checkalloc_buffer(ch)) {
ctcm_pr_notice("%s: %s trans_skb allocation delayed "
"until first transfer\n", dev->name,
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): %s trans_skb alloc delayed "
"until first transfer",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
}
ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[0].count = 0;
@ -661,7 +660,6 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
int rc;
int oldstate;
CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (IS_MPC(ch))
fsm_deltimer(&ch->sweep_timer);
@ -684,7 +682,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
fsm_deltimer(&ch->timer);
if (event != CTC_EVENT_STOP) {
fsm_newstate(fi, oldstate);
ctcm_ccw_check_rc(ch, rc, (char *)__FUNCTION__);
ctcm_ccw_check_rc(ch, rc, (char *)__func__);
}
}
}
@ -703,7 +701,9 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
"%s(%s): %s[%d]\n",
CTCM_FUNTAIL, dev->name, ch->id, state);
fsm_deltimer(&ch->timer);
if (IS_MPC(ch))
@ -743,7 +743,6 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
*/
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}
@ -771,7 +770,6 @@ static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
*/
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}
@ -809,8 +807,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
}
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s : %s error during %s channel setup state=%s\n",
dev->name, ctc_ch_event_names[event],
"%s(%s) : %s error during %s channel setup state=%s\n",
CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
fsm_getstate_str(fi));
@ -838,10 +836,12 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
int oldstate;
int rc;
CTCM_DBF_TEXT(TRACE, CTC_DBF_NOTICE, __FUNCTION__);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s: %s[%d] of %s\n",
CTCM_FUNTAIL, ch->id, event, dev->name);
fsm_deltimer(&ch->timer);
ctcm_pr_debug("%s: %s channel restart\n", dev->name,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
oldstate = fsm_getstate(fi);
fsm_newstate(fi, CTC_STATE_STARTWAIT);
@ -876,13 +876,10 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
if (event == CTC_EVENT_TIMER) {
if (!IS_MPCDEV(dev))
/* TODO : check if MPC deletes timer somewhere */
fsm_deltimer(&ch->timer);
ctcm_pr_debug("%s: Timeout during RX init handshake\n",
dev->name);
if (ch->retry++ < 3)
ctcm_chx_restart(fi, event, arg);
else {
@ -907,9 +904,10 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): RX %s busy, init. fail",
CTCM_FUNTAIL, dev->name, ch->id);
fsm_newstate(fi, CTC_STATE_RXERR);
ctcm_pr_warn("%s: RX busy. Initialization failed\n", dev->name);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
@ -927,11 +925,10 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
CTCM_DBF_DEV_NAME(TRACE, dev, "Got remote disconnect, re-initializing");
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s: %s: remote disconnect - re-init ...",
CTCM_FUNTAIL, dev->name);
fsm_deltimer(&ch->timer);
if (do_debug)
ctcm_pr_debug("%s: Got remote disconnect, "
"re-initializing ...\n", dev->name);
/*
* Notify device statemachine
*/
@ -961,8 +958,6 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
if (event == CTC_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
CTCM_DBF_DEV_NAME(ERROR, dev,
"Timeout during TX init handshake");
if (ch->retry++ < 3)
ctcm_chx_restart(fi, event, arg);
else {
@ -971,9 +966,8 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
}
} else {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s : %s error during channel setup state=%s",
dev->name, ctc_ch_event_names[event],
fsm_getstate_str(fi));
"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
ctc_ch_event_names[event], fsm_getstate_str(fi));
ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name);
}
@ -993,15 +987,15 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
struct ctcm_priv *priv = dev->priv;
struct sk_buff *skb;
if (do_debug)
ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
__FUNCTION__, smp_processor_id(), ch, ch->id);
CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
fsm_deltimer(&ch->timer);
if (ch->retry++ > 3) {
struct mpc_group *gptr = priv->mpcg;
ctcm_pr_debug("%s: TX retry failed, restarting channel\n",
dev->name);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
"%s: %s: retries exceeded",
CTCM_FUNTAIL, ch->id);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
/* call restart if not MPC or if MPC and mpcg fsm is ready.
use gptr as mpc indicator */
@ -1010,7 +1004,9 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
goto done;
}
ctcm_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s : %s: retry %d",
CTCM_FUNTAIL, ch->id, ch->retry);
skb = skb_peek(&ch->io_queue);
if (skb) {
int rc = 0;
@ -1018,8 +1014,9 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
clear_normalized_cda(&ch->ccw[4]);
ch->ccw[4].count = skb->len;
if (set_normalized_cda(&ch->ccw[4], skb->data)) {
ctcm_pr_debug("%s: IDAL alloc failed, chan restart\n",
dev->name);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
"%s: %s: IDAL alloc failed",
CTCM_FUNTAIL, ch->id);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
ctcm_chx_restart(fi, event, arg);
goto done;
@ -1061,22 +1058,21 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
int rd = CHANNEL_DIRECTION(ch->flags);
CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctcm_pr_warn("%s %s : unrecoverable channel error\n",
CTC_DRIVER_NAME, dev->name);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: %s: %s unrecoverable channel error",
CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");
if (IS_MPC(ch)) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
}
if (CHANNEL_DIRECTION(ch->flags) == READ) {
ctcm_pr_debug("%s: RX I/O error\n", dev->name);
if (rd == READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
ctcm_pr_debug("%s: TX I/O error\n", dev->name);
fsm_newstate(fi, CTC_STATE_TXERR);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
}
@ -1216,27 +1212,27 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
struct sk_buff *skb;
int first = 1;
int i;
struct timespec done_stamp;
__u32 data_space;
unsigned long duration;
struct sk_buff *peekskb;
int rc;
struct th_header *header;
struct pdu *p_header;
struct timespec done_stamp = current_kernel_time(); /* xtime */
if (do_debug)
ctcm_pr_debug("%s cp:%i enter: %s()\n",
dev->name, smp_processor_id(), __FUNCTION__);
CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
__func__, dev->name, smp_processor_id());
done_stamp = current_kernel_time(); /* xtime */
duration = (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000
+ (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
duration =
(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
if (duration > ch->prof.tx_time)
ch->prof.tx_time = duration;
if (ch->irb->scsw.cmd.count != 0)
ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
dev->name, ch->irb->scsw.cmd.count);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"%s(%s): TX not complete, remaining %d bytes",
CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
fsm_deltimer(&ch->timer);
while ((skb = skb_dequeue(&ch->io_queue))) {
priv->stats.tx_packets++;
@ -1250,7 +1246,6 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
}
spin_lock(&ch->collect_lock);
clear_normalized_cda(&ch->ccw[4]);
if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
spin_unlock(&ch->collect_lock);
fsm_newstate(fi, CTC_STATE_TXIDLE);
@ -1269,17 +1264,13 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
i = 0;
if (do_debug_data)
ctcm_pr_debug("ctcmpc: %s() building "
"trans_skb from collect_q \n", __FUNCTION__);
p_header = NULL;
data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
if (do_debug_data)
ctcm_pr_debug("ctcmpc: %s() building trans_skb from collect_q"
" data_space:%04x\n", __FUNCTION__, data_space);
p_header = NULL;
CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
" data_space:%04x\n",
__func__, data_space);
while ((skb = skb_dequeue(&ch->collect_queue))) {
memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
p_header = (struct pdu *)
@ -1290,15 +1281,12 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
else
p_header->pdu_flag |= 0x20;
if (do_debug_data) {
ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
__FUNCTION__, ch->trans_skb->len);
ctcm_pr_debug("ctcmpc: %s() pdu header and data"
" for up to 32 bytes sent to vtam\n",
__FUNCTION__);
ctcmpc_dumpit((char *)p_header,
min_t(int, skb->len, 32));
}
CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
__func__, ch->trans_skb->len);
CTCM_PR_DBGDATA("%s: pdu header and data for up"
" to 32 bytes sent to vtam\n", __func__);
CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
ch->collect_len -= skb->len;
data_space -= skb->len;
priv->stats.tx_packets++;
@ -1314,46 +1302,38 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
if (p_header)
p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
header = kzalloc(TH_HEADER_LENGTH, gfp_type());
if (!header) {
printk(KERN_WARNING "ctcmpc: OUT OF MEMORY IN %s()"
": Data Lost \n", __FUNCTION__);
spin_unlock(&ch->collect_lock);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto done;
goto done;
}
header->th_ch_flag = TH_HAS_PDU; /* Normal data */
ch->th_seq_num++;
header->th_seq_num = ch->th_seq_num;
if (do_debug_data)
ctcm_pr_debug("%s: ToVTAM_th_seq= %08x\n" ,
__FUNCTION__, ch->th_seq_num);
CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
__func__, ch->th_seq_num);
memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
TH_HEADER_LENGTH); /* put the TH on the packet */
kfree(header);
if (do_debug_data) {
ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
__FUNCTION__, ch->trans_skb->len);
ctcm_pr_debug("ctcmpc: %s() up-to-50 bytes of trans_skb "
"data to vtam from collect_q\n", __FUNCTION__);
ctcmpc_dumpit((char *)ch->trans_skb->data,
CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
__func__, ch->trans_skb->len);
CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
"data to vtam from collect_q\n", __func__);
CTCM_D3_DUMP((char *)ch->trans_skb->data,
min_t(int, ch->trans_skb->len, 50));
}
spin_unlock(&ch->collect_lock);
clear_normalized_cda(&ch->ccw[1]);
if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
dev_kfree_skb_any(ch->trans_skb);
ch->trans_skb = NULL;
printk(KERN_WARNING
"ctcmpc: %s()CCW failure - data lost\n",
__FUNCTION__);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
"%s: %s: IDAL alloc failed",
CTCM_FUNTAIL, ch->id);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
return;
}
@ -1373,7 +1353,6 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
}
done:
ctcm_clear_busy(dev);
ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
return;
}
@ -1393,26 +1372,25 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
struct mpc_group *grp = priv->mpcg;
struct sk_buff *skb = ch->trans_skb;
struct sk_buff *new_skb;
unsigned long saveflags = 0; /* avoids compiler warning */
unsigned long saveflags = 0; /* avoids compiler warning */
int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
if (do_debug_data) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
dev->name, smp_processor_id(), ch->id);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx: maxbuf: %04x "
"len: %04x\n", ch->max_bufsize, len);
}
CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
CTCM_FUNTAIL, dev->name, smp_processor_id(),
ch->id, ch->max_bufsize, len);
fsm_deltimer(&ch->timer);
if (skb == NULL) {
ctcm_pr_debug("ctcmpc exit: %s() TRANS_SKB = NULL \n",
__FUNCTION__);
goto again;
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): TRANS_SKB = NULL",
CTCM_FUNTAIL, dev->name);
goto again;
}
if (len < TH_HEADER_LENGTH) {
ctcm_pr_info("%s: got packet with invalid length %d\n",
dev->name, len);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): packet length %d to short",
CTCM_FUNTAIL, dev->name, len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
} else {
@ -1422,11 +1400,9 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
if (new_skb == NULL) {
printk(KERN_INFO "ctcmpc:%s() NEW_SKB = NULL\n",
__FUNCTION__);
printk(KERN_WARNING "ctcmpc: %s() MEMORY ALLOC FAILED"
" - DATA LOST - MPC FAILED\n",
__FUNCTION__);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%d): skb allocation failed",
CTCM_FUNTAIL, dev->name);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto again;
}
@ -1479,9 +1455,8 @@ again:
break;
}
if (do_debug)
ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
dev->name, __FUNCTION__, ch, ch->id);
CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
__func__, dev->name, ch, ch->id);
}
@ -1497,15 +1472,16 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
struct mpc_group *gptr = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
__func__, ch->id, ch);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
fsm_getstate(gptr->fsm), ch->protocol);
if (do_debug) {
struct mpc_group *gptr = priv->mpcg;
ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
__FUNCTION__, ch, ch->id);
ctcm_pr_debug("%s() %s chstate:%i grpstate:%i chprotocol:%i\n",
__FUNCTION__, ch->id, fsm_getstate(fi),
fsm_getstate(gptr->fsm), ch->protocol);
}
if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
@ -1531,9 +1507,8 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
done:
if (do_debug)
ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
__FUNCTION__, ch, ch->id);
CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
__func__, ch->id, ch);
return;
}
@ -1556,12 +1531,9 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
unsigned long saveflags = 0; /* avoids compiler warning */
fsm_deltimer(&ch->timer);
ctcm_pr_debug("%s cp:%i enter: %s()\n",
dev->name, smp_processor_id(), __FUNCTION__);
if (do_debug)
ctcm_pr_debug("%s() %s chstate:%i grpstate:%i\n",
__FUNCTION__, ch->id,
fsm_getstate(fi), fsm_getstate(grp->fsm));
CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
__func__, ch->id, dev->name, smp_processor_id(),
fsm_getstate(fi), fsm_getstate(grp->fsm));
fsm_newstate(fi, CTC_STATE_RXIDLE);
/* XID processing complete */
@ -1575,9 +1547,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = ch->max_bufsize;
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0],
sizeof(struct ccw1) * 3);
CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
if (event == CTC_EVENT_START)
/* see remark about conditional locking */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
@ -1598,9 +1568,6 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
if (do_debug)
ctcm_pr_debug("ctcmpc exit: %s %s()\n",
dev->name, __FUNCTION__);
return;
}
@ -1616,13 +1583,9 @@ static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
struct ctcm_priv *priv = dev->priv;
struct mpc_group *grp = priv->mpcg;
if (do_debug) {
ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s"
"GrpState:%s ChState:%s\n",
__FUNCTION__, smp_processor_id(), ch, ch->id,
fsm_getstate_str(grp->fsm),
fsm_getstate_str(ch->fsm));
}
CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
__func__, dev->name, ch->id, ch, smp_processor_id(),
fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
@ -1664,11 +1627,7 @@ static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
break;
}
if (do_debug)
ctcm_pr_debug("ctcmpc exit : %s(): cp=%i ch=0x%p id=%s\n",
__FUNCTION__, smp_processor_id(), ch, ch->id);
return;
}
/*
@ -1683,11 +1642,9 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
struct ctcm_priv *priv = dev->priv;
struct mpc_group *grp = priv->mpcg;
ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n",
dev->name,
__FUNCTION__, ch->id,
fsm_getstate_str(grp->fsm),
fsm_getstate_str(ch->fsm));
CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
__func__, dev->name, ch->id,
fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
fsm_deltimer(&ch->timer);
@ -1750,16 +1707,12 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
if (ch->in_mpcgroup)
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
else
printk(KERN_WARNING "ctcmpc: %s() Not all channels have"
" been added to group\n", __FUNCTION__);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): channel %s not added to group",
CTCM_FUNTAIL, dev->name, ch->id);
done:
if (do_debug)
ctcm_pr_debug("ctcmpc exit : %s()%s ch=0x%p id=%s\n",
__FUNCTION__, dev->name, ch, ch->id);
return;
}
/*
@ -1774,13 +1727,7 @@ static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
struct ctcm_priv *priv = dev->priv;
struct mpc_group *grp = priv->mpcg;
ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n",
dev->name, __FUNCTION__, ch->id,
fsm_getstate_str(grp->fsm),
fsm_getstate_str(ch->fsm));
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
return;
}
@ -1802,19 +1749,16 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
int rc = 0;
unsigned long saveflags = 0;
if (do_debug)
ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
__FUNCTION__, smp_processor_id(), ach, ach->id);
CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ach, ach->id);
if (grp->in_sweep == 0)
goto done;
if (do_debug_data) {
ctcm_pr_debug("ctcmpc: %s() 1: ToVTAM_th_seq= %08x\n" ,
__FUNCTION__, wch->th_seq_num);
ctcm_pr_debug("ctcmpc: %s() 1: FromVTAM_th_seq= %08x\n" ,
__FUNCTION__, rch->th_seq_num);
}
CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
__func__, wch->th_seq_num);
CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
__func__, rch->th_seq_num);
if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
/* give the previous IO time to complete */
@ -1853,11 +1797,9 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
header->sw.th_last_seq = wch->th_seq_num;
if (do_debug_ccw)
ctcmpc_dumpit((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
ctcm_pr_debug("ctcmpc: %s() sweep packet\n", __FUNCTION__);
ctcmpc_dumpit((char *)header, TH_SWEEP_LENGTH);
CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
fsm_newstate(wch->fsm, CTC_STATE_TX);
@ -1876,19 +1818,13 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
ctcm_clear_busy_do(dev);
}
if (do_debug_data) {
ctcm_pr_debug("ctcmpc: %s()2: ToVTAM_th_seq= %08x\n" ,
__FUNCTION__, wch->th_seq_num);
ctcm_pr_debug("ctcmpc: %s()2: FromVTAM_th_seq= %08x\n" ,
__FUNCTION__, rch->th_seq_num);
}
CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
__func__, wch->th_seq_num, rch->th_seq_num);
if (rc != 0)
ctcm_ccw_check_rc(wch, rc, "send sweep");
done:
if (do_debug)
ctcm_pr_debug("ctcmpc exit: %s() %s\n", __FUNCTION__, ach->id);
return;
}
@ -2149,9 +2085,8 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
ch->th_seq_num = 0x00;
if (do_debug)
ctcm_pr_debug("ctcm: %s() CH_th_seq= %08x\n",
__FUNCTION__, ch->th_seq_num);
CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
__func__, ch->th_seq_num);
}
if (IS_MPC(priv))
fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
@ -2199,8 +2134,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->priv;
int dev_stat = fsm_getstate(fi);
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
dev->name, dev->priv, dev_stat, event);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT_RXTX:

View File

@ -84,20 +84,19 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
skb_pull(pskb, LL_HEADER_LENGTH);
if ((ch->protocol == CTCM_PROTO_S390) &&
(header->type != ETH_P_IP)) {
if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
ch->logflags |= LOG_FLAG_ILLEGALPKT;
/*
* Check packet type only if we stick strictly
* to S/390's protocol of OS390. This only
* supports IP. Otherwise allow any packet
* type.
*/
ctcm_pr_warn("%s Illegal packet type 0x%04x "
"received, dropping\n",
dev->name, header->type);
ch->logflags |= LOG_FLAG_ILLEGALPKT;
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Illegal packet type 0x%04x"
" - dropping",
CTCM_FUNTAIL, dev->name, header->type);
}
priv->stats.rx_dropped++;
priv->stats.rx_frame_errors++;
return;
@ -105,11 +104,11 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
pskb->protocol = ntohs(header->type);
if (header->length <= LL_HEADER_LENGTH) {
if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
ctcm_pr_warn(
"%s Illegal packet size %d "
"received (MTU=%d blocklen=%d), "
"dropping\n", dev->name, header->length,
dev->mtu, len);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Illegal packet size %d(%d,%d)"
"- dropping",
CTCM_FUNTAIL, dev->name,
header->length, dev->mtu, len);
ch->logflags |= LOG_FLAG_ILLEGALSIZE;
}
@ -122,10 +121,10 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
if ((header->length > skb_tailroom(pskb)) ||
(header->length > len)) {
if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
ctcm_pr_warn(
"%s Illegal packet size %d (beyond the"
" end of received data), dropping\n",
dev->name, header->length);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Packet size %d (overrun)"
" - dropping", CTCM_FUNTAIL,
dev->name, header->length);
ch->logflags |= LOG_FLAG_OVERRUN;
}
@ -139,9 +138,9 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
skb = dev_alloc_skb(pskb->len);
if (!skb) {
if (!(ch->logflags & LOG_FLAG_NOMEM)) {
ctcm_pr_warn(
"%s Out of memory in ctcm_unpack_skb\n",
dev->name);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): MEMORY allocation error",
CTCM_FUNTAIL, dev->name);
ch->logflags |= LOG_FLAG_NOMEM;
}
priv->stats.rx_dropped++;
@ -184,7 +183,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
*/
static void channel_free(struct channel *ch)
{
CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id);
ch->flags &= ~CHANNEL_FLAGS_INUSE;
fsm_newstate(ch->fsm, CTC_STATE_IDLE);
}
@ -251,19 +250,12 @@ static struct channel *channel_get(enum channel_types type,
{
struct channel *ch = channels;
if (do_debug) {
char buf[64];
sprintf(buf, "%s(%d, %s, %d)\n",
CTCM_FUNTAIL, type, id, direction);
CTCM_DBF_TEXT(TRACE, CTC_DBF_INFO, buf);
}
while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
ch = ch->next;
if (!ch) {
char buf[64];
sprintf(buf, "%s(%d, %s, %d) not found in channel list\n",
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%d, %s, %d) not found in channel list\n",
CTCM_FUNTAIL, type, id, direction);
CTCM_DBF_TEXT(ERROR, CTC_DBF_ERROR, buf);
} else {
if (ch->flags & CHANNEL_FLAGS_INUSE)
ch = NULL;
@ -283,8 +275,9 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
if (!IS_ERR(irb))
return 0;
CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, "irb error %ld on device %s\n",
PTR_ERR(irb), cdev->dev.bus_id);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
"irb error %ld on device %s\n",
PTR_ERR(irb), cdev->dev.bus_id);
switch (PTR_ERR(irb)) {
case -EIO:
@ -307,58 +300,85 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
* ch The channel, the sense code belongs to.
* sense The sense code to inspect.
*/
static inline void ccw_unit_check(struct channel *ch, unsigned char sense)
static inline void ccw_unit_check(struct channel *ch, __u8 sense)
{
CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): %02x",
CTCM_FUNTAIL, ch->id, sense);
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
ctcm_pr_debug("%s: Interface disc. or Sel. reset "
"(remote)\n", ch->id);
if (ch->sense_rc != 0x01) {
ctcm_pr_debug("%s: Interface disc. or Sel. "
"reset (remote)\n", ch->id);
ch->sense_rc = 0x01;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
} else {
ctcm_pr_debug("%s: System reset (remote)\n", ch->id);
if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
ctcm_pr_debug("%s: System reset (remote)\n",
ch->id);
ch->sense_rc = SNS0_INTERVENTION_REQ;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
}
} else if (sense & SNS0_EQUIPMENT_CHECK) {
if (sense & SNS0_BUS_OUT_CHECK) {
ctcm_pr_warn("%s: Hardware malfunction (remote)\n",
ch->id);
if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): remote HW error %02x",
CTCM_FUNTAIL, ch->id, sense);
ch->sense_rc = SNS0_BUS_OUT_CHECK;
}
fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
} else {
ctcm_pr_warn("%s: Read-data parity error (remote)\n",
ch->id);
if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): remote read parity error %02x",
CTCM_FUNTAIL, ch->id, sense);
ch->sense_rc = SNS0_EQUIPMENT_CHECK;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
}
} else if (sense & SNS0_BUS_OUT_CHECK) {
if (sense & 0x04) {
ctcm_pr_warn("%s: Data-streaming timeout)\n", ch->id);
fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
} else {
ctcm_pr_warn("%s: Data-transfer parity error\n",
ch->id);
fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): BUS OUT error %02x",
CTCM_FUNTAIL, ch->id, sense);
ch->sense_rc = SNS0_BUS_OUT_CHECK;
}
if (sense & 0x04) /* data-streaming timeout */
fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
else /* Data-transfer parity error */
fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
} else if (sense & SNS0_CMD_REJECT) {
ctcm_pr_warn("%s: Command reject\n", ch->id);
if (ch->sense_rc != SNS0_CMD_REJECT) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): Command rejected",
CTCM_FUNTAIL, ch->id);
ch->sense_rc = SNS0_CMD_REJECT;
}
} else if (sense == 0) {
ctcm_pr_debug("%s: Unit check ZERO\n", ch->id);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): Unit check ZERO",
CTCM_FUNTAIL, ch->id);
fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
} else {
ctcm_pr_warn("%s: Unit Check with sense code: %02x\n",
ch->id, sense);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): Unit check code %02x unknown",
CTCM_FUNTAIL, ch->id, sense);
fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
}
}
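
ccw_unit_check() now caches the last reported sense code in ch->sense_rc and logs only when it changes (the interrupt handler applies the same test before tracing), so a flapping channel cannot flood the s390 debug feature. A tiny sketch of that log-only-on-change pattern with invented names:

#include <stdio.h>

struct demo_chan {
	unsigned char last_sense;	/* plays the role of ch->sense_rc */
};

static void demo_unit_check(struct demo_chan *ch, unsigned char sense)
{
	if (ch->last_sense != sense) {
		printf("sense changed: %02x -> %02x\n", ch->last_sense, sense);
		ch->last_sense = sense;
	}
	/* react to the condition every time, log it only once per change */
}

int main(void)
{
	struct demo_chan ch = { 0 };

	demo_unit_check(&ch, 0x41);	/* logged */
	demo_unit_check(&ch, 0x41);	/* silent */
	demo_unit_check(&ch, 0x02);	/* logged again */
	return 0;
}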
int ctcm_ch_alloc_buffer(struct channel *ch)
{
CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
clear_normalized_cda(&ch->ccw[1]);
ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
if (ch->trans_skb == NULL) {
ctcm_pr_warn("%s: Couldn't alloc %s trans_skb\n",
ch->id,
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s trans_skb allocation error",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
return -ENOMEM;
}
@ -367,9 +387,9 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
dev_kfree_skb(ch->trans_skb);
ch->trans_skb = NULL;
ctcm_pr_warn("%s: set_normalized_cda for %s "
"trans_skb failed, dropping packets\n",
ch->id,
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s set norm_cda failed",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
return -ENOMEM;
}
@ -516,7 +536,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
atomic_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -EBUSY;
return -ENOMEM;
}
skb_reset_tail_pointer(ch->trans_skb);
@ -570,15 +590,12 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
struct th_sweep *header;
struct sk_buff *sweep_skb;
struct channel *ch;
int rc = 0;
/* int rc = 0; */
priv = dev->priv;
grp = priv->mpcg;
ch = priv->channel[WRITE];
if (do_debug)
MPC_DBF_DEV_NAME(TRACE, dev, ch->id);
/* sweep processing is not complete until response and request */
/* has completed for all read channels in group */
if (grp->in_sweep == 0) {
@ -590,17 +607,16 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
if (sweep_skb == NULL) {
printk(KERN_INFO "Couldn't alloc sweep_skb\n");
rc = -ENOMEM;
goto done;
/* rc = -ENOMEM; */
goto nomem;
}
header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
if (!header) {
dev_kfree_skb_any(sweep_skb);
rc = -ENOMEM;
goto done;
/* rc = -ENOMEM; */
goto nomem;
}
header->th.th_seg = 0x00 ;
@ -621,12 +637,10 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
return;
done:
if (rc != 0) {
grp->in_sweep = 0;
ctcm_clear_busy(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
nomem:
grp->in_sweep = 0;
ctcm_clear_busy(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
return;
}
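
ctcmpc_send_sweep_req(), and ctcmpc_transmit_skb() further down, now funnel every allocation failure through a single nomem label instead of repeating the cleanup at each failure site, the usual kernel idiom for consolidating error paths. A self-contained sketch of the staged variant of that idiom (all names invented):

#include <stdlib.h>
#include <string.h>

struct demo_pkt {
	char *header;
	char *body;
};

/* Single exit label per failure class: each goto skips only the
 * cleanup for resources not yet acquired. */
static int demo_build(struct demo_pkt *pkt, size_t len)
{
	pkt->header = malloc(16);
	if (!pkt->header)
		goto nomem;

	pkt->body = malloc(len);
	if (!pkt->body)
		goto free_header;

	memset(pkt->header, 0, 16);
	memset(pkt->body, 0, len);
	return 0;

free_header:
	free(pkt->header);
	pkt->header = NULL;
nomem:
	return -1;	/* the driver would also signal MPCG_EVENT_INOP here */
}

int main(void)
{
	struct demo_pkt pkt = { 0 };
	int rc = demo_build(&pkt, 64);

	free(pkt.body);
	free(pkt.header);
	return rc ? 1 : 0;
}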
@ -648,11 +662,9 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
unsigned long saveflags = 0; /* avoids compiler warning */
__u16 block_len;
if (do_debug)
ctcm_pr_debug(
"ctcm enter: %s(): %s cp=%i ch=0x%p id=%s state=%s\n",
__FUNCTION__, dev->name, smp_processor_id(), ch,
ch->id, fsm_getstate_str(ch->fsm));
CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
__func__, dev->name, smp_processor_id(), ch,
ch->id, fsm_getstate_str(ch->fsm));
if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
@ -660,14 +672,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
if (!p_header) {
printk(KERN_WARNING "ctcm: OUT OF MEMORY IN %s():"
" Data Lost \n", __FUNCTION__);
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto done;
goto nomem_exit;
}
p_header->pdu_offset = skb->len;
@ -682,13 +688,10 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
PDU_HEADER_LENGTH);
if (do_debug_data) {
ctcm_pr_debug("ctcm: %s() Putting on collect_q"
" - skb len: %04x \n", __FUNCTION__, skb->len);
ctcm_pr_debug("ctcm: %s() pdu header and data"
" for up to 32 bytes\n", __FUNCTION__);
ctcmpc_dump32((char *)skb->data, skb->len);
}
CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n"
"pdu header and data for up to 32 bytes:\n",
__func__, dev->name, skb->len);
CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
skb_queue_tail(&ch->collect_queue, skb);
ch->collect_len += skb->len;
@ -713,12 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
if (hi) {
nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY"
"- Data Lost \n", __FUNCTION__);
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto done;
goto nomem_exit;
} else {
memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
atomic_inc(&nskb->users);
@ -730,15 +728,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
if (!p_header) {
printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY"
": Data Lost \n", __FUNCTION__);
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
if (!p_header)
goto nomem_exit;
p_header->pdu_offset = skb->len;
p_header->pdu_proto = 0x01;
@ -768,15 +759,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
header = kmalloc(TH_HEADER_LENGTH, gfp_type());
if (!header) {
printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY: Data Lost \n",
__FUNCTION__);
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
if (!header)
goto nomem_exit;
header->th_seg = 0x00;
header->th_ch_flag = TH_HAS_PDU; /* Normal data */
@ -785,41 +769,31 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->th_seq_num++;
header->th_seq_num = ch->th_seq_num;
if (do_debug_data)
ctcm_pr_debug("ctcm: %s() ToVTAM_th_seq= %08x\n" ,
__FUNCTION__, ch->th_seq_num);
CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
__func__, dev->name, ch->th_seq_num);
/* put the TH on the packet */
memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);
kfree(header);
if (do_debug_data) {
ctcm_pr_debug("ctcm: %s(): skb len: %04x \n",
__FUNCTION__, skb->len);
ctcm_pr_debug("ctcm: %s(): pdu header and data for up to 32 "
"bytes sent to vtam\n", __FUNCTION__);
ctcmpc_dump32((char *)skb->data, skb->len);
}
CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
"up to 32 bytes sent to vtam:\n",
__func__, dev->name, skb->len);
CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
ch->ccw[4].count = skb->len;
if (set_normalized_cda(&ch->ccw[4], skb->data)) {
/*
* idal allocation failed, try via copying to
* trans_skb. trans_skb usually has a pre-allocated
* idal.
* idal allocation failed, try via copying to trans_skb.
* trans_skb usually has a pre-allocated idal.
*/
if (ctcm_checkalloc_buffer(ch)) {
/*
* Remove our header. It gets added
* again on retransmit.
* Remove our header.
* It gets added again on retransmit.
*/
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
printk(KERN_WARNING "ctcm: %s()OUT OF MEMORY:"
" Data Lost \n", __FUNCTION__);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto done;
goto nomem_exit;
}
skb_reset_tail_pointer(ch->trans_skb);
@ -829,14 +803,11 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
atomic_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
if (do_debug_data) {
ctcm_pr_debug("ctcm: %s() TRANS skb len: %d \n",
__FUNCTION__, ch->trans_skb->len);
ctcm_pr_debug("ctcm: %s up to 32 bytes of data"
" sent to vtam\n", __FUNCTION__);
ctcmpc_dump32((char *)ch->trans_skb->data,
ch->trans_skb->len);
}
CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
"up to 32 bytes sent to vtam:\n",
__func__, dev->name, ch->trans_skb->len);
CTCM_D3_DUMP((char *)ch->trans_skb->data,
min_t(int, 32, ch->trans_skb->len));
} else {
skb_queue_tail(&ch->io_queue, skb);
ccw_idx = 3;
@ -865,13 +836,21 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
}
if (ch->th_seq_num > 0xf0000000) /* Chose 4Billion at random. */
if (ch->th_seq_num > 0xf0000000) /* Chose at random. */
ctcmpc_send_sweep_req(ch);
goto done;
nomem_exit:
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
"%s(%s): MEMORY allocation ERROR\n",
CTCM_FUNTAIL, ch->id);
rc = -ENOMEM;
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:
if (do_debug)
ctcm_pr_debug("ctcm exit: %s %s()\n", dev->name, __FUNCTION__);
return 0;
CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
return rc;
}
/**
@ -888,20 +867,19 @@ done:
/* first merge version - leaving both functions separated */
static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
{
int rc = 0;
struct ctcm_priv *priv;
CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
priv = dev->priv;
struct ctcm_priv *priv = dev->priv;
if (skb == NULL) {
ctcm_pr_warn("%s: NULL sk_buff passed\n", dev->name);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): NULL sk_buff passed",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
return 0;
}
if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
ctcm_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
dev->name, LL_HEADER_LENGTH + 2);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Got sk_buff with head room < %ld bytes",
CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2);
dev_kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
@ -925,51 +903,43 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
rc = 1;
return rc;
return 1;
return 0;
}
/* unmerged MPC variant of ctcm_tx */
static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
int len = 0;
struct ctcm_priv *priv = NULL;
struct mpc_group *grp = NULL;
struct ctcm_priv *priv = dev->priv;
struct mpc_group *grp = priv->mpcg;
struct sk_buff *newskb = NULL;
if (do_debug)
ctcm_pr_debug("ctcmpc enter: %s(): skb:%0lx\n",
__FUNCTION__, (unsigned long)skb);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"ctcmpc enter: %s(): skb:%0lx\n",
__FUNCTION__, (unsigned long)skb);
priv = dev->priv;
grp = priv->mpcg;
/*
* Some sanity checks ...
*/
if (skb == NULL) {
ctcm_pr_warn("ctcmpc: %s: NULL sk_buff passed\n", dev->name);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): NULL sk_buff passed",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
goto done;
}
if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_WARN,
"%s: Got sk_buff with head room < %ld bytes\n",
dev->name, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
"%s(%s): Got sk_buff with head room < %ld bytes",
CTCM_FUNTAIL, dev->name,
TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
if (do_debug_data)
ctcmpc_dump32((char *)skb->data, skb->len);
CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);
if (!newskb) {
printk(KERN_WARNING "ctcmpc: %s() OUT OF MEMORY-"
"Data Lost\n",
__FUNCTION__);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
"%s: %s: __dev_alloc_skb failed",
__func__, dev->name);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
@ -993,9 +963,9 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
dev_kfree_skb_any(skb);
printk(KERN_INFO "ctcmpc: %s() DATA RCVD - MPC GROUP "
"NOT ACTIVE - DROPPED\n",
__FUNCTION__);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): inactive MPCGROUP - dropped",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
@ -1003,8 +973,9 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
}
if (ctcm_test_and_set_busy(dev)) {
printk(KERN_WARNING "%s:DEVICE ERR - UNRECOVERABLE DATA LOSS\n",
__FUNCTION__);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device busy - dropped",
CTCM_FUNTAIL, dev->name);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
@ -1015,12 +986,9 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR"
": Data Lost \n",
__FUNCTION__);
printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR"
" - UNRECOVERABLE DATA LOSS\n",
__FUNCTION__);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device error - dropped",
CTCM_FUNTAIL, dev->name);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
@ -1054,8 +1022,6 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
struct ctcm_priv *priv;
int max_bufsize;
CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
if (new_mtu < 576 || new_mtu > 65527)
return -EINVAL;
@ -1087,30 +1053,13 @@ static struct net_device_stats *ctcm_stats(struct net_device *dev)
return &((struct ctcm_priv *)dev->priv)->stats;
}
static void ctcm_netdev_unregister(struct net_device *dev)
{
CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
if (!dev)
return;
unregister_netdev(dev);
}
static int ctcm_netdev_register(struct net_device *dev)
{
CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
return register_netdev(dev);
}
static void ctcm_free_netdevice(struct net_device *dev)
{
struct ctcm_priv *priv;
struct mpc_group *grp;
CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
if (!dev)
return;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s(%s)", CTCM_FUNTAIL, dev->name);
priv = dev->priv;
if (priv) {
grp = priv->mpcg;
@ -1171,7 +1120,9 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);
if (!dev) {
ctcm_pr_err("%s: Out of memory\n", __FUNCTION__);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s: MEMORY allocation ERROR",
CTCM_FUNTAIL);
return NULL;
}
dev->priv = priv;
@ -1209,6 +1160,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
}
CTCMY_DBF_DEV(SETUP, dev, "finished");
return dev;
}
@ -1226,18 +1178,24 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
struct net_device *dev;
struct ctcm_priv *priv;
struct ccwgroup_device *cgdev;
int cstat;
int dstat;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"Enter %s(%s)", CTCM_FUNTAIL, &cdev->dev.bus_id);
CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __FUNCTION__);
if (ctcm_check_irb_error(cdev, irb))
return;
cgdev = dev_get_drvdata(&cdev->dev);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
/* Check for unsolicited interrupts. */
if (cgdev == NULL) {
ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n",
cdev->dev.bus_id, irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
ctcm_pr_warn("ctcm: Got unsolicited irq: c-%02x d-%02x\n",
cstat, dstat);
return;
}
@ -1254,26 +1212,22 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
return;
}
dev = (struct net_device *)(ch->netdev);
dev = ch->netdev;
if (dev == NULL) {
ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n",
__FUNCTION__, cdev->dev.bus_id, ch);
__func__, cdev->dev.bus_id, ch);
return;
}
if (do_debug)
ctcm_pr_debug("%s: interrupt for device: %s "
"received c-%02x d-%02x\n",
dev->name,
ch->id,
irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): int. for %s: cstat=%02x dstat=%02x",
CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
/* Copy interruption response block. */
memcpy(ch->irb, irb, sizeof(struct irb));
/* Check for good subchannel return code, otherwise error message */
if (irb->scsw.cmd.cstat) {
/* Check for good subchannel return code, otherwise error message */
fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
dev->name, ch->id, irb->scsw.cmd.cstat,
@ -1283,6 +1237,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
/* Check the reason-code of a unit check */
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if ((irb->ecw[0] & ch->sense_rc) == 0)
/* print it only once */
CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
"%s(%s): sense=%02x, ds=%02x",
CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
ccw_unit_check(ch, irb->ecw[0]);
return;
}
@ -1320,14 +1279,18 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
struct ctcm_priv *priv;
int rc;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s %p", __FUNCTION__, cgdev);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s %p",
__func__, cgdev);
if (!get_device(&cgdev->dev))
return -ENODEV;
priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
if (!priv) {
ctcm_pr_err("%s: Out of memory\n", __FUNCTION__);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: memory allocation failure",
CTCM_FUNTAIL);
put_device(&cgdev->dev);
return -ENOMEM;
}
@ -1364,10 +1327,13 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type,
int ccw_num;
int rc = 0;
CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s(%s), type %d, proto %d",
__func__, cdev->dev.bus_id, type, priv->protocol);
ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (ch == NULL)
goto nomem_return;
return -ENOMEM;
ch->protocol = priv->protocol;
if (IS_MPC(priv)) {
@ -1478,7 +1444,7 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type,
if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s (%s) already in list, using old entry",
__FUNCTION__, (*c)->id);
__func__, (*c)->id);
goto free_return;
}
@ -1498,11 +1464,10 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type,
return 0;
nomem_return:
ctcm_pr_warn("ctcm: Out of memory in %s\n", __FUNCTION__);
rc = -ENOMEM;
free_return: /* note that all channel pointers are 0 or valid */
kfree(ch->ccw); /* TODO: check that again */
kfree(ch->ccw);
kfree(ch->discontact_th);
kfree_fsm(ch->fsm);
kfree(ch->irb);
@ -1540,48 +1505,48 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
enum channel_types type;
struct ctcm_priv *priv;
struct net_device *dev;
struct ccw_device *cdev0;
struct ccw_device *cdev1;
int ret;
CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
priv = dev_get_drvdata(&cgdev->dev);
if (!priv)
return -ENODEV;
type = get_channel_type(&cgdev->cdev[0]->id);
cdev0 = cgdev->cdev[0];
cdev1 = cgdev->cdev[1];
snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
type = get_channel_type(&cdev0->id);
ret = add_channel(cgdev->cdev[0], type, priv);
snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cdev0->dev.bus_id);
snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cdev1->dev.bus_id);
ret = add_channel(cdev0, type, priv);
if (ret)
return ret;
ret = add_channel(cgdev->cdev[1], type, priv);
ret = add_channel(cdev1, type, priv);
if (ret)
return ret;
ret = ccw_device_set_online(cgdev->cdev[0]);
ret = ccw_device_set_online(cdev0);
if (ret != 0) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN,
"ccw_device_set_online (cdev[0]) failed ");
ctcm_pr_warn("ccw_device_set_online (cdev[0]) failed "
"with ret = %d\n", ret);
/* may be ok to fail now - can be done later */
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s) set_online rc=%d",
CTCM_FUNTAIL, read_id, ret);
}
ret = ccw_device_set_online(cgdev->cdev[1]);
ret = ccw_device_set_online(cdev1);
if (ret != 0) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN,
"ccw_device_set_online (cdev[1]) failed ");
ctcm_pr_warn("ccw_device_set_online (cdev[1]) failed "
"with ret = %d\n", ret);
/* may be ok to fail now - can be done later */
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s) set_online rc=%d",
CTCM_FUNTAIL, write_id, ret);
}
dev = ctcm_init_netdevice(priv);
if (dev == NULL) {
ctcm_pr_warn("ctcm_init_netdevice failed\n");
goto out;
}
if (dev == NULL)
goto out;
for (direction = READ; direction <= WRITE; direction++) {
priv->channel[direction] =
@ -1590,8 +1555,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
if (priv->channel[direction] == NULL) {
if (direction == WRITE)
channel_free(priv->channel[READ]);
ctcm_free_netdevice(dev);
goto out;
goto out_dev;
}
priv->channel[direction]->netdev = dev;
priv->channel[direction]->protocol = priv->protocol;
@ -1600,26 +1564,24 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
/* sysfs magic */
SET_NETDEV_DEV(dev, &cgdev->dev);
if (ctcm_netdev_register(dev) != 0) {
ctcm_free_netdevice(dev);
goto out;
}
if (register_netdev(dev))
goto out_dev;
if (ctcm_add_attributes(&cgdev->dev)) {
ctcm_netdev_unregister(dev);
/* dev->priv = NULL; why that ???? */
ctcm_free_netdevice(dev);
goto out;
unregister_netdev(dev);
goto out_dev;
}
strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"setup(%s) ok : r/w = %s / %s, proto : %d",
dev->name, priv->channel[READ]->id,
"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
priv->channel[READ]->id,
priv->channel[WRITE]->id, priv->protocol);
return 0;
out_dev:
ctcm_free_netdevice(dev);
out:
ccw_device_set_offline(cgdev->cdev[1]);
ccw_device_set_offline(cgdev->cdev[0]);
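The hunks above rework ctcm_new_device()'s error handling: a single out_dev label frees the net_device and falls through to out, which takes both CCW devices offline again. For readers unfamiliar with the idiom, a minimal stand-alone sketch of this labelled-exit cleanup pattern follows; setup_a/setup_b/teardown_a and the return codes are invented for illustration and are not the driver's code.

/* Hypothetical goto-cleanup sketch; only the pattern mirrors the driver. */
#include <stdio.h>

static int setup_a(void)     { return 0; }   /* succeeds */
static int setup_b(void)     { return -1; }  /* fails, to exercise unwinding */
static void teardown_a(void) { puts("undo a"); }

static int new_device(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;        /* nothing to undo yet */

	ret = setup_b();
	if (ret)
		goto out_a;      /* undo only what already succeeded */

	return 0;                /* success: keep both resources */

out_a:
	teardown_a();
out:
	return ret;
}

int main(void)
{
	return new_device() ? 1 : 0;
}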
@ -1658,8 +1620,7 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
channel_free(priv->channel[WRITE]);
if (dev) {
ctcm_netdev_unregister(dev);
/* dev->priv = NULL; why that ??? */
unregister_netdev(dev);
ctcm_free_netdevice(dev);
}
@ -1682,13 +1643,16 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
static void ctcm_remove_device(struct ccwgroup_device *cgdev)
{
struct ctcm_priv *priv;
struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, __FUNCTION__);
BUG_ON(priv == NULL);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"removing device %s, r/w = %s/%s, proto : %d",
priv->channel[READ]->netdev->name,
priv->channel[READ]->id, priv->channel[WRITE]->id,
priv->protocol);
priv = dev_get_drvdata(&cgdev->dev);
if (!priv)
return;
if (cgdev->state == CCWGROUP_ONLINE)
ctcm_shutdown_device(cgdev);
ctcm_remove_files(&cgdev->dev);
@ -1748,8 +1712,6 @@ static int __init ctcm_init(void)
ret = ctcm_register_dbf_views();
if (ret) {
ctcm_pr_crit("ctcm_init failed with ctcm_register_dbf_views "
"rc = %d\n", ret);
return ret;
}
ret = register_cu3088_discipline(&ctcm_group_driver);

View File

@ -22,9 +22,9 @@
#define CTC_DRIVER_NAME "ctcm"
#define CTC_DEVICE_NAME "ctc"
#define CTC_DEVICE_GENE "ctc%d"
#define MPC_DEVICE_NAME "mpc"
#define MPC_DEVICE_GENE "mpc%d"
#define CTC_DEVICE_GENE CTC_DEVICE_NAME "%d"
#define MPC_DEVICE_GENE MPC_DEVICE_NAME "%d"
#define CHANNEL_FLAGS_READ 0
#define CHANNEL_FLAGS_WRITE 1
@ -48,6 +48,30 @@
#define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg)
#define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg)
#define CTCM_PR_DEBUG(fmt, arg...) \
do { \
if (do_debug) \
printk(KERN_DEBUG fmt, ##arg); \
} while (0)
#define CTCM_PR_DBGDATA(fmt, arg...) \
do { \
if (do_debug_data) \
printk(KERN_DEBUG fmt, ##arg); \
} while (0)
#define CTCM_D3_DUMP(buf, len) \
do { \
if (do_debug_data) \
ctcmpc_dumpit(buf, len); \
} while (0)
#define CTCM_CCW_DUMP(buf, len) \
do { \
if (do_debug_ccw) \
ctcmpc_dumpit(buf, len); \
} while (0)
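The four wrappers above gate their output on the driver's do_debug, do_debug_data and do_debug_ccw flags, so callers never need an explicit if around a debug print. A stand-alone sketch of the same pattern, with printf standing in for printk(KERN_DEBUG ...) and the flag hard-coded purely for illustration:

/* Illustrative only: do_debug is a plain variable here, not the
 * driver's module parameter, and printf replaces printk. */
#include <stdio.h>

static int do_debug = 1;

#define PR_DEBUG(fmt, arg...)			\
	do {					\
		if (do_debug)			\
			printf(fmt, ##arg);	\
	} while (0)

int main(void)
{
	PR_DEBUG("%s: channel %s ready\n", "sketch", "ch-0.0.1000");

	do_debug = 0;
	PR_DEBUG("suppressed when the flag is clear\n");  /* no output */
	return 0;
}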
/*
* CCW commands, used in this driver.
*/
@ -161,8 +185,9 @@ struct channel {
fsm_instance *fsm; /* finite state machine of this channel */
struct net_device *netdev; /* corresponding net_device */
struct ctcm_profile prof;
unsigned char *trans_skb_data;
__u8 *trans_skb_data;
__u16 logflags;
__u8 sense_rc; /* last unit check sense code report control */
};
struct ctcm_priv {

File diff suppressed because it is too large

View File

@ -231,7 +231,7 @@ static inline void ctcmpc_dump32(char *buf, int len)
int ctcmpc_open(struct net_device *);
void ctcm_ccw_check_rc(struct channel *, int, char *);
void mpc_group_ready(unsigned long adev);
int mpc_channel_action(struct channel *ch, int direction, int action);
void mpc_channel_action(struct channel *ch, int direction, int action);
void mpc_action_send_discontact(unsigned long thischan);
void mpc_action_discontact(fsm_instance *fi, int event, void *arg);
void ctcmpc_bh(unsigned long thischan);

View File

@ -2651,7 +2651,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tag = (u16 *)(new_skb->data + 12);
*tag = __constant_htons(ETH_P_8021Q);
*(tag + 1) = htons(vlan_tx_tag_get(new_skb));
VLAN_TX_SKB_CB(new_skb)->magic = 0;
new_skb->vlan_tci = 0;
}
}

View File

@ -103,10 +103,6 @@ struct fs_mii_bb_platform_info {
struct fs_mii_bit mdio_dir;
struct fs_mii_bit mdio_dat;
struct fs_mii_bit mdc_dat;
int mdio_port; /* port & bit for MDIO */
int mdio_bit;
int mdc_port; /* port & bit for MDC */
int mdc_bit;
int delay; /* delay in us */
int irq[32]; /* irqs per phy's */
};
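The hunk above drops the separate mdio_port/mdio_bit/mdc_port/mdc_bit integers because the fs_mii_bit descriptors (mdio_dir, mdio_dat, mdc_dat) already identify the pins; only the per-transition delay stays as a plain integer. As background, a generic bit-banged MDIO write looks roughly like the sketch below; set_mdc(), set_mdio() and wait_us() are invented stand-ins for board-specific pin accessors, not fs_enet APIs.

/* Generic MDIO bit-bang sketch (IEEE 802.3 clause 22 write frame):
 * preamble, start/opcode, PHY and register addresses, turnaround, data. */
#include <stdint.h>

static void set_mdc(int level)  { (void)level; /* drive the MDC pin */ }
static void set_mdio(int level) { (void)level; /* drive the MDIO pin */ }
static void wait_us(int us)     { (void)us;    /* busy-wait 'delay' us */ }

static void send_bit(int bit, int delay)
{
	set_mdio(bit);           /* data changes while the clock is low */
	wait_us(delay);
	set_mdc(1);
	wait_us(delay);
	set_mdc(0);
}

static void mdio_write(int phy, int reg, uint16_t val, int delay)
{
	/* ST=01 OP=01 PHYAD(5) REGAD(5) TA=10 DATA(16), sent MSB first */
	uint32_t frame = (0x5u << 28) | ((uint32_t)phy << 23) |
			 ((uint32_t)reg << 18) | (0x2u << 16) | val;
	int i;

	for (i = 0; i < 32; i++)         /* 32-bit preamble of ones */
		send_bit(1, delay);
	for (i = 31; i >= 0; i--)
		send_bit((frame >> i) & 1, delay);
}

int main(void)
{
	mdio_write(0x01, 0x00, 0x1140, 1);   /* example register write */
	return 0;
}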