[PATCH] chelsio: add support for other 10G boards

Add support for other versions of the 10G Chelsio boards.
This is basically a port of the vendor driver with the
TOE features removed.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Commit metadata:
Author: Stephen Hemminger, 2006-12-01 16:36:16 -08:00 (committed by Jeff Garzik)
Parent: 415294ecbb
Commit: f1d3d38af7
23 changed files with 6040 additions and 370 deletions

View File

@ -2361,9 +2361,9 @@ config CHELSIO_T1
tristate "Chelsio 10Gb Ethernet support"
depends on PCI
help
This driver supports Chelsio N110 and N210 models 10Gb Ethernet
cards. More information about adapter features and performance
tuning is in <file:Documentation/networking/cxgb.txt>.
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
performance tuning is in <file:Documentation/networking/cxgb.txt>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.

View File

@ -1,11 +1,10 @@
#
# Chelsio 10Gb NIC driver for Linux.
# Chelsio T1 driver
#
obj-$(CONFIG_CHELSIO_T1) += cxgb.o
EXTRA_CFLAGS += -Idrivers/net/chelsio $(DEBUG_FLAGS)
cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \
mv88x201x.o my3126.o $(cxgb-y)
cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o

View File

@ -45,6 +45,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/init.h>
@ -53,13 +54,30 @@
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
#define DRV_VERSION "2.1.1"
#define DRV_VERSION "2.2"
#define PFX DRV_NAME ": "
#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
/*
* More powerful macro that selectively prints messages based on msg_enable.
* For info and debugging messages.
*/
#define CH_MSG(adapter, level, category, fmt, ...) do { \
if ((adapter)->msg_enable & NETIF_MSG_##category) \
printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
## __VA_ARGS__); \
} while (0)
#ifdef DEBUG
# define CH_DBG(adapter, category, fmt, ...) \
CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
# define CH_DBG(fmt, ...)
#endif
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
@ -71,10 +89,6 @@
typedef struct adapter adapter_t;
void t1_elmer0_ext_intr(adapter_t *adapter);
void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
int speed, int duplex, int fc);
struct t1_rx_mode {
struct net_device *dev;
u32 idx;
@ -97,26 +111,53 @@ static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
}
#define MAX_NPORTS 4
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
#define NMTUS 8
#define TCB_SIZE 128
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
enum {
CHBT_BOARD_N110,
CHBT_BOARD_N210
CHBT_BOARD_N210,
CHBT_BOARD_7500,
CHBT_BOARD_8000,
CHBT_BOARD_CHT101,
CHBT_BOARD_CHT110,
CHBT_BOARD_CHT210,
CHBT_BOARD_CHT204,
CHBT_BOARD_CHT204V,
CHBT_BOARD_CHT204E,
CHBT_BOARD_CHN204,
CHBT_BOARD_COUGAR,
CHBT_BOARD_6800,
CHBT_BOARD_SIMUL,
};
enum {
CHBT_TERM_FPGA,
CHBT_TERM_T1,
CHBT_TERM_T2
CHBT_TERM_T2,
CHBT_TERM_T3
};
enum {
CHBT_MAC_CHELSIO_A,
CHBT_MAC_IXF1010,
CHBT_MAC_PM3393,
CHBT_MAC_VSC7321,
CHBT_MAC_DUMMY
};
enum {
CHBT_PHY_88E1041,
CHBT_PHY_88E1111,
CHBT_PHY_88X2010,
CHBT_PHY_XPAK,
CHBT_PHY_MY3126,
CHBT_PHY_8244,
CHBT_PHY_DUMMY
};
enum {
@ -150,16 +191,43 @@ struct chelsio_pci_params {
unsigned char is_pcix;
};
struct tp_params {
unsigned int pm_size;
unsigned int cm_size;
unsigned int pm_rx_base;
unsigned int pm_tx_base;
unsigned int pm_rx_pg_size;
unsigned int pm_tx_pg_size;
unsigned int pm_rx_num_pgs;
unsigned int pm_tx_num_pgs;
unsigned int rx_coalescing_size;
unsigned int use_5tuple_mode;
};
struct mc5_params {
unsigned int mode; /* selects MC5 width */
unsigned int nservers; /* size of server region */
unsigned int nroutes; /* size of routing region */
};
/* Default MC5 region sizes */
#define DEFAULT_SERVER_REGION_LEN 256
#define DEFAULT_RT_REGION_LEN 1024
struct adapter_params {
struct sge_params sge;
struct mc5_params mc5;
struct tp_params tp;
struct chelsio_pci_params pci;
const struct board_info *brd_info;
unsigned short mtus[NMTUS];
unsigned int nports; /* # of ethernet ports */
unsigned int stats_update_period;
unsigned short chip_revision;
unsigned char chip_version;
unsigned char is_asic;
};
struct link_config {
@ -207,6 +275,7 @@ struct adapter {
/* Terminator modules. */
struct sge *sge;
struct peespi *espi;
struct petp *tp;
struct port_info port[MAX_NPORTS];
struct work_struct stats_update_task;
@ -217,6 +286,7 @@ struct adapter {
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
u32 slow_intr_mask;
int t1powersave;
};
enum { /* adapter flags */
@ -255,6 +325,11 @@ struct board_info {
const char *desc;
};
static inline int t1_is_asic(const adapter_t *adapter)
{
return adapter->params.is_asic;
}
extern struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
@ -284,13 +359,15 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
return board_info(adap)->clock_core / 1000000;
}
extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
extern void t1_interrupts_enable(adapter_t *adapter);
extern void t1_interrupts_disable(adapter_t *adapter);
extern void t1_interrupts_clear(adapter_t *adapter);
extern int elmer0_ext_intr_handler(adapter_t *adapter);
extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
extern int t1_slow_intr_handler(adapter_t *adapter);
extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
@ -304,9 +381,7 @@ extern int t1_init_hw_modules(adapter_t *adapter);
extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
extern void t1_free_sw_modules(adapter_t *adapter);
extern void t1_fatal_err(adapter_t *adapter);
extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
extern void t1_link_changed(adapter_t *adapter, int port_id);
extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
int speed, int duplex, int pause);
#endif /* _CXGB_COMMON_H_ */

View File

@ -52,7 +52,14 @@ struct mdio_ops {
/* PHY interrupt types */
enum {
cphy_cause_link_change = 0x1,
cphy_cause_error = 0x2
cphy_cause_error = 0x2,
cphy_cause_fifo_error = 0x3
};
enum {
PHY_LINK_UP = 0x1,
PHY_AUTONEG_RDY = 0x2,
PHY_AUTONEG_EN = 0x4
};
struct cphy;
@ -81,7 +88,18 @@ struct cphy_ops {
/* A PHY instance */
struct cphy {
int addr; /* PHY address */
int state; /* Link status state machine */
adapter_t *adapter; /* associated adapter */
struct work_struct phy_update;
u16 bmsr;
int count;
int act_count;
int act_on;
u32 elmer_gpo;
struct cphy_ops *ops; /* PHY operations */
int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
@ -142,6 +160,10 @@ struct gphy {
int (*reset)(adapter_t *adapter);
};
extern struct gphy t1_my3126_ops;
extern struct gphy t1_mv88e1xxx_ops;
extern struct gphy t1_vsc8244_ops;
extern struct gphy t1_xpak_ops;
extern struct gphy t1_mv88x201x_ops;
extern struct gphy t1_dummy_phy_ops;

View File

@ -46,24 +46,385 @@
#endif
enum CPL_opcode {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_OPEN_RPL = 0x2,
CPL_PASS_ESTABLISH = 0x3,
CPL_PASS_ACCEPT_REQ = 0xE,
CPL_PASS_ACCEPT_RPL = 0x4,
CPL_ACT_OPEN_REQ = 0x5,
CPL_ACT_OPEN_RPL = 0x6,
CPL_CLOSE_CON_REQ = 0x7,
CPL_CLOSE_CON_RPL = 0x8,
CPL_CLOSE_LISTSRV_REQ = 0x9,
CPL_CLOSE_LISTSRV_RPL = 0xA,
CPL_ABORT_REQ = 0xB,
CPL_ABORT_RPL = 0xC,
CPL_PEER_CLOSE = 0xD,
CPL_ACT_ESTABLISH = 0x17,
CPL_GET_TCB = 0x24,
CPL_GET_TCB_RPL = 0x25,
CPL_SET_TCB = 0x26,
CPL_SET_TCB_FIELD = 0x27,
CPL_SET_TCB_RPL = 0x28,
CPL_PCMD = 0x29,
CPL_PCMD_READ = 0x31,
CPL_PCMD_READ_RPL = 0x32,
CPL_RX_DATA = 0xA0,
CPL_RX_DATA_DDP = 0xA1,
CPL_RX_DATA_ACK = 0xA3,
CPL_RX_PKT = 0xAD,
CPL_RX_ISCSI_HDR = 0xAF,
CPL_TX_DATA_ACK = 0xB0,
CPL_TX_DATA = 0xB1,
CPL_TX_PKT = 0xB2,
CPL_TX_PKT_LSO = 0xB6,
CPL_RTE_DELETE_REQ = 0xC0,
CPL_RTE_DELETE_RPL = 0xC1,
CPL_RTE_WRITE_REQ = 0xC2,
CPL_RTE_WRITE_RPL = 0xD3,
CPL_RTE_READ_REQ = 0xC3,
CPL_RTE_READ_RPL = 0xC4,
CPL_L2T_WRITE_REQ = 0xC5,
CPL_L2T_WRITE_RPL = 0xD4,
CPL_L2T_READ_REQ = 0xC6,
CPL_L2T_READ_RPL = 0xC7,
CPL_SMT_WRITE_REQ = 0xC8,
CPL_SMT_WRITE_RPL = 0xD5,
CPL_SMT_READ_REQ = 0xC9,
CPL_SMT_READ_RPL = 0xCA,
CPL_ARP_MISS_REQ = 0xCD,
CPL_ARP_MISS_RPL = 0xCE,
CPL_MIGRATE_C2T_REQ = 0xDC,
CPL_MIGRATE_C2T_RPL = 0xDD,
CPL_ERROR = 0xD7,
/* internal: driver -> TOM */
CPL_MSS_CHANGE = 0xE1
};
enum { /* TX_PKT_LSO ethernet types */
#define NUM_CPL_CMDS 256
enum CPL_error {
CPL_ERR_NONE = 0,
CPL_ERR_TCAM_PARITY = 1,
CPL_ERR_TCAM_FULL = 3,
CPL_ERR_CONN_RESET = 20,
CPL_ERR_CONN_EXIST = 22,
CPL_ERR_ARP_MISS = 23,
CPL_ERR_BAD_SYN = 24,
CPL_ERR_CONN_TIMEDOUT = 30,
CPL_ERR_XMIT_TIMEDOUT = 31,
CPL_ERR_PERSIST_TIMEDOUT = 32,
CPL_ERR_FINWAIT2_TIMEDOUT = 33,
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_GENERAL = 99
};
enum {
CPL_CONN_POLICY_AUTO = 0,
CPL_CONN_POLICY_ASK = 1,
CPL_CONN_POLICY_DENY = 3
};
enum {
ULP_MODE_NONE = 0,
ULP_MODE_TCPDDP = 1,
ULP_MODE_ISCSI = 2,
ULP_MODE_IWARP = 3,
ULP_MODE_SSL = 4
};
enum {
CPL_PASS_OPEN_ACCEPT,
CPL_PASS_OPEN_REJECT
};
enum {
CPL_ABORT_SEND_RST = 0,
CPL_ABORT_NO_RST,
CPL_ABORT_POST_CLOSE_REQ = 2
};
enum { // TX_PKT_LSO ethernet types
CPL_ETH_II,
CPL_ETH_II_VLAN,
CPL_ETH_802_3,
CPL_ETH_802_3_VLAN
};
struct cpl_rx_data {
/*
 * First word of every CPL message: the CPL opcode lives in the top 8 bits,
 * the connection TID in the low 24 bits (see S_OPCODE/G_TID below).
 */
union opcode_tid {
u32 opcode_tid;
u8 opcode;
};
/* Field accessors for the packed opcode/TID word. */
#define S_OPCODE 24
#define V_OPCODE(x) ((x) << S_OPCODE)
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
#define G_TID(x) ((x) & 0xFFFFFF)
/* tid is assumed to be 24-bits */
#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
/* Raw (big-endian, on the wire) opcode/TID word of a CPL command. */
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
/* extract the TID from a CPL command (converts from network byte order) */
#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
/*
 * Negotiated TCP options as reported by the terminator in CPL messages.
 * The flag bits are declared in opposite order for the two bit-field
 * endiannesses so the in-memory layout matches the hardware either way.
 */
struct tcp_options {
u16 mss; /* maximum segment size */
u8 wsf; /* window scale factor */
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd:4;
u8 ecn:1; /* ECN negotiated */
u8 sack:1; /* SACK permitted */
u8 tstamp:1; /* timestamps negotiated */
#else
u8 tstamp:1;
u8 sack:1;
u8 ecn:1;
u8 rsvd:4;
#endif
};
struct cpl_pass_open_req {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 opt0h;
u32 opt0l;
u32 peer_netmask;
u32 opt1;
};
struct cpl_pass_open_rpl {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u8 resvd[7];
u8 status;
};
struct cpl_pass_establish {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 tos_tid;
u8 l2t_idx;
u8 rsvd[3];
u32 snd_isn;
u32 rcv_isn;
};
/*
 * CPL_PASS_ACCEPT_REQ: delivered for an incoming SYN on a passive-open
 * listener; carries the 4-tuple, the peer's TCP options and the L2
 * addressing info needed to accept or reject the connection.
 */
struct cpl_pass_accept_req {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 tos_tid;
struct tcp_options tcp_options; /* options from the peer's SYN */
u8 dst_mac[6];
u16 vlan_tag;
u8 src_mac[6];
u8 rsvd[2];
u32 rcv_isn; /* peer's initial sequence number */
u32 unknown_tcp_options;
};
struct cpl_pass_accept_rpl {
union opcode_tid ot;
u32 rsvd0;
u32 rsvd1;
u32 peer_ip;
u32 opt0h;
union {
u32 opt0l;
struct {
u8 rsvd[3];
u8 status;
};
};
};
struct cpl_act_open_req {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 opt0h;
u32 opt0l;
u32 iff_vlantag;
u32 rsvd;
};
struct cpl_act_open_rpl {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 new_tid;
u8 rsvd[3];
u8 status;
};
struct cpl_act_establish {
union opcode_tid ot;
u16 local_port;
u16 peer_port;
u32 local_ip;
u32 peer_ip;
u32 tos_tid;
u32 rsvd;
u32 snd_isn;
u32 rcv_isn;
};
struct cpl_get_tcb {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_get_tcb_rpl {
union opcode_tid ot;
u16 len;
u8 rsvd;
u8 status;
};
struct cpl_set_tcb {
union opcode_tid ot;
u16 len;
u16 rsvd;
};
struct cpl_set_tcb_field {
union opcode_tid ot;
u8 rsvd[3];
u8 offset;
u32 mask;
u32 val;
};
struct cpl_set_tcb_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_pcmd {
union opcode_tid ot;
u16 dlen_in;
u16 dlen_out;
u32 pcmd_parm[2];
};
struct cpl_pcmd_read {
union opcode_tid ot;
u32 rsvd1;
u16 rsvd2;
u32 addr;
u16 len;
};
struct cpl_pcmd_read_rpl {
union opcode_tid ot;
u16 len;
};
struct cpl_close_con_req {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_close_con_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
u32 snd_nxt;
u32 rcv_nxt;
};
struct cpl_close_listserv_req {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_close_listserv_rpl {
union opcode_tid ot;
u8 rsvd[3];
u8 status;
};
struct cpl_abort_req {
union opcode_tid ot;
u32 rsvd0;
u8 rsvd1;
u8 cmd;
u8 rsvd2[6];
};
struct cpl_abort_rpl {
union opcode_tid ot;
u32 rsvd0;
u8 rsvd1;
u8 status;
u8 rsvd2[6];
};
struct cpl_peer_close {
union opcode_tid ot;
u32 rsvd;
};
struct cpl_tx_data {
union opcode_tid ot;
u32 len;
u32 rsvd0;
u16 urg;
u16 flags;
};
struct cpl_tx_data_ack {
union opcode_tid ot;
u32 ack_seq;
};
struct cpl_rx_data {
union opcode_tid ot;
u32 len;
u32 seq;
u16 urg;
u8 rsvd1;
u8 rsvd;
u8 status;
};
struct cpl_rx_data_ack {
union opcode_tid ot;
u32 credit;
};
struct cpl_rx_data_ddp {
union opcode_tid ot;
u32 len;
u32 seq;
u32 nxt_seq;
u32 ulp_crc;
u16 ddp_status;
u8 rsvd;
u8 status;
};
@ -99,9 +460,9 @@ struct cpl_tx_pkt_lso {
u8 ip_csum_dis:1;
u8 l4_csum_dis:1;
u8 vlan_valid:1;
u8 rsvd:1;
u8 :1;
#else
u8 rsvd:1;
u8 :1;
u8 vlan_valid:1;
u8 l4_csum_dis:1;
u8 ip_csum_dis:1;
@ -110,8 +471,7 @@ struct cpl_tx_pkt_lso {
u16 vlan;
__be32 len;
u32 rsvd2;
u8 rsvd3;
u8 rsvd[5];
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 tcp_hdr_words:4;
u8 ip_hdr_words:4;
@ -138,8 +498,142 @@ struct cpl_rx_pkt {
u8 iff:4;
#endif
u16 csum;
__be16 vlan;
u16 vlan;
u16 len;
};
struct cpl_l2t_write_req {
union opcode_tid ot;
u32 params;
u8 rsvd1[2];
u8 dst_mac[6];
};
struct cpl_l2t_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_l2t_read_req {
union opcode_tid ot;
u8 rsvd[3];
u8 l2t_idx;
};
struct cpl_l2t_read_rpl {
union opcode_tid ot;
u32 params;
u8 rsvd1[2];
u8 dst_mac[6];
};
struct cpl_smt_write_req {
union opcode_tid ot;
u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:1;
u8 mtu_idx:3;
u8 iff:4;
#else
u8 iff:4;
u8 mtu_idx:3;
u8 rsvd1:1;
#endif
u16 rsvd2;
u16 rsvd3;
u8 src_mac1[6];
u16 rsvd4;
u8 src_mac0[6];
};
struct cpl_smt_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_smt_read_req {
union opcode_tid ot;
u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:4;
u8 iff:4;
#else
u8 iff:4;
u8 rsvd1:4;
#endif
u16 rsvd2;
};
struct cpl_smt_read_rpl {
union opcode_tid ot;
u8 status;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:1;
u8 mtu_idx:3;
u8 rsvd0:4;
#else
u8 rsvd0:4;
u8 mtu_idx:3;
u8 rsvd1:1;
#endif
u16 rsvd2;
u16 rsvd3;
u8 src_mac1[6];
u16 rsvd4;
u8 src_mac0[6];
};
struct cpl_rte_delete_req {
union opcode_tid ot;
u32 params;
};
struct cpl_rte_delete_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_rte_write_req {
union opcode_tid ot;
u32 params;
u32 netmask;
u32 faddr;
};
struct cpl_rte_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
struct cpl_rte_read_req {
union opcode_tid ot;
u32 params;
};
struct cpl_rte_read_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd0[2];
u8 l2t_idx;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd1:7;
u8 select:1;
#else
u8 select:1;
u8 rsvd1:7;
#endif
u8 rsvd2[3];
u32 addr;
};
struct cpl_mss_change {
union opcode_tid ot;
u32 mss;
};
#endif /* _CXGB_CPL5_CMD_H_ */

View File

@ -53,7 +53,9 @@
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"
#include <linux/workqueue.h>
@ -73,10 +75,9 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH 16384U
#define MAX_TX_BUFFERS_LOW 1536U
#define MAX_TX_BUFFERS 1460U
#define MIN_FL_ENTRIES 32
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@ -94,8 +95,17 @@ MODULE_LICENSE("GPL");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
#define HCLOCK 0x0
#define LCLOCK 0x1
/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1; /* HW default is powersave mode. */
module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
static const char pci_speed[][4] = {
"33", "66", "100", "133"
@ -135,7 +145,7 @@ static void link_report(struct port_info *p)
}
}
void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
int speed, int duplex, int pause)
{
struct port_info *p = &adapter->port[port_id];
@ -147,6 +157,22 @@ void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
netif_carrier_off(p->dev);
link_report(p);
/* multi-ports: inform toe */
if ((speed > 0) && (adapter->params.nports > 1)) {
unsigned int sched_speed = 10;
switch (speed) {
case SPEED_1000:
sched_speed = 1000;
break;
case SPEED_100:
sched_speed = 100;
break;
case SPEED_10:
sched_speed = 10;
break;
}
t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
}
}
}
@ -165,8 +191,10 @@ static void link_start(struct port_info *p)
static void enable_hw_csum(struct adapter *adapter)
{
if (adapter->flags & TSO_CAPABLE)
t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
t1_tp_set_tcp_checksum_offload(adapter, 1);
t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
if (adapter->flags & UDP_CSUM_CAPABLE)
t1_tp_set_udp_checksum_offload(adapter->tp, 1);
t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
/*
@ -468,6 +496,18 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*data++ = (u64)t->tx_reg_pkts;
*data++ = (u64)t->tx_lso_pkts;
*data++ = (u64)t->tx_do_cksum;
if (adapter->espi) {
const struct espi_intr_counts *e;
e = t1_espi_get_intr_counts(adapter->espi);
*data++ = (u64) e->DIP2_parity_err;
*data++ = (u64) e->DIP4_err;
*data++ = (u64) e->rx_drops;
*data++ = (u64) e->tx_drops;
*data++ = (u64) e->rx_ovflw;
*data++ = (u64) e->parity_err;
}
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
@ -491,6 +531,15 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
memset(buf, 0, T2_REGMAP_SIZE);
reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@ -729,7 +778,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
static int get_eeprom_len(struct net_device *dev)
{
return EEPROM_SIZE;
struct adapter *adapter = dev->priv;
return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}
#define EEPROM_MAGIC(ap) \
@ -914,7 +965,7 @@ static void ext_intr_task(void *data)
{
struct adapter *adapter = data;
elmer0_ext_intr_handler(adapter);
t1_elmer0_ext_intr_handler(adapter);
/* Now reenable external interrupts */
spin_lock_irq(&adapter->async_lock);
@ -1074,16 +1125,19 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->vlan_rx_register = vlan_rx_register;
netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
/* T204: disable TSO */
if (!(is_T2(adapter)) || bi->port_number != 4) {
adapter->flags |= TSO_CAPABLE;
netdev->features |= NETIF_F_TSO;
}
}
netdev->open = cxgb_open;
netdev->stop = cxgb_close;
netdev->hard_start_xmit = t1_start_xmit;
netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
sizeof(struct cpl_tx_pkt_lso) :
sizeof(struct cpl_tx_pkt);
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
netdev->get_stats = t1_get_stats;
netdev->set_multicast_list = t1_set_rxmode;
netdev->do_ioctl = t1_ioctl;
@ -1134,6 +1188,17 @@ static int __devinit init_one(struct pci_dev *pdev,
bi->desc, adapter->params.chip_revision,
adapter->params.pci.is_pcix ? "PCIX" : "PCI",
adapter->params.pci.speed, adapter->params.pci.width);
/*
* Set the T1B ASIC and memory clocks.
*/
if (t1powersave)
adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */
else
adapter->t1powersave = HCLOCK;
if (t1_is_T1B(adapter))
t1_clock(adapter, t1powersave);
return 0;
out_release_adapter_res:
@ -1153,6 +1218,155 @@ static int __devinit init_one(struct pci_dev *pdev,
return err;
}
/*
 * Serially shift the low 'nbits' bits of 'bitdata' out over the ELMER0
 * GPO pins, MSB first, toward the clock-synthesizer being programmed by
 * t1_clock().  For each bit the data line (S_DATA) is set up while the
 * serial clock (S_CLOCK) is driven low, then the clock is driven high —
 * presumably the synthesizer latches on the rising edge (per convention;
 * not verifiable from this code alone).
 *
 * Uses the unlocked __t1_tpi_* accessors: the caller (t1_clock) holds
 * adapter->tpi_lock around the whole serial stream.
 */
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
int data;
int i;
u32 val;
enum {
S_CLOCK = 1 << 3, /* serial clock GPO bit */
S_DATA = 1 << 4 /* serial data GPO bit */
};
for (i = (nbits - 1); i > -1; i--) {
udelay(50);
/* Extract the next bit, MSB first. */
data = ((bitdata >> i) & 0x1);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
if (data)
val |= S_DATA;
else
val &= ~S_DATA;
udelay(50);
/* Set SCLOCK low */
val &= ~S_CLOCK;
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
/* Write SCLOCK high */
val |= S_CLOCK;
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
}
}
/*
 * Switch a T1B ASIC between full-speed (HCLOCK) and powersave (LCLOCK)
 * operation by serially reprogramming its core and memory clock
 * synthesizers through the ELMER0 GPO pins (see bit_bang()).
 *
 * mode bit 0 selects HCLOCK/LCLOCK; if bit 1 is set the call is a
 * query-only no-op ("show current mode").
 *
 * Returns 0 on success, -ENODEV on non-T1B chips (which cannot be
 * re-clocked), -EALREADY if the ASIC is already running in the
 * requested mode.
 *
 * NOTE(review): the per-synthesizer divider values (M/N/T) below are
 * hardware magic from the vendor driver — confirm against the board
 * documentation before changing them.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
u32 val;
int M_CORE_VAL;
int M_MEM_VAL;
/* Synthesizer field widths and fixed N/T values, plus GPO control bits. */
enum {
M_CORE_BITS = 9,
T_CORE_VAL = 0,
T_CORE_BITS = 2,
N_CORE_VAL = 0,
N_CORE_BITS = 2,
M_MEM_BITS = 9,
T_MEM_VAL = 0,
T_MEM_BITS = 2,
N_MEM_VAL = 0,
N_MEM_BITS = 2,
NP_LOAD = 1 << 17,
S_LOAD_MEM = 1 << 5,
S_LOAD_CORE = 1 << 6,
S_CLOCK = 1 << 3
};
if (!t1_is_T1B(adapter))
return -ENODEV; /* Can't re-clock this chip. */
if (mode & 2) {
return 0; /* show current mode. */
}
if ((adapter->t1powersave & 1) == (mode & 1))
return -EALREADY; /* ASIC already running in mode. */
if ((mode & 1) == HCLOCK) {
M_CORE_VAL = 0x14;
M_MEM_VAL = 0x18;
adapter->t1powersave = HCLOCK; /* overclock */
} else {
M_CORE_VAL = 0xe;
M_MEM_VAL = 0x10;
adapter->t1powersave = LCLOCK; /* underclock */
}
/* Don't interrupt this serial stream! */
spin_lock(&adapter->tpi_lock);
/* Initialize for ASIC core */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= NP_LOAD;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_CORE;
val &= ~S_CLOCK;
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
/* Serial program the ASIC clock synthesizer */
bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
udelay(50);
/* Finish ASIC core: pulse the core-load strobe to latch the new values. */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= S_LOAD_CORE;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_CORE;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
/* Initialize for memory: same sequence for the memory synthesizer. */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= NP_LOAD;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_MEM;
val &= ~S_CLOCK;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
/* Serial program the memory clock synthesizer */
bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
udelay(50);
/* Finish memory: pulse the memory-load strobe to latch the new values. */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= S_LOAD_MEM;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_MEM;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
spin_unlock(&adapter->tpi_lock);
return 0;
}
static inline void t1_sw_reset(struct pci_dev *pdev)
{
pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);

View File

@ -39,6 +39,12 @@
#ifndef _CXGB_ELMER0_H_
#define _CXGB_ELMER0_H_
/* ELMER0 flavors */
enum {
ELMER0_XC2S300E_6FT256_C,
ELMER0_XC2S100E_6TQ144_C
};
/* ELMER0 registers */
#define A_ELMER0_VERSION 0x100000
#define A_ELMER0_PHY_CFG 0x100004
@ -149,3 +155,4 @@
#define MI1_OP_INDIRECT_READ 3
#endif /* _CXGB_ELMER0_H_ */

View File

@ -81,46 +81,36 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
return busy;
}
/* 1. Deassert rx_reset_core. */
/* 2. Program TRICN_CNFG registers. */
/* 3. Deassert rx_reset_link */
static int tricn_init(adapter_t *adapter)
{
int i = 0;
int stat = 0;
int timeout = 0;
int is_ready = 0;
int i, sme = 1;
/* 1 */
timeout=1000;
do {
stat = readl(adapter->regs + A_ESPI_RX_RESET);
is_ready = (stat & 0x4);
timeout--;
udelay(5);
} while (!is_ready || (timeout==0));
writel(0x2, adapter->regs + A_ESPI_RX_RESET);
if (timeout==0)
{
CH_ERR("ESPI : ERROR : Timeout tricn_init() \n");
t1_fatal_err(adapter);
if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
CH_ERR("%s: ESPI clock not ready\n", adapter->name);
return -1;
}
/* 2 */
writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET);
if (sme) {
tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
for (i=4; i<= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
for (i=5; i<= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
for (i=6; i<= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
for (i=7; i<= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
}
for (i = 1; i <= 8; i++)
tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
for (i = 1; i <= 2; i++)
tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
for (i = 1; i <= 3; i++)
tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
/* 3 */
writel(0x3, adapter->regs + A_ESPI_RX_RESET);
writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST,
adapter->regs + A_ESPI_RX_RESET);
return 0;
}
@ -143,6 +133,7 @@ void t1_espi_intr_enable(struct peespi *espi)
void t1_espi_intr_clear(struct peespi *espi)
{
readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
}
@ -157,7 +148,6 @@ void t1_espi_intr_disable(struct peespi *espi)
int t1_espi_intr_handler(struct peespi *espi)
{
u32 cnt;
u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
if (status & F_DIP4ERR)
@ -177,7 +167,7 @@ int t1_espi_intr_handler(struct peespi *espi)
* Must read the error count to clear the interrupt
* that it causes.
*/
cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
}
/*
@ -210,17 +200,45 @@ static void espi_setup_for_pm3393(adapter_t *adapter)
writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
}
/* T2 Init part -- */
/* 1. Set T_ESPI_MISCCTRL_ADDR */
/* 2. Init ESPI registers. */
/* 3. Init TriCN Hard Macro */
/*
 * Configure the ESPI block for the VSC7321 MAC: scheduler tokens,
 * RX FIFO watermarks, a 4-port calendar, and ESPI training enabled
 * (A_ESPI_TRAIN is zeroed earlier in t1_espi_init for MACs that
 * cannot handle training).
 */
static void espi_setup_for_vsc7321(adapter_t *adapter)
{
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG);
/* Re-enable ESPI training for this MAC. */
writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
}
/*
 * Configure the ESPI block for the IXF1010 MAC.  RX FIFO watermarks are
 * chosen per port count (and, for 4 ports, per chip generation).
 *
 * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
 */
static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
{
writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
if (nports == 4) {
/* Four ports share the RX FIFO, so the watermarks are lower. */
if (is_T2(adapter)) {
writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
} else {
writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
}
} else {
writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
}
writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG);
}
int t1_espi_init(struct peespi *espi, int mac_type, int nports)
{
u32 cnt;
u32 status_enable_extra = 0;
adapter_t *adapter = espi->adapter;
u32 status, burstval = 0x800100;
/* Disable ESPI training. MACs that can handle it enable it below. */
writel(0, adapter->regs + A_ESPI_TRAIN);
@ -229,38 +247,20 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
writel(V_OUT_OF_SYNC_COUNT(4) |
V_DIP2_PARITY_ERR_THRES(3) |
V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
if (nports == 4) {
/* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
burstval = 0x200040;
}
}
writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
writel(nports == 4 ? 0x200040 : 0x1000080,
adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
} else
writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
switch (mac_type) {
case CHBT_MAC_PM3393:
if (mac_type == CHBT_MAC_PM3393)
espi_setup_for_pm3393(adapter);
break;
default:
else if (mac_type == CHBT_MAC_VSC7321)
espi_setup_for_vsc7321(adapter);
else if (mac_type == CHBT_MAC_IXF1010) {
status_enable_extra = F_INTEL1010MODE;
espi_setup_for_ixf1010(adapter, nports);
} else
return -1;
}
/*
* Make sure any pending interrupts from the SPI are
* Cleared before enabling the interrupt.
*/
writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
if (status & F_DIP2PARITYERR) {
cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
}
/*
* For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
* write the status as is.
*/
if (status && t1_is_T1B(espi->adapter))
status = 1;
writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
writel(status_enable_extra | F_RXSTATUSENABLE,
adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
@ -271,9 +271,11 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
* Always position the control at the 1st port egress IN
* (sop,eop) counter to reduce PIOs for T/N210 workaround.
*/
espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
& ~MON_MASK) | (F_MONITORED_DIRECTION
| F_MONITORED_INTERFACE);
espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL);
espi->misc_ctrl &= ~MON_MASK;
espi->misc_ctrl |= F_MONITORED_DIRECTION;
if (adapter->params.nports == 1)
espi->misc_ctrl |= F_MONITORED_INTERFACE;
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
spin_lock_init(&espi->lock);
}
@ -299,8 +301,7 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
{
struct peespi *espi = adapter->espi;
if (!is_T2(adapter))
return;
if (!is_T2(adapter)) return;
spin_lock(&espi->lock);
espi->misc_ctrl = (val & ~MON_MASK) |
(espi->misc_ctrl & MON_MASK);
@ -310,27 +311,61 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
{
u32 sel;
struct peespi *espi = adapter->espi;
u32 sel;
if (!is_T2(adapter))
return 0;
sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
if (!wait) {
if (!spin_trylock(&espi->lock))
return 0;
}
else
} else
spin_lock(&espi->lock);
if ((sel != (espi->misc_ctrl & MON_MASK))) {
writel(((espi->misc_ctrl & ~MON_MASK) | sel),
adapter->regs + A_ESPI_MISC_CONTROL);
sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
}
else
} else
sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
spin_unlock(&espi->lock);
return sel;
}
/*
* This function is for T204 only.
* compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
* one shot, since there is no per port counter on the out side.
*/
/*
 * T204-only: read the espiInTxSop counter for every port in one pass.
 * Unlike t1_espi_get_mon() there is no per-port counter on the egress
 * side, so all ports are sampled here.  Returns -1 if wait == 0 and the
 * lock is contended, 0 on success.
 */
int
t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
{
	struct peespi *espi = adapter->espi;
	u8 port, nports = (u8)adapter->params.nports;

	if (wait)
		spin_lock(&espi->lock);
	else if (!spin_trylock(&espi->lock))
		return -1;

	/* Point the monitor at the ingress direction if it isn't already. */
	if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
		espi->misc_ctrl &= ~MON_MASK;
		espi->misc_ctrl |= F_MONITORED_DIRECTION;
		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
	}

	/* Port 0 is already selected; switch the mux for the others. */
	for (port = 0; port < nports; port++, valp++) {
		if (port)
			writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(port),
			       adapter->regs + A_ESPI_MISC_CONTROL);
		*valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
	}

	/* Restore the control register to the cached value. */
	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
	spin_unlock(&espi->lock);

	return 0;
}

View File

@ -64,5 +64,6 @@ const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
int t1_espi_get_mon_t204(adapter_t *, u32 *, u8);
#endif /* _CXGB_ESPI_H_ */

View File

@ -0,0 +1,232 @@
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */
/*
* FPGA specific definitions
*/
#ifndef __CHELSIO_FPGA_DEFS_H__
#define __CHELSIO_FPGA_DEFS_H__
#define FPGA_PCIX_ADDR_VERSION 0xA08
#define FPGA_PCIX_ADDR_STAT 0xA0C
/* FPGA master interrupt Cause/Enable bits */
#define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1
#define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2
#define FPGA_PCIX_INTERRUPT_TP 0x4
#define FPGA_PCIX_INTERRUPT_MC3 0x8
#define FPGA_PCIX_INTERRUPT_GMAC 0x10
#define FPGA_PCIX_INTERRUPT_PCIX 0x20
/* TP interrupt register addresses */
#define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10
#define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14
#define FPGA_TP_ADDR_VERSION 0xA18
/* TP interrupt Cause/Enable bits */
#define FPGA_TP_INTERRUPT_MC4 0x1
#define FPGA_TP_INTERRUPT_MC5 0x2
/*
* PM interrupt register addresses
*/
#define FPGA_MC3_REG_INTRENABLE 0xA20
#define FPGA_MC3_REG_INTRCAUSE 0xA24
#define FPGA_MC3_REG_VERSION 0xA28
/*
* GMAC interrupt register addresses
*/
#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30
#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34
#define FPGA_GMAC_ADDR_VERSION 0xA38
/* GMAC Cause/Enable bits */
#define FPGA_GMAC_INTERRUPT_PORT0 0x1
#define FPGA_GMAC_INTERRUPT_PORT1 0x2
#define FPGA_GMAC_INTERRUPT_PORT2 0x4
#define FPGA_GMAC_INTERRUPT_PORT3 0x8
/* MI0 registers */
#define A_MI0_CLK 0xb00
#define S_MI0_CLK_DIV 0
#define M_MI0_CLK_DIV 0xff
#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)
#define S_MI0_CLK_CNT 8
#define M_MI0_CLK_CNT 0xff
#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)
#define A_MI0_CSR 0xb04
#define S_MI0_CSR_POLL 0
#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL)
#define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U)
#define S_MI0_PREAMBLE 1
#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE)
#define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U)
#define S_MI0_INTR_ENABLE 2
#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE)
#define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U)
#define S_MI0_BUSY 3
#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY)
#define F_MI0_BUSY V_MI0_BUSY(1U)
#define S_MI0_MDIO 4
#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO)
#define F_MI0_MDIO V_MI0_MDIO(1U)
#define A_MI0_ADDR 0xb08
#define S_MI0_PHY_REG_ADDR 0
#define M_MI0_PHY_REG_ADDR 0x1f
#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR)
#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR)
#define S_MI0_PHY_ADDR 5
#define M_MI0_PHY_ADDR 0x1f
#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR)
#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR)
#define A_MI0_DATA_EXT 0xb0c
#define A_MI0_DATA_INT 0xb10
/* GMAC registers */
#define A_GMAC_MACID_LO 0x28
#define A_GMAC_MACID_HI 0x2c
#define A_GMAC_CSR 0x30
#define S_INTERFACE 0
#define M_INTERFACE 0x3
#define V_INTERFACE(x) ((x) << S_INTERFACE)
#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE)
#define S_MAC_TX_ENABLE 2
#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE)
#define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U)
#define S_MAC_RX_ENABLE 3
#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE)
#define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U)
#define S_MAC_LB_ENABLE 4
#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE)
#define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U)
#define S_MAC_SPEED 5
#define M_MAC_SPEED 0x3
#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED)
#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED)
#define S_MAC_HD_FC_ENABLE 7
#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE)
#define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U)
#define S_MAC_HALF_DUPLEX 8
#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX)
#define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U)
#define S_MAC_PROMISC 9
#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC)
#define F_MAC_PROMISC V_MAC_PROMISC(1U)
#define S_MAC_MC_ENABLE 10
#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE)
#define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U)
#define S_MAC_RESET 11
#define V_MAC_RESET(x) ((x) << S_MAC_RESET)
#define F_MAC_RESET V_MAC_RESET(1U)
#define S_MAC_RX_PAUSE_ENABLE 12
#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE)
#define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U)
#define S_MAC_TX_PAUSE_ENABLE 13
#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE)
#define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U)
#define S_MAC_LWM_ENABLE 14
#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE)
#define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U)
#define S_MAC_MAGIC_PKT_ENABLE 15
#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE)
#define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U)
#define S_MAC_ISL_ENABLE 16
#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE)
#define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U)
#define S_MAC_JUMBO_ENABLE 17
#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE)
#define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U)
#define S_MAC_RX_PAD_ENABLE 18
#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE)
#define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U)
#define S_MAC_RX_CRC_ENABLE 19
#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE)
#define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U)
#define A_GMAC_IFS 0x34
#define S_MAC_IFS2 0
#define M_MAC_IFS2 0x3f
#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2)
#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2)
#define S_MAC_IFS1 8
#define M_MAC_IFS1 0x7f
#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1)
#define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1)
#define A_GMAC_JUMBO_FRAME_LEN 0x38
#define A_GMAC_LNK_DLY 0x3c
#define A_GMAC_PAUSETIME 0x40
#define A_GMAC_MCAST_LO 0x44
#define A_GMAC_MCAST_HI 0x48
#define A_GMAC_MCAST_MASK_LO 0x4c
#define A_GMAC_MCAST_MASK_HI 0x50
#define A_GMAC_RMT_CNT 0x54
#define A_GMAC_RMT_DATA 0x58
#define A_GMAC_BACKOFF_SEED 0x5c
#define A_GMAC_TXF_THRES 0x60
#define S_TXF_READ_THRESHOLD 0
#define M_TXF_READ_THRESHOLD 0xff
#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD)
#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD)
#define S_TXF_WRITE_THRESHOLD 16
#define M_TXF_WRITE_THRESHOLD 0xff
#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD)
#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD)
#define MAC_REG_BASE 0x600
#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg))
#define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO)
#define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI)
#define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR)
#define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS)
#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN)
#define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY)
#define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME)
#define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO)
#define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI)
#define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO)
#define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI)
#define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT)
#define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA)
#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED)
#define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES)
#endif

View File

@ -62,6 +62,8 @@ struct cmac_statistics {
u64 TxInternalMACXmitError;
u64 TxFramesWithExcessiveDeferral;
u64 TxFCSErrors;
u64 TxJumboFramesOK;
u64 TxJumboOctetsOK;
/* Receive */
u64 RxOctetsOK;
@ -81,6 +83,8 @@ struct cmac_statistics {
u64 RxInRangeLengthErrors;
u64 RxOutOfRangeLengthField;
u64 RxFrameTooLongErrors;
u64 RxJumboFramesOK;
u64 RxJumboOctetsOK;
};
struct cmac_ops {
@ -128,6 +132,7 @@ struct gmac {
extern struct gmac t1_pm3393_ops;
extern struct gmac t1_chelsio_mac_ops;
extern struct gmac t1_vsc7321_ops;
extern struct gmac t1_vsc7326_ops;
extern struct gmac t1_ixf1010_ops;
extern struct gmac t1_dummy_mac_ops;

View File

@ -0,0 +1,127 @@
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */
#ifndef CHELSIO_MV8E1XXX_H
#define CHELSIO_MV8E1XXX_H
#ifndef BMCR_SPEED1000
# define BMCR_SPEED1000 0x40
#endif
#ifndef ADVERTISE_PAUSE
# define ADVERTISE_PAUSE 0x400
#endif
#ifndef ADVERTISE_PAUSE_ASYM
# define ADVERTISE_PAUSE_ASYM 0x800
#endif
/* Gigabit MII registers */
#define MII_GBCR 9 /* 1000Base-T control register */
#define MII_GBSR 10 /* 1000Base-T status register */
/* 1000Base-T control register fields */
#define GBCR_ADV_1000HALF 0x100
#define GBCR_ADV_1000FULL 0x200
#define GBCR_PREFER_MASTER 0x400
#define GBCR_MANUAL_AS_MASTER 0x800
#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
/* 1000Base-T status register fields */
#define GBSR_LP_1000HALF 0x400
#define GBSR_LP_1000FULL 0x800
#define GBSR_REMOTE_OK 0x1000
#define GBSR_LOCAL_OK 0x2000
#define GBSR_LOCAL_MASTER 0x4000
#define GBSR_MASTER_FAULT 0x8000
/* Marvell PHY interrupt status bits. */
#define MV88E1XXX_INTR_JABBER 0x0001
#define MV88E1XXX_INTR_POLARITY_CHNG 0x0002
#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010
#define MV88E1XXX_INTR_DOWNSHIFT 0x0020
#define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040
#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080
#define MV88E1XXX_INTR_FALSE_CARRIER 0x0100
#define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200
#define MV88E1XXX_INTR_LINK_CHNG 0x0400
#define MV88E1XXX_INTR_AUTONEG_DONE 0x0800
#define MV88E1XXX_INTR_PAGE_RECV 0x1000
#define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000
#define MV88E1XXX_INTR_SPEED_CHNG 0x4000
#define MV88E1XXX_INTR_AUTONEG_ERR 0x8000
/* Marvell PHY specific registers. */
#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16
#define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17
#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18
#define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19
#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20
#define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21
#define MV88E1XXX_RES_REGISTER 22
#define MV88E1XXX_GLOBAL_STATUS_REGISTER 23
#define MV88E1XXX_LED_CONTROL_REGISTER 24
#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25
#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26
#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27
#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28
#define MV88E1XXX_EXTENDED_ADDR_REGISTER 29
#define MV88E1XXX_EXTENDED_REGISTER 30
/* PHY specific control register fields */
#define S_PSCR_MDI_XOVER_MODE 5
#define M_PSCR_MDI_XOVER_MODE 0x3
#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
/* Extended PHY specific control register fields */
#define S_DOWNSHIFT_ENABLE 8
#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
#define S_DOWNSHIFT_CNT 9
#define M_DOWNSHIFT_CNT 0x7
#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
/* PHY specific status register fields */
#define S_PSSR_JABBER 0
#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
#define S_PSSR_POLARITY 1
#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
#define S_PSSR_RX_PAUSE 2
#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
#define S_PSSR_TX_PAUSE 3
#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
#define S_PSSR_ENERGY_DETECT 4
#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
#define S_PSSR_DOWNSHIFT_STATUS 5
#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
#define S_PSSR_MDI 6
#define V_PSSR_MDI (1 << S_PSSR_MDI)
#define S_PSSR_CABLE_LEN 7
#define M_PSSR_CABLE_LEN 0x7
#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
#define S_PSSR_LINK 10
#define V_PSSR_LINK (1 << S_PSSR_LINK)
#define S_PSSR_STATUS_RESOLVED 11
#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
#define S_PSSR_PAGE_RECEIVED 12
#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
#define S_PSSR_DUPLEX 13
#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
#define S_PSSR_SPEED 14
#define M_PSSR_SPEED 0x3
#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
#endif

View File

@ -85,29 +85,33 @@ static int mv88x201x_reset(struct cphy *cphy, int wait)
/*
 * Enable PHY interrupts: LASI in the PHY itself plus, on ASIC boards,
 * the Marvell interrupt route through Elmer0.
 *
 * Fix: drop the unused function-scope 'u32 elmer' that shadowed the
 * block-scope copy inside the if().
 */
static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
	/* Enable PHY LASI interrupts. */
	mdio_write(cphy, 0x1, 0x9002, 0x1);

	/* Enable Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		u32 elmer;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		elmer |= ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
	}
	return 0;
}
/*
 * Disable PHY interrupts: LASI in the PHY plus, on ASIC boards, the
 * Marvell route through Elmer0.
 *
 * Fix: drop the unused function-scope 'u32 elmer' that shadowed the
 * block-scope copy inside the if().
 */
static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
	/* Disable PHY LASI interrupts. */
	mdio_write(cphy, 0x1, 0x9002, 0x0);

	/* Disable Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		u32 elmer;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		elmer &= ~ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
	}
	return 0;
}
@ -140,9 +144,11 @@ static int mv88x201x_interrupt_clear(struct cphy *cphy)
#endif
/* Clear Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
}
return 0;
}

View File

@ -0,0 +1,204 @@
/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */
#include "cphy.h"
#include "elmer0.h"
#include "suni1x10gexp_regs.h"
/* Port Reset */
/* Per-port reset: intentionally a no-op for this PHY. */
static int my3126_reset(struct cphy *cphy, int wait)
{
	/*
	 * A port-level reset could be driven through the PHY registers,
	 * but it is unnecessary because a full chip reset is performed.
	 */
	return 0;
}
static int my3126_interrupt_enable(struct cphy *cphy)
{
	/* Kick off the periodic poll (~every HZ/30 ticks) and cache GPO. */
	schedule_delayed_work(&cphy->phy_update, HZ / 30);
	t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);

	return 0;
}
static int my3126_interrupt_disable(struct cphy *cphy)
{
	/* Stop the periodic poll work. */
	cancel_rearming_delayed_work(&cphy->phy_update);

	return 0;
}
static int my3126_interrupt_clear(struct cphy *cphy)
{
	/* Nothing latched to clear for this polled PHY. */
	return 0;
}
/* Convert a SUNI register index to a TPI offset (argument parenthesized to
 * avoid operator-precedence surprises with expression arguments). */
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)

/*
 * Periodic poll handler driven from the phy_update work.
 *
 * Every 50th invocation it samples the PMA/PMD status register and reports
 * a link change to the core.  On every invocation it snapshots the MSTAT
 * octet counters and toggles the activity LED (via Elmer0 GPO bits) based
 * on whether traffic flowed since the last poll.
 */
static int my3126_interrupt_handler(struct cphy *cphy)
{
	u32 val;
	u16 val16;
	u16 status;
	u32 act_count;
	adapter_t *adapter;

	adapter = cphy->adapter;

	if (cphy->count == 50) {
		/* MMD 1 (PMA/PMD), register 1: status. */
		mdio_read(cphy, 0x1, 0x1, &val);
		val16 = (u16) val;
		status = cphy->bmsr ^ val16;

		/* Report only link-state transitions. */
		if (status & BMSR_LSTATUS)
			t1_link_changed(adapter, 0);
		cphy->bmsr = val16;

		/* Restart the 50-poll link-check interval. */
		cphy->count = 0;
	}

	/* Snapshot MSTAT counters, then sum RX (ctr 1) and TX (ctr 33). */
	t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL),
		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
	t1_tpi_read(adapter,
		OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count);
	t1_tpi_read(adapter,
		OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val);
	act_count += val;

	/* Refresh the cached Elmer0 GPO value. */
	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	cphy->elmer_gpo = val;

	/*
	 * Activity LED: turn it off (set the GPO bit) when the link LED bit
	 * is set, no new octets were counted, or it was on last poll;
	 * otherwise turn it on.  T2 and T1B use different GPO bits.
	 */
	if ((val & (1 << 8)) || (val & (1 << 19)) ||
	    (cphy->act_count == act_count) || cphy->act_on) {
		if (is_T2(adapter))
			val |= (1 << 9);
		else if (t1_is_T1B(adapter))
			val |= (1 << 20);
		cphy->act_on = 0;
	} else {
		if (is_T2(adapter))
			val &= ~(1 << 9);
		else if (t1_is_T1B(adapter))
			val &= ~(1 << 20);
		cphy->act_on = 1;
	}
	t1_tpi_write(adapter, A_ELMER0_GPO, val);
	cphy->elmer_gpo = val;

	cphy->act_count = act_count;
	cphy->count++;

	return cphy_cause_link_change;
}
/*
 * Work-queue callback that drives the periodic poll.
 * NOTE(review): "my3216" looks like a typo for "my3126", but the symbol is
 * referenced by INIT_WORK() in my3126_phy_create(), so it is kept as-is.
 */
static void my3216_poll(void *arg)
{
	my3126_interrupt_handler(arg);
}
static int my3126_set_loopback(struct cphy *cphy, int on)
{
	/* Loopback control is not implemented for this PHY. */
	return 0;
}
/* To check the activity LED */
static int my3126_get_link_status(struct cphy *cphy,
int *link_ok, int *speed, int *duplex, int *fc)
{
u32 val;
u16 val16;
adapter_t *adapter;
adapter = cphy->adapter;
mdio_read(cphy, 0x1, 0x1, &val);
val16 = (u16) val;
/* Populate elmer_gpo with the register value */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
cphy->elmer_gpo = val;
*link_ok = (val16 & BMSR_LSTATUS);
if (*link_ok) {
/* Turn on the LED. */
if (is_T2(adapter))
val &= ~(1 << 8);
else if (t1_is_T1B(adapter))
val &= ~(1 << 19);
} else {
/* Turn off the LED. */
if (is_T2(adapter))
val |= (1 << 8);
else if (t1_is_T1B(adapter))
val |= (1 << 19);
}
t1_tpi_write(adapter, A_ELMER0_GPO, val);
cphy->elmer_gpo = val;
*speed = SPEED_10000;
*duplex = DUPLEX_FULL;
/* need to add flow control */
if (fc)
*fc = PAUSE_RX | PAUSE_TX;
return (0);
}
/* Release the cphy allocated by my3126_phy_create(). */
static void my3126_destroy(struct cphy *cphy)
{
	kfree(cphy);
}
/* Operation table hooking this PHY's handlers into the generic cphy layer. */
static struct cphy_ops my3126_ops = {
	.destroy		= my3126_destroy,
	.reset			= my3126_reset,
	.interrupt_enable	= my3126_interrupt_enable,
	.interrupt_disable	= my3126_interrupt_disable,
	.interrupt_clear	= my3126_interrupt_clear,
	.interrupt_handler	= my3126_interrupt_handler,
	.get_link_status	= my3126_get_link_status,
	.set_loopback		= my3126_set_loopback,
};
/*
 * Allocate and initialize a cphy for the MY3126.  Returns NULL on
 * allocation failure.
 *
 * Bug fix: the original guarded only cphy_init() with "if (cphy)" and
 * then unconditionally dereferenced cphy for INIT_WORK() and bmsr,
 * crashing on allocation failure.  Bail out early instead.
 */
static struct cphy *my3126_phy_create(adapter_t *adapter,
			int phy_addr, struct mdio_ops *mdio_ops)
{
	struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);

	if (!cphy)
		return NULL;

	cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
	/* Periodic poll entry point; the chip is polled, not interrupt driven. */
	INIT_WORK(&cphy->phy_update, my3216_poll, cphy);
	cphy->bmsr = 0;

	return cphy;
}
/* Full chip reset: pulse the reset line via Elmer0 GPO, then enable the laser. */
static int my3126_phy_reset(adapter_t * adapter)
{
	u32 gpo;

	/* Drive GPO bit 2 low for 100 ms, then high again for 1 s. */
	t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
	gpo &= ~4;
	t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
	msleep(100);

	t1_tpi_write(adapter, A_ELMER0_GPO, gpo | 4);
	msleep(1000);

	/* Enable the laser (GPO bit 15) and let it settle for 100 us. */
	t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
	gpo |= 0x8000;
	t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
	udelay(100);

	return 0;
}
/* Entry points used by the board-probe code: PHY instance create + chip reset.
 * NOTE(review): positional initializers — presumably {create, reset};
 * confirm against struct gphy's declaration. */
struct gphy t1_my3126_ops = {
	my3126_phy_create,
	my3126_phy_reset
};

View File

@ -88,6 +88,8 @@ enum { /* RMON registers */
RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
@ -95,7 +97,9 @@ enum { /* RMON registers */
TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
};
struct _cmac_instance {
@ -265,6 +269,8 @@ static int pm3393_interrupt_handler(struct cmac *cmac)
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n",
master_intr_status);
/* TBD XXX Lets just clear everything for now */
pm3393_interrupt_clear(cmac);
@ -307,11 +313,7 @@ static int pm3393_enable_port(struct cmac *cmac, int which)
* The PHY doesn't give us link status indication on its own so have
* the link management code query it instead.
*/
{
extern void link_changed(adapter_t *adapter, int port_id);
link_changed(cmac->adapter, 0);
}
t1_link_changed(cmac->adapter, 0);
return 0;
}
@ -519,6 +521,8 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
RMON_UPDATE(mac, RxFragments, RxRuntErrors);
RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
/* Tx stats */
RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
@ -529,6 +533,8 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
return &mac->stats;
}
@ -814,6 +820,12 @@ static int pm3393_mac_reset(adapter_t * adapter)
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
CH_DBG(adapter, HW,
"PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
"is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
i, is_pl4_reset_finished, val, is_pl4_outof_lock,
is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}

File diff suppressed because it is too large Load Diff

View File

@ -42,12 +42,14 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
@ -57,10 +59,8 @@
#include "regs.h"
#include "espi.h"
#ifdef NETIF_F_TSO
#include <linux/tcp.h>
#endif
/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf
#define SGE_CMDQ_N 2
#define SGE_FREELQ_N 2
@ -73,6 +73,7 @@
#define SGE_INTRTIMER_NRES 1000
#define SGE_RX_COPY_THRES 256
#define SGE_RX_SM_BUF_SIZE 1536
#define SGE_TX_DESC_MAX_PLEN 16384
# define SGE_RX_DROP_THRES 2
@ -193,8 +194,8 @@ struct cmdQ {
u8 sop; /* is next entry start of packet? */
struct cmdQ_e *entries; /* HW command descriptor Q */
struct cmdQ_ce *centries; /* SW command context descriptor Q */
spinlock_t lock; /* Lock to protect cmdQ enqueuing */
dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
spinlock_t lock; /* Lock to protect cmdQ enqueuing */
};
struct freelQ {
@ -226,6 +227,29 @@ enum {
CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
};
/* T204 TX SW scheduler */
/* Per T204 TX port */
struct sched_port {
unsigned int avail; /* available bits - quota */
unsigned int drain_bits_per_1024ns; /* drain rate */
unsigned int speed; /* drain rate, mbps */
unsigned int mtu; /* mtu size */
struct sk_buff_head skbq; /* pending skbs */
};
/* Per T204 device */
struct sched {
ktime_t last_updated; /* last time quotas were computed */
unsigned int max_avail; /* max bits to be sent to any port */
unsigned int port; /* port index (round robin ports) */
unsigned int num; /* num skbs in per port queues */
struct sched_port p[MAX_NPORTS];
struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};
static void restart_sched(unsigned long);
/*
* Main SGE data structure
*
@ -246,14 +270,236 @@ struct sge {
unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
struct timer_list espibug_timer;
unsigned int espibug_timeout;
struct sk_buff *espibug_skb;
unsigned long espibug_timeout;
struct sk_buff *espibug_skb[MAX_NPORTS];
u32 sge_control; /* shadow value of sge control reg */
struct sge_intr_counts stats;
struct sge_port_stats port_stats[MAX_NPORTS];
struct sched *tx_sched;
struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
/*
* stop tasklet and free all pending skb's
*/
/*
 * Stop the TX scheduler tasklet and free all skbs still queued per port.
 *
 * Bug fix: the original purged s->p[s->port].skbq on every iteration,
 * draining one queue MAX_NPORTS times and leaking the skbs queued on the
 * other ports; purge each port's own queue (index i) instead.
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[i].skbq);
}
/*
* t1_sched_update_parms() is called when the MTU or link speed changes. It
* re-computes scheduler parameters to scope with the change.
*/
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
	unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
	/* A zero argument means "keep the current value". */
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		/*
		 * Drain rate in bits per 1024 ns:
		 * speed[Mbps] * (mtu - 40) / ((mtu + 50) * 1000) * 1024.
		 * NOTE(review): the 40/50 constants presumably account for
		 * per-frame header/wire overhead — confirm.
		 */
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		/* Below gigabit, shape to 90% of the computed rate. */
		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		/* NOTE(review): mtu + 16 + 14 + 4 looks like CPL + Ethernet
		 * header + CRC bytes — confirm. */
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		"max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		p->speed, s->max_avail, max_avail_segs,
		p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}
/*
* t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
* data that can be pushed per port.
*/
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int port;

	/* Apply the new global cap, then recompute every port's parameters. */
	s->max_avail = val;
	for (port = 0; port < MAX_NPORTS; port++)
		t1_sched_update_parms(sge, port, 0, 0);
}
/*
* t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
* is draining.
*/
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
	unsigned int val)
{
	struct sched *s = sge->tx_sched;

	/* Convert bits/us into bits per 1024 ns, then refresh derived values. */
	s->p[port].drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}
/*
* get_clock() implements a ns clock (see ktime_get)
*/
static inline ktime_t get_clock(void)
{
struct timespec ts;
ktime_get_ts(&ts);
return timespec_to_ktime(ts);
}
/*
* tx_sched_init() allocates resources and does basic initialization.
*/
/* Allocate the TX scheduler state and seed each port with defaults. */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s = kzalloc(sizeof(*s), GFP_KERNEL);
	int port;

	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long)sge);
	sge->tx_sched = s;

	for (port = 0; port < MAX_NPORTS; port++) {
		skb_queue_head_init(&s->p[port].skbq);
		/* Start from 1500-byte MTU / 1000 Mbps defaults. */
		t1_sched_update_parms(sge, port, 1500, 1000);
	}

	return 0;
}
/*
* sched_update_avail() computes the delta since the last time it was called
* and updates the per port quota (number of bits that can be sent to the any
* port).
*/
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	/* Skip refreshes for very small deltas (< 15 us since last update). */
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		/* NOTE(review): rate is in bits per 1024 ns, so >> 10 yields
		 * bits over the delta; the extra >> 3 presumably converts
		 * bits to bytes — confirm against how 'avail' is charged. */
		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}
/*
* sched_skb() is called from two different places. In the tx path, any
* packet generating load on an output port will call sched_skb()
* (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
* context (skb == NULL).
* The scheduler only returns a skb (which will then be sent) if the
* length of the skb is <= the current quota of the output port.
*/
/*
 * Round-robin TX scheduler.  Called from the tx path with a new skb to
 * enqueue (skb != NULL), or from irq/soft-irq context to pull more work
 * (skb == NULL).  An skb is returned (to be sent) only when its length
 * fits the owning port's current quota; otherwise NULL is returned and,
 * if work remains, the cmdQ0 doorbell is rung so we get invoked again.
 *
 * Bug fix: "s->port = ++s->port & (MAX_NPORTS - 1)" modified s->port
 * twice without a sequence point — undefined behavior in C.  Rewritten
 * as a single well-defined assignment.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		/* Queue the new skb on its output port. */
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	/* Not enough descriptors for a worst-case fragmented skb. */
	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		/* Advance the round-robin port cursor. */
		s->port = (s->port + 1) & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			/* Charge the quota and dequeue for transmission. */
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	/* Refresh the quotas once and retry before giving up. */
	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}
/*
* PIO to indicate that memory mapped Q contains valid descriptor(s).
*/
@ -350,9 +596,12 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
sizeof(struct cpl_rx_data) +
sge->freelQ[!sge->jumbo_fl].dma_offset;
sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
size = (16 * 1024) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
/*
* Setup which skb recycle Q should be used when recycling buffers from
* each free list.
@ -388,17 +637,23 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
q->in_use -= n;
ce = &q->centries[cidx];
while (n--) {
if (q->sop)
pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
pci_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE);
else
pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
if (q->sop) {
if (likely(pci_unmap_len(ce, dma_len))) {
pci_unmap_single(pdev,
pci_unmap_addr(ce, dma_addr),
pci_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE);
q->sop = 0;
}
} else {
if (likely(pci_unmap_len(ce, dma_len))) {
pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
pci_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE);
}
}
if (ce->skb) {
dev_kfree_skb(ce->skb);
dev_kfree_skb_any(ce->skb);
q->sop = 1;
}
ce++;
@ -538,7 +793,6 @@ static void configure_sge(struct sge *sge, struct sge_params *p)
sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
V_RX_PKT_OFFSET(sge->rx_pkt_pad);
#if defined(__BIG_ENDIAN_BITFIELD)
@ -566,9 +820,7 @@ static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
*/
void t1_sge_destroy(struct sge *sge)
{
if (sge->espibug_skb)
kfree_skb(sge->espibug_skb);
kfree(sge->tx_sched);
free_tx_resources(sge);
free_rx_resources(sge);
kfree(sge);
@ -853,6 +1105,99 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
recycle_fl_buf(fl, fl->cidx);
}
/*
 * T1/T2 SGE hardware caps the DMA size of a single TX descriptor at
 * SGE_TX_DESC_MAX_PLEN (16KB).  When PAGE_SIZE exceeds that, the stack
 * may hand us contiguous pieces (linear area or page fragments) that are
 * too big for one descriptor and must be split.
 *
 * This helper returns how many ADDITIONAL descriptors (beyond the usual
 * one per piece) a given skb will consume.  On configurations where
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN the whole body folds to "return 0"
 * at compile time.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int extra = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int i, chunk;
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;

		/* Linear (header) portion of the skb. */
		for (chunk = skb->len - skb->data_len;
		     chunk > SGE_TX_DESC_MAX_PLEN;
		     chunk -= SGE_TX_DESC_MAX_PLEN)
			extra++;

		/* Each paged fragment. */
		for (i = 0; i < nfrags; i++) {
			for (chunk = skb_shinfo(skb)->frags[i].size;
			     chunk > SGE_TX_DESC_MAX_PLEN;
			     chunk -= SGE_TX_DESC_MAX_PLEN)
				extra++;
		}
	}
	return extra;
}
/*
 * Write a cmdQ entry.
 *
 * Fills one SGE command-queue descriptor: DMA address, length, EOP flag
 * and both generation bits for a chunk of at most SGE_TX_DESC_MAX_PLEN
 * bytes.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry (the SOP descriptor's flags are written
 * separately by the caller, after a write barrier).
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	if (unlikely(len > SGE_TX_DESC_MAX_PLEN))
		BUG();	/* caller must have split oversized chunks already */
	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}
/*
 * Companion to write_tx_desc() for pieces larger than the HW limit
 * (see comment for compute_large_page_tx_descs()).
 *
 * Emits additional SGE tx descriptors while *desc_len exceeds
 * SGE_TX_DESC_MAX_PLEN, advancing through the command ring and wrapping
 * pidx / flipping the generation bit as needed.  On return, *e and *ce
 * point at the entry for the remaining tail (*desc_len bytes, possibly 0)
 * located at *desc_mapping, and the updated pidx is returned.  The whole
 * body is compiled out when PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			/* EOP only if this is the very last chunk of the skb */
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			/* split chunks carry no DMA mapping of their own;
			 * dma_len == 0 tells free_cmdQ_buffers() not to unmap
			 */
			ce1->skb = NULL;
			pci_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					/* ring wrap: restart and flip gen */
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}
/*
* Write the command descriptors to transmit the given skb starting at
* descriptor pidx with the given generation.
@ -861,50 +1206,84 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
unsigned int pidx, unsigned int gen,
struct cmdQ *q)
{
dma_addr_t mapping;
dma_addr_t mapping, desc_mapping;
struct cmdQ_e *e, *e1;
struct cmdQ_ce *ce;
unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
unsigned int i, flags, first_desc_len, desc_len,
nfrags = skb_shinfo(skb)->nr_frags;
e = e1 = &q->entries[pidx];
ce = &q->centries[pidx];
mapping = pci_map_single(adapter->pdev, skb->data,
skb->len - skb->data_len, PCI_DMA_TODEVICE);
ce = &q->centries[pidx];
desc_mapping = mapping;
desc_len = skb->len - skb->data_len;
flags = F_CMD_DATAVALID | F_CMD_SOP |
V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
V_CMD_GEN2(gen);
first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
desc_len : SGE_TX_DESC_MAX_PLEN;
e->addr_lo = (u32)desc_mapping;
e->addr_hi = (u64)desc_mapping >> 32;
e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
ce->skb = NULL;
pci_unmap_len_set(ce, dma_len, 0);
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
desc_len > SGE_TX_DESC_MAX_PLEN) {
desc_mapping += first_desc_len;
desc_len -= first_desc_len;
e1++;
ce++;
if (++pidx == q->size) {
pidx = 0;
gen ^= 1;
e1 = q->entries;
ce = q->centries;
}
pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
&desc_mapping, &desc_len,
nfrags, q);
if (likely(desc_len))
write_tx_desc(e1, desc_mapping, desc_len, gen,
nfrags == 0);
}
ce->skb = NULL;
pci_unmap_addr_set(ce, dma_addr, mapping);
pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
V_CMD_GEN2(gen);
e = &q->entries[pidx];
e->addr_lo = (u32)mapping;
e->addr_hi = (u64)mapping >> 32;
e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
for (e1 = e, i = 0; nfrags--; i++) {
for (i = 0; nfrags--; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ce++;
e1++;
ce++;
if (++pidx == q->size) {
pidx = 0;
gen ^= 1;
ce = q->centries;
e1 = q->entries;
ce = q->centries;
}
mapping = pci_map_page(adapter->pdev, frag->page,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
desc_mapping = mapping;
desc_len = frag->size;
pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
&desc_mapping, &desc_len,
nfrags, q);
if (likely(desc_len))
write_tx_desc(e1, desc_mapping, desc_len, gen,
nfrags == 0);
ce->skb = NULL;
pci_unmap_addr_set(ce, dma_addr, mapping);
pci_unmap_len_set(ce, dma_len, frag->size);
e1->addr_lo = (u32)mapping;
e1->addr_hi = (u64)mapping >> 32;
e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
V_CMD_GEN2(gen);
}
ce->skb = skb;
wmb();
e->flags = flags;
@ -918,14 +1297,56 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
unsigned int reclaim = q->processed - q->cleaned;
if (reclaim) {
pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
q->processed, q->cleaned);
free_cmdQ_buffers(sge, q, reclaim);
q->cleaned += reclaim;
}
}
#ifndef SET_ETHTOOL_OPS
# define __netif_rx_complete(dev) netif_rx_complete(dev)
#endif
/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 *
 * Drains the Tx packet scheduler under the cmdQ0 lock: for as long as
 * the scheduler yields a packet that fits in the free descriptors,
 * reserve ring space (wrapping pidx / flipping the generation bit as
 * needed) and write its descriptors.  If anything was queued, ring the
 * cmdQ0 doorbell unless the queue is already marked running.
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	/* NULL skb asks the scheduler for the next already-queued packet */
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;
		/* one descriptor per piece, plus extras for >16KB pieces */
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		/* same doorbell handshake as the regular transmit path */
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}
/**
* sge_rx - process an ingress ethernet packet
@ -953,6 +1374,11 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
p = (struct cpl_rx_pkt *)skb->data;
skb_pull(skb, sizeof(*p));
skb->dev = adapter->port[p->iff].dev;
if (p->iff >= adapter->params.nports) {
kfree_skb(skb);
return 0;
}
skb->dev->last_rx = jiffies;
skb->protocol = eth_type_trans(skb, skb->dev);
if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
@ -1025,7 +1451,10 @@ static unsigned int update_tx_info(struct adapter *adapter,
struct cmdQ *cmdq = &sge->cmdQ[0];
cmdq->processed += pr0;
if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
freelQs_empty(sge);
flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
}
if (flags & F_CMDQ0_ENABLE) {
clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
@ -1034,6 +1463,9 @@ static unsigned int update_tx_info(struct adapter *adapter,
set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
}
if (sge->tx_sched)
tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
flags &= ~F_CMDQ0_ENABLE;
}
@ -1234,13 +1666,14 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data)
"NAPI schedule failure!\n");
} else
writel(q->cidx, adapter->regs + A_SG_SLEEPING);
handled = 1;
goto unlock;
} else
writel(q->cidx, adapter->regs + A_SG_SLEEPING);
} else
if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) {
printk(KERN_ERR "data interrupt while NAPI running\n");
}
handled = t1_slow_intr_handler(adapter);
if (!handled)
@ -1321,7 +1754,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
{
struct sge *sge = adapter->sge;
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count;
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
spin_lock(&q->lock);
reclaim_completed_tx(sge, q);
@ -1329,26 +1762,49 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
pidx = q->pidx;
credits = q->size - q->in_use;
count = 1 + skb_shinfo(skb)->nr_frags;
count += compute_large_page_tx_descs(skb);
{ /* Ethernet packet */
/* Ethernet packet */
if (unlikely(credits < count)) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
set_bit(dev->if_port, &sge->stopped_tx_queues);
sge->stats.cmdQ_full[2]++;
spin_unlock(&q->lock);
if (!netif_queue_stopped(dev))
CH_ERR("%s: Tx ring full while queue awake!\n",
adapter->name);
}
spin_unlock(&q->lock);
return NETDEV_TX_BUSY;
}
if (unlikely(credits - count < q->stop_thres)) {
sge->stats.cmdQ_full[2]++;
netif_stop_queue(dev);
set_bit(dev->if_port, &sge->stopped_tx_queues);
sge->stats.cmdQ_full[2]++;
}
/* T204 cmdQ0 skbs that are destined for a certain port have to go
* through the scheduler.
*/
if (sge->tx_sched && !qid && skb->dev) {
use_sched:
use_sched_skb = 1;
/* Note that the scheduler might return a different skb than
* the one passed in.
*/
skb = sched_skb(sge, skb, credits);
if (!skb) {
spin_unlock(&q->lock);
return NETDEV_TX_OK;
}
pidx = q->pidx;
count = 1 + skb_shinfo(skb)->nr_frags;
count += compute_large_page_tx_descs(skb);
}
q->in_use += count;
genbit = q->genbit;
pidx = q->pidx;
q->pidx += count;
if (q->pidx >= q->size) {
q->pidx -= q->size;
@ -1374,6 +1830,14 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
}
}
if (use_sched_skb) {
if (spin_trylock(&q->lock)) {
credits = q->size - q->in_use;
skb = NULL;
goto use_sched;
}
}
return NETDEV_TX_OK;
}
@ -1402,8 +1866,10 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct sge *sge = adapter->sge;
struct cpl_tx_pkt *cpl;
#ifdef NETIF_F_TSO
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_CPL5))
goto send;
if (skb_shinfo(skb)->gso_size) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
@ -1422,9 +1888,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr->len = htonl(skb->len - sizeof(*hdr));
cpl = (struct cpl_tx_pkt *)hdr;
sge->stats.tx_lso_pkts++;
} else
#endif
{
} else {
/*
* Packets shorter than ETH_HLEN can break the MAC, drop them
* early. Also, we may get oversized packets because some
@ -1433,6 +1897,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely(skb->len < ETH_HLEN ||
skb->len > dev->mtu + eth_hdr_len(skb->data))) {
pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
skb->len, eth_hdr_len(skb->data), dev->mtu);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -1442,10 +1908,12 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
* components, such as pktgen, do not handle it right.
* Complain when this happens but try to fix things up.
*/
if (unlikely(skb_headroom(skb) <
dev->hard_header_len - ETH_HLEN)) {
if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
struct sk_buff *orig_skb = skb;
pr_debug("%s: headroom %d header_len %d\n", dev->name,
skb_headroom(skb), dev->hard_header_len);
if (net_ratelimit())
printk(KERN_ERR "%s: inadequate headroom in "
"Tx packet\n", dev->name);
@ -1457,19 +1925,21 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
skb->ip_summed == CHECKSUM_PARTIAL &&
skb->nh.iph->protocol == IPPROTO_UDP)
skb->nh.iph->protocol == IPPROTO_UDP) {
if (unlikely(skb_checksum_help(skb))) {
pr_debug("%s: unable to do udp checksum\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
	/* Assume this catches a gratuitous ARP; keep a reference to it
	 * so it can be reused later to flush out stuck ESPI packets.
	 */
if (unlikely(!adapter->sge->espibug_skb)) {
if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
if (skb->protocol == htons(ETH_P_ARP) &&
skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
adapter->sge->espibug_skb = skb;
adapter->sge->espibug_skb[dev->if_port] = skb;
/* We want to re-use this skb later. We
* simply bump the reference count and it
* will not be freed...
@ -1499,6 +1969,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
cpl->vlan_valid = 0;
send:
dev->trans_start = jiffies;
return t1_sge_tx(skb, adapter, 0, dev);
}
@ -1518,10 +1989,9 @@ static void sge_tx_reclaim_cb(unsigned long data)
continue;
reclaim_completed_tx(sge, q);
if (i == 0 && q->in_use) /* flush pending credits */
writel(F_CMDQ0_ENABLE,
sge->adapter->regs + A_SG_DOORBELL);
if (i == 0 && q->in_use) { /* flush pending credits */
writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
}
spin_unlock(&q->lock);
}
mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
@ -1568,11 +2038,20 @@ int t1_sge_configure(struct sge *sge, struct sge_params *p)
*/
void t1_sge_stop(struct sge *sge)
{
int i;
writel(0, sge->adapter->regs + A_SG_CONTROL);
(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
if (is_T2(sge->adapter))
del_timer_sync(&sge->espibug_timer);
del_timer_sync(&sge->tx_reclaim_timer);
if (sge->tx_sched)
tx_sched_stop(sge);
for (i = 0; i < MAX_NPORTS; i++)
if (sge->espibug_skb[i])
kfree_skb(sge->espibug_skb[i]);
}
/*
@ -1585,7 +2064,7 @@ void t1_sge_start(struct sge *sge)
writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
@ -1596,14 +2075,52 @@ void t1_sge_start(struct sge *sge)
/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround(void *data)
/*
 * Timer callback implementing the ESPI 'stuck packet' workaround on
 * T204 boards.  For every active port whose ESPI SEOP monitor indicates
 * a stall, (re)send the cached gratuitous-ARP skb — rewritten once with
 * a Chelsio MAC address and tagged via cb[0] — to flush the ESPI.
 * Re-arms itself at sge->espibug_timeout intervals.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		/* Monitor read failure leaves the timer disarmed. */
		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];
			struct net_device *dev = adapter->port[i].dev;

			/* Only act on running, non-stopped ports that show
			 * a stalled SEOP and have a flush skb cached.
			 */
			if (!skb || !netif_running(dev) ||
			    netif_queue_stopped(dev) ||
			    !seop[i] || (seop[i] & 0xfff) != 0)
				continue;

			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] = {
					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
				};

				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);
				memcpy(skb->data + skb->len - 10,
				       ch_mac_addr, ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* Hold an extra reference so the skb survives DMA
			 * completion and can be resent next time.
			 */
			t1_sge_tx(skb_get(skb), adapter, 0, dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
static void espibug_workaround(unsigned long data)
{
struct adapter *adapter = (struct adapter *)data;
struct sge *sge = adapter->sge;
if (netif_running(adapter->port[0].dev)) {
struct sk_buff *skb = sge->espibug_skb;
struct sk_buff *skb = sge->espibug_skb[0];
u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
if ((seop & 0xfff0fff) == 0xfff && skb) {
@ -1649,9 +2166,19 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
if (is_T2(sge->adapter)) {
init_timer(&sge->espibug_timer);
sge->espibug_timer.function = (void *)&espibug_workaround;
if (adapter->params.nports > 1) {
tx_sched_init(sge);
sge->espibug_timer.function = espibug_workaround_t204;
} else {
sge->espibug_timer.function = espibug_workaround;
}
sge->espibug_timer.data = (unsigned long)sge->adapter;
sge->espibug_timeout = 1;
/* for T204, every 10ms */
if (adapter->params.nports > 1)
sge->espibug_timeout = HZ/100;
}
@ -1659,7 +2186,14 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
p->cmdQ_size[1] = SGE_CMDQ1_E_N;
p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
if (sge->tx_sched) {
if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
p->rx_coalesce_usecs = 15;
else
p->rx_coalesce_usecs = 50;
} else
p->rx_coalesce_usecs = 50;
p->coalesce_enable = 0;
p->sample_interval_usecs = 0;
p->polling = 0;

View File

@ -92,5 +92,9 @@ void t1_sge_intr_disable(struct sge *);
void t1_sge_intr_clear(struct sge *);
const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
void t1_sched_set_max_avail_bytes(struct sge *, unsigned int);
void t1_sched_set_drain_bits_per_us(struct sge *, unsigned int, unsigned int);
unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
unsigned int);
#endif /* _CXGB_SGE_H_ */

View File

@ -43,6 +43,7 @@
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
/**
@ -78,7 +79,7 @@ static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
/*
* Write a register over the TPI interface (unlocked and locked versions).
*/
static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
{
int tpi_busy;
@ -98,16 +99,16 @@ int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
{
int ret;
spin_lock(&(adapter)->tpi_lock);
spin_lock(&adapter->tpi_lock);
ret = __t1_tpi_write(adapter, addr, value);
spin_unlock(&(adapter)->tpi_lock);
spin_unlock(&adapter->tpi_lock);
return ret;
}
/*
* Read a register over the TPI interface (unlocked and locked versions).
*/
static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
{
int tpi_busy;
@ -128,18 +129,26 @@ int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
{
int ret;
spin_lock(&(adapter)->tpi_lock);
spin_lock(&adapter->tpi_lock);
ret = __t1_tpi_read(adapter, addr, valp);
spin_unlock(&(adapter)->tpi_lock);
spin_unlock(&adapter->tpi_lock);
return ret;
}
/*
 * Set a TPI parameter: write the value into the TPI_PAR register
 * (V_TPIPAR() positions it in the register's parameter field).
 */
static void t1_tpi_par(adapter_t *adapter, u32 value)
{
	writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR);
}
/*
* Called when a port's link settings change to propagate the new values to the
* associated PHY and MAC. After performing the common tasks it invokes an
* OS-specific handler.
*/
/* static */ void link_changed(adapter_t *adapter, int port_id)
void t1_link_changed(adapter_t *adapter, int port_id)
{
int link_ok, speed, duplex, fc;
struct cphy *phy = adapter->port[port_id].phy;
@ -159,7 +168,7 @@ int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
lc->fc = (unsigned char)fc;
}
t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
}
static int t1_pci_intr_handler(adapter_t *adapter)
@ -217,7 +226,7 @@ static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
{
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
spin_lock(&(adapter)->tpi_lock);
spin_lock(&adapter->tpi_lock);
/* Write the address we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@ -227,12 +236,13 @@ static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Write the operation we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
__t1_tpi_write(adapter,
A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Read the data. */
__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
spin_unlock(&(adapter)->tpi_lock);
spin_unlock(&adapter->tpi_lock);
return 0;
}
@ -241,7 +251,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
{
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
spin_lock(&(adapter)->tpi_lock);
spin_lock(&adapter->tpi_lock);
/* Write the address we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@ -254,7 +264,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
spin_unlock(&(adapter)->tpi_lock);
spin_unlock(&adapter->tpi_lock);
return 0;
}
@ -265,12 +275,25 @@ static struct mdio_ops mi1_mdio_ext_ops = {
};
/* Supported board identifiers.  NOTE(review): these appear to be used as
 * the driver_data value in the CH_DEVICE() entries of t1_pci_tbl, i.e. an
 * index into t1_board[] — keep both tables in the same order; confirm
 * against the probe path.
 */
enum {
	CH_BRD_T110_1CU,
	CH_BRD_N110_1F,
	CH_BRD_N210_1F,
	CH_BRD_T210_1F,
	CH_BRD_T210_1CU,
	CH_BRD_N204_4CU,
};
static struct board_info t1_board[] = {
{ CHBT_BOARD_CHT110, 1/*ports#*/,
SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T1,
CHBT_MAC_PM3393, CHBT_PHY_MY3126,
125000000/*clk-core*/, 150000000/*clk-mc3*/, 125000000/*clk-mc4*/,
1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/,
1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops,
&t1_my3126_ops, &mi1_mdio_ext_ops,
"Chelsio T110 1x10GBase-CX4 TOE" },
{ CHBT_BOARD_N110, 1/*ports#*/,
SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
CHBT_MAC_PM3393, CHBT_PHY_88X2010,
@ -289,12 +312,36 @@ static struct board_info t1_board[] = {
&t1_mv88x201x_ops, &mi1_mdio_ext_ops,
"Chelsio N210 1x10GBaseX NIC" },
{ CHBT_BOARD_CHT210, 1/*ports#*/,
SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2,
CHBT_MAC_PM3393, CHBT_PHY_88X2010,
125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/,
1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
&t1_mv88x201x_ops, &mi1_mdio_ext_ops,
"Chelsio T210 1x10GBaseX TOE" },
{ CHBT_BOARD_CHT210, 1/*ports#*/,
SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2,
CHBT_MAC_PM3393, CHBT_PHY_MY3126,
125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/,
1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/,
1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops,
&t1_my3126_ops, &mi1_mdio_ext_ops,
"Chelsio T210 1x10GBase-CX4 TOE" },
};
struct pci_device_id t1_pci_tbl[] = {
CH_DEVICE(8, 0, CH_BRD_T110_1CU),
CH_DEVICE(8, 1, CH_BRD_T110_1CU),
CH_DEVICE(7, 0, CH_BRD_N110_1F),
CH_DEVICE(10, 1, CH_BRD_N210_1F),
{ 0, }
CH_DEVICE(11, 1, CH_BRD_T210_1F),
CH_DEVICE(14, 1, CH_BRD_T210_1CU),
CH_DEVICE(16, 1, CH_BRD_N204_4CU),
{ 0 }
};
MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
@ -390,9 +437,14 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
if (lc->supported & SUPPORTED_Autoneg) {
lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
if (fc) {
lc->advertising |= ADVERTISED_ASYM_PAUSE;
if (fc == (PAUSE_RX | PAUSE_TX))
if (fc == ((PAUSE_RX | PAUSE_TX) &
(mac->adapter->params.nports < 2)))
lc->advertising |= ADVERTISED_PAUSE;
else {
lc->advertising |= ADVERTISED_ASYM_PAUSE;
if (fc == PAUSE_RX)
lc->advertising |= ADVERTISED_PAUSE;
}
}
phy->ops->advertise(phy, lc->advertising);
@ -403,11 +455,15 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
mac->ops->set_speed_duplex_fc(mac, lc->speed,
lc->duplex, fc);
/* Also disables autoneg */
phy->state = PHY_AUTONEG_RDY;
phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
phy->ops->reset(phy, 0);
} else
phy->ops->autoneg_enable(phy); /* also resets PHY */
} else {
phy->state = PHY_AUTONEG_EN;
phy->ops->autoneg_enable(phy); /* also resets PHY */
}
} else {
phy->state = PHY_AUTONEG_RDY;
mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
lc->fc = (unsigned char)fc;
phy->ops->reset(phy, 0);
@ -418,7 +474,7 @@ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
/*
* External interrupt handler for boards using elmer0.
*/
int elmer0_ext_intr_handler(adapter_t *adapter)
int t1_elmer0_ext_intr_handler(adapter_t *adapter)
{
struct cphy *phy;
int phy_cause;
@ -427,13 +483,32 @@ int elmer0_ext_intr_handler(adapter_t *adapter)
t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
switch (board_info(adapter)->board) {
case CHBT_BOARD_CHT210:
case CHBT_BOARD_N210:
case CHBT_BOARD_N110:
if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
phy = adapter->port[0].phy;
phy_cause = phy->ops->interrupt_handler(phy);
if (phy_cause & cphy_cause_link_change)
link_changed(adapter, 0);
t1_link_changed(adapter, 0);
}
break;
case CHBT_BOARD_8000:
case CHBT_BOARD_CHT110:
CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
cause);
if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
struct cmac *mac = adapter->port[0].mac;
mac->ops->interrupt_handler(mac);
}
if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
u32 mod_detect;
t1_tpi_read(adapter,
A_ELMER0_GPI_STAT, &mod_detect);
CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
mod_detect ? "removed" : "inserted");
}
break;
}
@ -445,11 +520,11 @@ int elmer0_ext_intr_handler(adapter_t *adapter)
void t1_interrupts_enable(adapter_t *adapter)
{
unsigned int i;
u32 pl_intr;
adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP;
t1_sge_intr_enable(adapter->sge);
t1_tp_intr_enable(adapter->tp);
if (adapter->espi) {
adapter->slow_intr_mask |= F_PL_INTR_ESPI;
t1_espi_intr_enable(adapter->espi);
@ -462,7 +537,8 @@ void t1_interrupts_enable(adapter_t *adapter)
}
/* Enable PCIX & external chip interrupts on ASIC boards. */
pl_intr = readl(adapter->regs + A_PL_ENABLE);
if (t1_is_asic(adapter)) {
u32 pl_intr = readl(adapter->regs + A_PL_ENABLE);
/* PCI-X interrupts */
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
@ -471,6 +547,7 @@ void t1_interrupts_enable(adapter_t *adapter)
adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
writel(pl_intr, adapter->regs + A_PL_ENABLE);
}
}
/* Disables all interrupts. */
@ -479,6 +556,7 @@ void t1_interrupts_disable(adapter_t* adapter)
unsigned int i;
t1_sge_intr_disable(adapter->sge);
t1_tp_intr_disable(adapter->tp);
if (adapter->espi)
t1_espi_intr_disable(adapter->espi);
@ -489,6 +567,7 @@ void t1_interrupts_disable(adapter_t* adapter)
}
/* Disable PCIX & external chip interrupts. */
if (t1_is_asic(adapter))
writel(0, adapter->regs + A_PL_ENABLE);
/* PCI-X interrupts */
@ -501,10 +580,9 @@ void t1_interrupts_disable(adapter_t* adapter)
void t1_interrupts_clear(adapter_t* adapter)
{
unsigned int i;
u32 pl_intr;
t1_sge_intr_clear(adapter->sge);
t1_tp_intr_clear(adapter->tp);
if (adapter->espi)
t1_espi_intr_clear(adapter->espi);
@ -515,10 +593,12 @@ void t1_interrupts_clear(adapter_t* adapter)
}
/* Enable interrupts for external devices. */
pl_intr = readl(adapter->regs + A_PL_CAUSE);
if (t1_is_asic(adapter)) {
u32 pl_intr = readl(adapter->regs + A_PL_CAUSE);
writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
adapter->regs + A_PL_CAUSE);
}
/* PCI-X interrupts */
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
@ -527,7 +607,7 @@ void t1_interrupts_clear(adapter_t* adapter)
/*
* Slow path interrupt handler for ASICs.
*/
int t1_slow_intr_handler(adapter_t *adapter)
static int asic_slow_intr(adapter_t *adapter)
{
u32 cause = readl(adapter->regs + A_PL_CAUSE);
@ -536,89 +616,50 @@ int t1_slow_intr_handler(adapter_t *adapter)
return 0;
if (cause & F_PL_INTR_SGE_ERR)
t1_sge_intr_error_handler(adapter->sge);
if (cause & F_PL_INTR_TP)
t1_tp_intr_handler(adapter->tp);
if (cause & F_PL_INTR_ESPI)
t1_espi_intr_handler(adapter->espi);
if (cause & F_PL_INTR_PCIX)
t1_pci_intr_handler(adapter);
if (cause & F_PL_INTR_EXT)
t1_elmer0_ext_intr(adapter);
t1_elmer0_ext_intr_handler(adapter);
/* Clear the interrupts just processed. */
writel(cause, adapter->regs + A_PL_CAUSE);
(void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
readl(adapter->regs + A_PL_CAUSE); /* flush writes */
return 1;
}
/* Pause deadlock avoidance parameters */
#define DROP_MSEC 16
#define DROP_PKTS_CNT 1
static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
int t1_slow_intr_handler(adapter_t *adapter)
{
u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
if (enable)
val |= csum_bit;
else
val &= ~csum_bit;
writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
return asic_slow_intr(adapter);
}
void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
/* Power sequencing is a work-around for Intel's XPAKs. */
static void power_sequence_xpak(adapter_t* adapter)
{
set_csum_offload(adapter, F_IP_CSUM, enable);
}
u32 mod_detect;
u32 gpo;
void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
{
set_csum_offload(adapter, F_UDP_CSUM, enable);
}
void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
{
set_csum_offload(adapter, F_TCP_CSUM, enable);
}
static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
{
u32 val;
val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
F_TP_IN_ESPI_CHECK_TCP_CSUM;
writel(val, adapter->regs + A_TP_IN_CONFIG);
writel(F_TP_OUT_CSPI_CPL |
F_TP_OUT_ESPI_ETHERNET |
F_TP_OUT_ESPI_GENERATE_IP_CSUM |
F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
adapter->regs + A_TP_OUT_CONFIG);
val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
/*
* Enable pause frame deadlock prevention.
*/
if (is_T2(adapter)) {
u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
V_DROP_TICKS_CNT(drop_ticks) |
V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
adapter->regs + A_TP_TX_DROP_CONFIG);
/* Check for XPAK */
t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
if (!(ELMER0_GP_BIT5 & mod_detect)) {
/* XPAK is present */
t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
gpo |= ELMER0_GP_BIT18;
t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
}
writel(F_TP_RESET, adapter->regs + A_TP_RESET);
}
int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p)
{
p->chip_version = bi->chip_term;
p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
if (p->chip_version == CHBT_TERM_T1 ||
p->chip_version == CHBT_TERM_T2) {
p->chip_version == CHBT_TERM_T2 ||
p->chip_version == CHBT_TERM_FPGA) {
u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
val = G_TP_PC_REV(val);
@ -640,11 +681,23 @@ int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
static int board_init(adapter_t *adapter, const struct board_info *bi)
{
switch (bi->board) {
case CHBT_BOARD_8000:
case CHBT_BOARD_N110:
case CHBT_BOARD_N210:
writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
case CHBT_BOARD_CHT210:
case CHBT_BOARD_COUGAR:
t1_tpi_par(adapter, 0xf);
t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
break;
case CHBT_BOARD_CHT110:
t1_tpi_par(adapter, 0xf);
t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
/* TBD XXX Might not need. This fixes a problem
* described in the Intel SR XPAK errata.
*/
power_sequence_xpak(adapter);
break;
}
return 0;
}
@ -670,7 +723,8 @@ int t1_init_hw_modules(adapter_t *adapter)
bi->espi_nports))
goto out_err;
t1_tp_reset(adapter, bi->clock_core);
if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core))
goto out_err;
err = t1_sge_configure(adapter->sge, &adapter->params.sge);
if (err)
@ -714,6 +768,8 @@ void t1_free_sw_modules(adapter_t *adapter)
if (adapter->sge)
t1_sge_destroy(adapter->sge);
if (adapter->tp)
t1_tp_destroy(adapter->tp);
if (adapter->espi)
t1_espi_destroy(adapter->espi);
}
@ -762,6 +818,13 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
goto error;
}
adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
if (!adapter->tp) {
CH_ERR("%s: TP initialization failed\n",
adapter->name);
goto error;
}
board_init(adapter, bi);
bi->mdio_ops->init(adapter, bi);
if (bi->gphy->reset)
@ -793,7 +856,9 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
* Get the port's MAC addresses either from the EEPROM if one
* exists or the one hardcoded in the MAC.
*/
if (vpd_macaddress_get(adapter, i, hw_addr)) {
if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
mac->ops->macaddress_get(mac, hw_addr);
else if (vpd_macaddress_get(adapter, i, hw_addr)) {
CH_ERR("%s: could not read MAC address from VPD ROM\n",
adapter->port[i].dev->name);
goto error;
@ -806,7 +871,7 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
t1_interrupts_clear(adapter);
return 0;
error:
error:
t1_free_sw_modules(adapter);
return -1;
}

File diff suppressed because it is too large Load Diff

145
drivers/net/chelsio/tp.c Normal file
View File

@ -0,0 +1,145 @@
/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */
#include "common.h"
#include "regs.h"
#include "tp.h"
struct petp {
adapter_t *adapter;
};
/* Pause deadlock avoidance parameters */
#define DROP_MSEC 16
#define DROP_PKTS_CNT 1
/*
 * One-time TP register initialization.
 *
 * Programs the ingress/egress CPL and checksum configuration, the
 * global IP parameters (TTL, DF bit, 5-tuple lookup, SYN cookie
 * setup) and, on dual-port T2 parts, the pause-frame deadlock
 * avoidance logic.  All register writes are skipped on non-ASIC
 * (FPGA) parts.
 */
static void tp_init(adapter_t * ap, const struct tp_params *p,
		    unsigned int tp_clk)
{
	u32 in_conf;

	if (!t1_is_asic(ap))
		return;

	in_conf = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
		  F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
	if (!p->pm_size)
		in_conf |= F_OFFLOAD_DISABLE;
	else
		in_conf |= F_TP_IN_ESPI_CHECK_IP_CSUM |
			   F_TP_IN_ESPI_CHECK_TCP_CSUM;
	writel(in_conf, ap->regs + A_TP_IN_CONFIG);

	writel(F_TP_OUT_CSPI_CPL |
	       F_TP_OUT_ESPI_ETHERNET |
	       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
	       F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
	       ap->regs + A_TP_OUT_CONFIG);

	writel(V_IP_TTL(64) |
	       F_PATH_MTU /* IP DF bit */ |
	       V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
	       V_SYN_COOKIE_PARAMETER(29),
	       ap->regs + A_TP_GLOBAL_CONFIG);

	/*
	 * Enable pause frame deadlock prevention (T2, more than one
	 * port only).
	 */
	if (is_T2(ap) && ap->params.nports > 1) {
		u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);

		writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
		       V_DROP_TICKS_CNT(drop_ticks) |
		       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
		       ap->regs + A_TP_TX_DROP_CONFIG);
	}
}
/*
 * Release the TP software state allocated by t1_tp_create().
 * Safe to call with a NULL pointer (kfree(NULL) is a no-op).
 */
void t1_tp_destroy(struct petp *tp)
{
	kfree(tp);
}
/*
 * Allocate and initialize the TP software state for @adapter.
 * Returns NULL on allocation failure.  The caller owns the result
 * and releases it with t1_tp_destroy().
 */
struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
{
	struct petp *tp;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (tp)
		tp->adapter = adapter;
	return tp;
}
void t1_tp_intr_enable(struct petp *tp)
{
u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
{
/* We don't use any TP interrupts */
writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
writel(tp_intr | F_PL_INTR_TP,
tp->adapter->regs + A_PL_ENABLE);
}
}
void t1_tp_intr_disable(struct petp *tp)
{
u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
{
writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
writel(tp_intr & ~F_PL_INTR_TP,
tp->adapter->regs + A_PL_ENABLE);
}
}
/*
 * Acknowledge any pending TP interrupt indications, both in the TP
 * block's own cause register and in the top-level (PL) cause register.
 */
void t1_tp_intr_clear(struct petp *tp)
{
	writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE);
	writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE);
}
int t1_tp_intr_handler(struct petp *tp)
{
u32 cause;
cause = readl(tp->adapter->regs + A_TP_INT_CAUSE);
writel(cause, tp->adapter->regs + A_TP_INT_CAUSE);
return 0;
}
/*
 * Set or clear one checksum-offload control bit in the TP global
 * configuration register, leaving all other bits untouched.
 */
static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
{
	adapter_t *adapter = tp->adapter;
	u32 conf = readl(adapter->regs + A_TP_GLOBAL_CONFIG);

	conf = enable ? (conf | csum_bit) : (conf & ~csum_bit);
	writel(conf, adapter->regs + A_TP_GLOBAL_CONFIG);
}
/* Enable or disable the IP checksum bit in the TP global config. */
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
{
	set_csum_offload(tp, F_IP_CSUM, enable);
}
/* Enable or disable the UDP checksum bit in the TP global config. */
void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable)
{
	set_csum_offload(tp, F_UDP_CSUM, enable);
}
/* Enable or disable the TCP checksum bit in the TP global config. */
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
{
	set_csum_offload(tp, F_TCP_CSUM, enable);
}
/*
 * Initialize TP state.  tp_params contains initial settings for some
 * TP parameters, particularly the one-time PM and CM settings.
 * Always succeeds and returns 0.
 */
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
{
	adapter_t *adapter = tp->adapter;

	/* Program the TP registers, then strobe the reset bit. */
	tp_init(adapter, p, tp_clk);
	writel(F_TP_RESET, adapter->regs + A_TP_RESET);

	return 0;
}

73
drivers/net/chelsio/tp.h Normal file
View File

@ -0,0 +1,73 @@
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */
#ifndef CHELSIO_TP_H
#define CHELSIO_TP_H
#include "common.h"
#define TP_MAX_RX_COALESCING_SIZE 16224U
/*
 * TP-maintained MIB statistics.  Counters wider than 32 bits are
 * exposed by the hardware as two 32-bit halves (_hi/_lo); the field
 * layout mirrors the hardware's register order, so do not reorder.
 */
struct tp_mib_statistics {

	/* IP */
	u32 ipInReceive_hi;
	u32 ipInReceive_lo;
	u32 ipInHdrErrors_hi;
	u32 ipInHdrErrors_lo;
	u32 ipInAddrErrors_hi;
	u32 ipInAddrErrors_lo;
	u32 ipInUnknownProtos_hi;
	u32 ipInUnknownProtos_lo;
	u32 ipInDiscards_hi;
	u32 ipInDiscards_lo;
	u32 ipInDelivers_hi;
	u32 ipInDelivers_lo;
	u32 ipOutRequests_hi;
	u32 ipOutRequests_lo;
	u32 ipOutDiscards_hi;
	u32 ipOutDiscards_lo;
	u32 ipOutNoRoutes_hi;
	u32 ipOutNoRoutes_lo;
	u32 ipReasmTimeout;
	u32 ipReasmReqds;
	u32 ipReasmOKs;
	u32 ipReasmFails;

	/* pad to the hardware's TCP counter block */
	u32 reserved[8];

	/* TCP */
	u32 tcpActiveOpens;
	u32 tcpPassiveOpens;
	u32 tcpAttemptFails;
	u32 tcpEstabResets;
	u32 tcpOutRsts;
	u32 tcpCurrEstab;
	u32 tcpInSegs_hi;
	u32 tcpInSegs_lo;
	u32 tcpOutSegs_hi;
	u32 tcpOutSegs_lo;
	u32 tcpRetransSeg_hi;
	u32 tcpRetransSeg_lo;
	u32 tcpInErrs_hi;
	u32 tcpInErrs_lo;
	u32 tcpRtoMin;
	u32 tcpRtoMax;
};
struct petp;
struct tp_params;
struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
void t1_tp_destroy(struct petp *tp);
void t1_tp_intr_disable(struct petp *tp);
void t1_tp_intr_enable(struct petp *tp);
void t1_tp_intr_clear(struct petp *tp);
int t1_tp_intr_handler(struct petp *tp);
void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
#endif

View File

@ -0,0 +1,286 @@
/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */
#ifndef _VSC7321_REG_H_
#define _VSC7321_REG_H_
/* Register definitions for Vitesse VSC7321 (Meigs II) MAC
*
* Straight off the data sheet, VMDS-10038 Rev 2.0 and
* PD0011-01-14-Meigs-II 2002-12-12
*/
/* Just 'cause it's in here doesn't mean it's used. */
#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1))
/* System and CPU comm's registers */
#define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */
#define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */
#define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */
#define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */
#define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */
#define REG_MSCH CRA(0x7,0x2,0x06) /* MSCH register — NOTE(review): vendor comment duplicated REG_CRC_CNT's; verify actual meaning */
#define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */
#define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */
#define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */
#define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */
#define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */
#define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */
#define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */
#define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */
#define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */
#define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */
#define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */
/* Aggregator registers */
#define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */
#define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */
#define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */
#define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */
#define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */
#define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */
#define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */
#define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */
#define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */
#define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */
#define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */
#define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */
/* BIST registers */
/*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */
/*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */
#define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */
#define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */
#define BIST_PORT_SELECT 0x00 /* BIST port select */
#define BIST_COMMAND 0x01 /* BIST enable/disable */
#define BIST_STATUS 0x02 /* BIST operation status */
#define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */
#define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */
#define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */
#define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */
#define BIST_ERROR_STATE 0x07 /* BIST engine internal state */
#define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */
#define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */
#define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */
#define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */
/* FIFO registers
* ie = 0 for ingress, 1 for egress
* fn = FIFO number, 0-9
*/
#define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */
#define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */
#define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */
#define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */
#define REG_HIGH_LOW_WM(ie,fn) CRA(0x2,ie&1,0x40+fn) /* Flow Control Water Marks */
#define REG_CT_THRHLD(ie,fn) CRA(0x2,ie&1,0x50+fn) /* Cut Through Threshold */
#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn) /* Drop & CRC Error Counter */
#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn) /* Input Side Debug Counter */
#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn) /* Input Side Debug Counter */
#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn) /* Input Side Debug Counter */
/* Traffic shaper buckets
* ie = 0 for ingress, 1 for egress
* bn = bucket number 0-10 (yes, 11 buckets)
*/
/* OK, this one's kinda ugly. Some hardware designers are perverse. */
#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4))
#define REG_TRAFFIC_SHAPER_CONTROL(ie) CRA(0x2,ie&1,0x3b)
#define REG_SRAM_ADR(ie) CRA(0x2,ie&1,0x0e) /* FIFO SRAM address */
#define REG_SRAM_WR_STRB(ie) CRA(0x2,ie&1,0x1e) /* FIFO SRAM write strobe */
#define REG_SRAM_RD_STRB(ie) CRA(0x2,ie&1,0x2e) /* FIFO SRAM read strobe */
#define REG_SRAM_DATA_0(ie) CRA(0x2,ie&1,0x3e) /* FIFO SRAM data lo 8b */
#define REG_SRAM_DATA_1(ie) CRA(0x2,ie&1,0x4e) /* FIFO SRAM data lomid 8b */
#define REG_SRAM_DATA_2(ie) CRA(0x2,ie&1,0x5e) /* FIFO SRAM data himid 8b */
#define REG_SRAM_DATA_3(ie) CRA(0x2,ie&1,0x6e) /* FIFO SRAM data hi 8b */
#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e) /* FIFO SRAM tag */
/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */
#define REG_CONTROL(ie) CRA(0x2,ie&1,0x0f) /* FIFO control */
#define REG_ING_CONTROL CRA(0x2,0x0,0x0f) /* Ingress control (alias) */
#define REG_EGR_CONTROL CRA(0x2,0x1,0x0f) /* Egress control (alias) */
#define REG_AGE_TIMER(ie) CRA(0x2,ie&1,0x1f) /* Aging timer */
#define REG_AGE_INC(ie) CRA(0x2,ie&1,0x2f) /* Aging increment */
#define DEBUG_OUT(ie) CRA(0x2,ie&1,0x3f) /* Output debug counter control */
#define DEBUG_CNT(ie) CRA(0x2,ie&1,0x4f) /* Output debug counter */
/* SPI4 interface */
#define REG_SPI4_MISC CRA(0x5,0x0,0x00) /* Misc Register */
#define REG_SPI4_STATUS CRA(0x5,0x0,0x01) /* CML Status */
#define REG_SPI4_ING_SETUP0 CRA(0x5,0x0,0x02) /* Ingress Status Channel Setup */
#define REG_SPI4_ING_SETUP1 CRA(0x5,0x0,0x03) /* Ingress Data Training Setup */
#define REG_SPI4_ING_SETUP2 CRA(0x5,0x0,0x04) /* Ingress Data Burst Size Setup */
#define REG_SPI4_EGR_SETUP0 CRA(0x5,0x0,0x05) /* Egress Status Channel Setup */
#define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */
#define REG_SPI4_DBG_SETUP CRA(0x5,0x0,0x1A) /* Debug counters setup */
#define REG_SPI4_TEST CRA(0x5,0x0,0x20) /* Test Setup Register */
#define REG_TPGEN_UP0 CRA(0x5,0x0,0x21) /* Test Pattern generator user pattern 0 */
#define REG_TPGEN_UP1 CRA(0x5,0x0,0x22) /* Test Pattern generator user pattern 1 */
#define REG_TPCHK_UP0 CRA(0x5,0x0,0x23) /* Test Pattern checker user pattern 0 */
#define REG_TPCHK_UP1 CRA(0x5,0x0,0x24) /* Test Pattern checker user pattern 1 */
#define REG_TPSAM_P0 CRA(0x5,0x0,0x25) /* Sampled pattern 0 */
#define REG_TPSAM_P1 CRA(0x5,0x0,0x26) /* Sampled pattern 1 */
#define REG_TPERR_CNT CRA(0x5,0x0,0x27) /* Pattern checker error counter */
#define REG_SPI4_STICKY CRA(0x5,0x0,0x30) /* Sticky bits register */
#define REG_SPI4_DBG_INH CRA(0x5,0x0,0x31) /* Core egress & ingress inhibit */
#define REG_SPI4_DBG_STATUS CRA(0x5,0x0,0x32) /* Sampled ingress status */
#define REG_SPI4_DBG_GRANT CRA(0x5,0x0,0x33) /* Ingress granted credit value */
#define REG_SPI4_DESKEW CRA(0x5,0x0,0x43) /* Ingress deskew — NOTE(review): vendor comment was a copy-paste of DBG_GRANT's; verify */
/* 10GbE MAC Block Registers */
/* Note that those registers that are exactly the same for 10GbE as for
* tri-speed are only defined with the version that needs a port number.
* Pass 0xa in those cases.
*
* Also note that despite the presence of a MAC address register, this part
* does no ingress MAC address filtering. That register is used only for
* pause frame detection and generation.
*/
/* 10GbE specific, and different from tri-speed */
#define REG_MISC_10G CRA(0x1,0xa,0x00) /* Misc 10GbE setup */
#define REG_PAUSE_10G CRA(0x1,0xa,0x01) /* Pause register */
#define REG_NORMALIZER_10G CRA(0x1,0xa,0x05) /* 10G normalizer */
#define REG_STICKY_RX CRA(0x1,0xa,0x06) /* RX debug register */
#define REG_DENORM_10G CRA(0x1,0xa,0x07) /* Denormalizer */
#define REG_STICKY_TX CRA(0x1,0xa,0x08) /* TX sticky bits */
#define REG_MAX_RXHIGH CRA(0x1,0xa,0x0a) /* XGMII lane 0-3 debug */
#define REG_MAX_RXLOW CRA(0x1,0xa,0x0b) /* XGMII lane 4-7 debug */
#define REG_MAC_TX_STICKY CRA(0x1,0xa,0x0c) /* MAC Tx state sticky debug */
#define REG_MAC_TX_RUNNING CRA(0x1,0xa,0x0d) /* MAC Tx state running debug */
#define REG_TX_ABORT_AGE CRA(0x1,0xa,0x14) /* Aged Tx frames discarded */
#define REG_TX_ABORT_SHORT CRA(0x1,0xa,0x15) /* Short Tx frames discarded */
#define REG_TX_ABORT_TAXI CRA(0x1,0xa,0x16) /* Taxi error frames discarded */
#define REG_TX_ABORT_UNDERRUN CRA(0x1,0xa,0x17) /* Tx Underrun abort counter */
#define REG_TX_DENORM_DISCARD CRA(0x1,0xa,0x18) /* Tx denormalizer discards */
#define REG_XAUI_STAT_A CRA(0x1,0xa,0x20) /* XAUI status A */
#define REG_XAUI_STAT_B CRA(0x1,0xa,0x21) /* XAUI status B */
#define REG_XAUI_STAT_C CRA(0x1,0xa,0x22) /* XAUI status C */
#define REG_XAUI_CONF_A CRA(0x1,0xa,0x23) /* XAUI configuration A */
#define REG_XAUI_CONF_B CRA(0x1,0xa,0x24) /* XAUI configuration B */
#define REG_XAUI_CODE_GRP_CNT CRA(0x1,0xa,0x25) /* XAUI code group error count */
#define REG_XAUI_CONF_TEST_A CRA(0x1,0xa,0x26) /* XAUI test register A */
#define REG_PDERRCNT CRA(0x1,0xa,0x27) /* XAUI test register B */
/* pn = port number 0-9 for tri-speed, 10 for 10GbE */
/* Both tri-speed and 10GbE */
#define REG_MAX_LEN(pn) CRA(0x1,pn,0x02) /* Max length */
#define REG_MAC_HIGH_ADDR(pn) CRA(0x1,pn,0x03) /* Upper 24 bits of MAC addr */
#define REG_MAC_LOW_ADDR(pn) CRA(0x1,pn,0x04) /* Lower 24 bits of MAC addr */
/* tri-speed only
* pn = port number, 0-9
*/
#define REG_MODE_CFG(pn) CRA(0x1,pn,0x00) /* Mode configuration */
#define REG_PAUSE_CFG(pn) CRA(0x1,pn,0x01) /* Pause configuration */
#define REG_NORMALIZER(pn) CRA(0x1,pn,0x05) /* Normalizer */
#define REG_TBI_STATUS(pn) CRA(0x1,pn,0x06) /* TBI status */
#define REG_PCS_STATUS_DBG(pn) CRA(0x1,pn,0x07) /* PCS status debug */
#define REG_PCS_CTRL(pn) CRA(0x1,pn,0x08) /* PCS control */
#define REG_TBI_CONFIG(pn) CRA(0x1,pn,0x09) /* TBI configuration */
#define REG_STICK_BIT(pn) CRA(0x1,pn,0x0a) /* Sticky bits */
#define REG_DEV_SETUP(pn) CRA(0x1,pn,0x0b) /* MAC clock/reset setup */
#define REG_DROP_CNT(pn) CRA(0x1,pn,0x0c) /* Drop counter */
#define REG_PORT_POS(pn) CRA(0x1,pn,0x0d) /* Preamble port position */
#define REG_PORT_FAIL(pn) CRA(0x1,pn,0x0e) /* Port fail — NOTE(review): vendor comment duplicated REG_PORT_POS's; verify */
#define REG_SERDES_CONF(pn) CRA(0x1,pn,0x0f) /* SerDes configuration */
#define REG_SERDES_TEST(pn) CRA(0x1,pn,0x10) /* SerDes test */
#define REG_SERDES_STAT(pn) CRA(0x1,pn,0x11) /* SerDes status */
#define REG_SERDES_COM_CNT(pn) CRA(0x1,pn,0x12) /* SerDes comma counter */
#define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */
#define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */
#define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */
#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */
/* Statistics */
/* pn = port number, 0-a, a = 10GbE */
#define REG_RX_IN_BYTES(pn) CRA(0x4,pn,0x00) /* # Rx in octets */
#define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01) /* Frames w/ symbol errors */
#define REG_RX_PAUSE(pn) CRA(0x4,pn,0x02) /* # pause frames received */
#define REG_RX_UNSUP_OPCODE(pn) CRA(0x4,pn,0x03) /* # control frames with unsupported opcode */
#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,0x04) /* # octets in good frames */
#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,0x05) /* # octets in bad frames */
#define REG_RX_UNICAST(pn) CRA(0x4,pn,0x06) /* # good unicast frames */
#define REG_RX_MULTICAST(pn) CRA(0x4,pn,0x07) /* # good multicast frames */
#define REG_RX_BROADCAST(pn) CRA(0x4,pn,0x08) /* # good broadcast frames */
#define REG_CRC(pn) CRA(0x4,pn,0x09) /* # frames w/ bad CRC only */
#define REG_RX_ALIGNMENT(pn) CRA(0x4,pn,0x0a) /* # frames w/ alignment err */
#define REG_RX_UNDERSIZE(pn) CRA(0x4,pn,0x0b) /* # frames undersize */
#define REG_RX_FRAGMENTS(pn) CRA(0x4,pn,0x0c) /* # frames undersize w/ crc err */
#define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d) /* # frames with length error */
#define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e) /* # frames with illegal length field */
#define REG_RX_OVERSIZE(pn) CRA(0x4,pn,0x0f) /* # frames oversize */
#define REG_RX_JABBERS(pn) CRA(0x4,pn,0x10) /* # frames oversize w/ crc err */
#define REG_RX_SIZE_64(pn) CRA(0x4,pn,0x11) /* # frames 64 octets long */
#define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12) /* # frames 65-127 octets */
#define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13) /* # frames 128-255 */
#define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14) /* # frames 256-511 */
#define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15) /* # frames 512-1023 */
#define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16) /* # frames 1024-1518 */
#define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17) /* # frames 1519-max */
#define REG_TX_OUT_BYTES(pn) CRA(0x4,pn,0x18) /* # octets tx */
#define REG_TX_PAUSE(pn) CRA(0x4,pn,0x19) /* # pause frames sent */
#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,0x1a) /* # octets tx OK */
#define REG_TX_UNICAST(pn) CRA(0x4,pn,0x1b) /* # frames unicast */
#define REG_TX_MULTICAST(pn) CRA(0x4,pn,0x1c) /* # frames multicast */
#define REG_TX_BROADCAST(pn) CRA(0x4,pn,0x1d) /* # frames broadcast */
#define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e) /* # frames tx after multiple collisions */
#define REG_TX_LATE_COLL(pn) CRA(0x4,pn,0x1f) /* # late collisions detected */
#define REG_TX_XCOLL(pn) CRA(0x4,pn,0x20) /* # frames lost, excessive collisions */
#define REG_TX_DEFER(pn) CRA(0x4,pn,0x21) /* # frames deferred on first tx attempt */
#define REG_TX_XDEFER(pn) CRA(0x4,pn,0x22) /* # frames excessively deferred */
#define REG_TX_CSENSE(pn) CRA(0x4,pn,0x23) /* carrier sense errors at frame end */
#define REG_TX_SIZE_64(pn) CRA(0x4,pn,0x24) /* # frames 64 octets long */
#define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25) /* # frames 65-127 octets */
#define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26) /* # frames 128-255 */
#define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27) /* # frames 256-511 */
#define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28) /* # frames 512-1023 */
#define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29) /* # frames 1024-1518 */
#define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a) /* # frames 1519-max */
#define REG_TX_SINGLE_COLL(pn) CRA(0x4,pn,0x2b) /* # frames tx after single collision */
#define REG_TX_BACKOFF2(pn) CRA(0x4,pn,0x2c) /* # frames tx ok after 2 backoffs/collisions */
#define REG_TX_BACKOFF3(pn) CRA(0x4,pn,0x2d) /* after 3 backoffs/collisions */
#define REG_TX_BACKOFF4(pn) CRA(0x4,pn,0x2e) /* after 4 */
#define REG_TX_BACKOFF5(pn) CRA(0x4,pn,0x2f) /* after 5 */
#define REG_TX_BACKOFF6(pn) CRA(0x4,pn,0x30) /* after 6 */
#define REG_TX_BACKOFF7(pn) CRA(0x4,pn,0x31) /* after 7 */
#define REG_TX_BACKOFF8(pn) CRA(0x4,pn,0x32) /* after 8 */
#define REG_TX_BACKOFF9(pn) CRA(0x4,pn,0x33) /* after 9 */
#define REG_TX_BACKOFF10(pn) CRA(0x4,pn,0x34) /* after 10 */
#define REG_TX_BACKOFF11(pn) CRA(0x4,pn,0x35) /* after 11 */
#define REG_TX_BACKOFF12(pn) CRA(0x4,pn,0x36) /* after 12 */
#define REG_TX_BACKOFF13(pn) CRA(0x4,pn,0x37) /* after 13 */
#define REG_TX_BACKOFF14(pn) CRA(0x4,pn,0x38) /* after 14 */
#define REG_TX_BACKOFF15(pn) CRA(0x4,pn,0x39) /* after 15 */
#define REG_TX_UNDERRUN(pn) CRA(0x4,pn,0x3a) /* # frames dropped from underrun */
#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
#define REG_RX_IPG_SHRINK(pn) CRA(0x4,pn,0x3c) /* # of IPG shrinks detected */
#define REG_STAT_STICKY1G(pn) CRA(0x4,pn,0x3e) /* tri-speed sticky bits */
#define REG_STAT_STICKY10G CRA(0x4,0xa,0x3e) /* 10GbE sticky bits */
#define REG_STAT_INIT(pn) CRA(0x4,pn,0x3f) /* Clear all statistics */
/* MII-Management Block registers */
/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If
* we hooked up to the one with separate directions, the middle 0x0 needs to
* change to 0x1. And the current errata states that MII-M 1 doesn't work.
*/
#define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */
#define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */
#define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */
#define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */
#define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd)
#define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d)
#define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d)
#define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d)
#define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d)
#define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d)
#define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d)
#define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d)
/* Whew. */
#endif