/*
 * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *
 * This driver is designed for the Broadcom SiByte SOC built-in
 * Ethernet controllers.  Written by Mitch Lichtenberg at Broadcom Corp.
 *
 * Updated to the driver model and the PHY abstraction layer
 * by Maciej W. Rozycki.
 */

#include <linux/bug.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */

/* This is only here until the firmware is ready.  In that case,
   the firmware leaves the ethernet address in the register for us. */
#ifdef CONFIG_SIBYTE_STANDALONE
#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
#endif


/* These identify the driver base version and may not be removed. */
#if 0
static char version1[] __initdata =
"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
#endif

/* Operational parameters that usually are not changed. */

#define CONFIG_SBMAC_COALESCE

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)


MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");

/* A few user-configurable values which may be modified when a driver
   module is loaded. */

/* 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug messages");

#ifdef CONFIG_SBMAC_COALESCE
static int int_pktcnt_tx = 255;
module_param(int_pktcnt_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");

static int int_timeout_tx = 255;
module_param(int_timeout_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");

static int int_pktcnt_rx = 64;
module_param(int_pktcnt_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");

static int int_timeout_rx = 64;
module_param(int_timeout_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#endif

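/*
 * These four knobs only set the coalescing defaults that are copied into
 * each DMA channel by sbdma_initctx() below; S_IRUGO makes them readable
 * but not writable through sysfs, so they are effectively load-time
 * parameters (for example, loading the module with int_pktcnt_rx=32 makes
 * the RX channel interrupt after fewer packets than the default of 64).
 */
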
#include <asm/sibyte/board.h>
#include <asm/sibyte/sb1250.h>
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define R_MAC_DMA_OODPKTLOST_RX        R_MAC_DMA_OODPKTLOST
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#else
#error invalid SiByte MAC configuration
#endif
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250_mac.h>
#include <asm/sibyte/sb1250_dma.h>

#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#define UNIT_INT(n)             (K_BCM1480_INT_MAC_0 + ((n) * 2))
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#define UNIT_INT(n)             (K_INT_MAC_0 + (n))
#else
#error invalid SiByte MAC configuration
#endif

#ifdef K_INT_PHY
#define SBMAC_PHY_INT           K_INT_PHY
#else
#define SBMAC_PHY_INT           PHY_POLL
#endif

/**********************************************************************
 *  Simple types
 ********************************************************************* */

enum sbmac_speed {
        sbmac_speed_none = 0,
        sbmac_speed_10 = SPEED_10,
        sbmac_speed_100 = SPEED_100,
        sbmac_speed_1000 = SPEED_1000,
};

enum sbmac_duplex {
        sbmac_duplex_none = -1,
        sbmac_duplex_half = DUPLEX_HALF,
        sbmac_duplex_full = DUPLEX_FULL,
};

enum sbmac_fc {
        sbmac_fc_none,
        sbmac_fc_disabled,
        sbmac_fc_frame,
        sbmac_fc_collision,
        sbmac_fc_carrier,
};

enum sbmac_state {
        sbmac_state_uninit,
        sbmac_state_off,
        sbmac_state_on,
        sbmac_state_broken,
};

/**********************************************************************
 *  Macros
 ********************************************************************* */

#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
                          (d)->sbdma_dscrtable : (d)->f+1)

#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)

#define SBMAC_MAX_TXDESCR       256
#define SBMAC_MAX_RXDESCR       256

#define ETHER_ADDR_LEN          6
#define ENET_PACKET_SIZE        1518
/*#define ENET_PACKET_SIZE      9216 */

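/*
 * A quick worked example of the two helpers above, assuming a 32-byte
 * SMP_CACHE_BYTES (the real value depends on the kernel configuration):
 *
 *   NUMCACHEBLKS(1518 + 2) -> (1520 + 31) / 32 = 48, i.e. the descriptor
 *   "size" field counts whole cache lines spanned, not bytes.
 *
 *   SBDMA_NEXTBUF(d, sbdma_addptr) advances a software ring pointer by one
 *   descriptor and wraps back to sbdma_dscrtable when it would reach
 *   sbdma_dscrtable_end, which is the ring behaviour relied on below.
 */
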
/**********************************************************************
 *  DMA Descriptor structure
 ********************************************************************* */

struct sbdmadscr {
        uint64_t dscr_a;
        uint64_t dscr_b;
};

/**********************************************************************
 *  DMA Controller structure
 ********************************************************************* */

struct sbmacdma {

        /*
         * This stuff is used to identify the channel and the registers
         * associated with it.
         */
        struct sbmac_softc      *sbdma_eth;     /* back pointer to associated
                                                   MAC */
        int                     sbdma_channel;  /* channel number */
        int                     sbdma_txdir;    /* direction (1=transmit) */
        int                     sbdma_maxdescr; /* total # of descriptors
                                                   in ring */
#ifdef CONFIG_SBMAC_COALESCE
        int                     sbdma_int_pktcnt;
                                                /* # descriptors rx/tx
                                                   before interrupt */
        int                     sbdma_int_timeout;
                                                /* # usec rx/tx interrupt */
#endif
        void __iomem            *sbdma_config0; /* DMA config register 0 */
        void __iomem            *sbdma_config1; /* DMA config register 1 */
        void __iomem            *sbdma_dscrbase;
                                                /* descriptor base address */
        void __iomem            *sbdma_dscrcnt; /* descriptor count register */
        void __iomem            *sbdma_curdscr; /* current descriptor
                                                   address */
        void __iomem            *sbdma_oodpktlost;
                                                /* pkt drop (rx only) */

        /*
         * This stuff is for maintenance of the ring
         */
        void                    *sbdma_dscrtable_unaligned;
        struct sbdmadscr        *sbdma_dscrtable;
                                                /* base of descriptor table */
        struct sbdmadscr        *sbdma_dscrtable_end;
                                                /* end of descriptor table */
        struct sk_buff          **sbdma_ctxtable;
                                                /* context table, one
                                                   per descr */
        dma_addr_t              sbdma_dscrtable_phys;
                                                /* and also the phys addr */
        struct sbdmadscr        *sbdma_addptr;  /* next dscr for sw to add */
        struct sbdmadscr        *sbdma_remptr;  /* next dscr for sw
                                                   to remove */
};

/**********************************************************************
 *  Ethernet softc structure
 ********************************************************************* */

struct sbmac_softc {

        /*
         * Linux-specific things
         */
        struct net_device       *sbm_dev;       /* pointer to linux device */
        struct napi_struct      napi;
        struct phy_device       *phy_dev;       /* the associated PHY device */
        struct mii_bus          mii_bus;        /* the MII bus */
        int                     phy_irq[PHY_MAX_ADDR];
        spinlock_t              sbm_lock;       /* spin lock */
        int                     sbm_devflags;   /* current device flags */

        /*
         * Controller-specific things
         */
        void __iomem            *sbm_base;      /* MAC's base address */
        enum sbmac_state        sbm_state;      /* current state */

        void __iomem            *sbm_macenable; /* MAC Enable Register */
        void __iomem            *sbm_maccfg;    /* MAC Config Register */
        void __iomem            *sbm_fifocfg;   /* FIFO Config Register */
        void __iomem            *sbm_framecfg;  /* Frame Config Register */
        void __iomem            *sbm_rxfilter;  /* Receive Filter Register */
        void __iomem            *sbm_isr;       /* Interrupt Status Register */
        void __iomem            *sbm_imr;       /* Interrupt Mask Register */
        void __iomem            *sbm_mdio;      /* MDIO Register */

        enum sbmac_speed        sbm_speed;      /* current speed */
        enum sbmac_duplex       sbm_duplex;     /* current duplex */
        enum sbmac_fc           sbm_fc;         /* cur. flow control setting */
        int                     sbm_pause;      /* current pause setting */
        int                     sbm_link;       /* current link state */

        unsigned char           sbm_hwaddr[ETHER_ADDR_LEN];

        struct sbmacdma         sbm_txdma;      /* only channel 0 for now */
        struct sbmacdma         sbm_rxdma;
        int                     rx_hw_checksum;
        int                     sbe_idx;
};

/**********************************************************************
 *  Externs
 ********************************************************************* */

/**********************************************************************
 *  Prototypes
 ********************************************************************* */

static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
                          int txrx, int maxdescr);
static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
                               struct sk_buff *m);
static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
static void sbdma_emptyring(struct sbmacdma *d);
static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
                            int work_to_do, int poll);
static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
                             int poll);
static int sbmac_initctx(struct sbmac_softc *s);
static void sbmac_channel_start(struct sbmac_softc *s);
static void sbmac_channel_stop(struct sbmac_softc *s);
static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
                                                enum sbmac_state);
static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
static uint64_t sbmac_addr2reg(unsigned char *ptr);
static irqreturn_t sbmac_intr(int irq, void *dev_instance);
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
static void sbmac_setmulti(struct sbmac_softc *sc);
static int sbmac_init(struct platform_device *pldev, long long base);
static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
                            enum sbmac_fc fc);

static int sbmac_open(struct net_device *dev);
static void sbmac_tx_timeout(struct net_device *dev);
static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
static int sbmac_poll(struct napi_struct *napi, int budget);

static void sbmac_mii_poll(struct net_device *dev);
static int sbmac_mii_probe(struct net_device *dev);

static void sbmac_mii_sync(void __iomem *sbm_mdio);
static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
                               int bitcnt);
static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx);
static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
                           u16 val);

/**********************************************************************
 *  Globals
 ********************************************************************* */

static char sbmac_string[] = "sb1250-mac";
static char sbmac_pretty[] = "SB1250 MAC";

static char sbmac_mdio_string[] = "sb1250-mac-mdio";

/**********************************************************************
 *  MDIO constants
 ********************************************************************* */

#define MII_COMMAND_START       0x01
#define MII_COMMAND_READ        0x02
#define MII_COMMAND_WRITE       0x01
#define MII_COMMAND_ACK         0x02

#define M_MAC_MDIO_DIR_OUTPUT   0               /* for clarity */

#define ENABLE                  1
#define DISABLE                 0

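/*
 * For reference, the bit-banged management frame built from these constants
 * follows the usual IEEE 802.3 clause 22 layout (a sketch of what the
 * helpers below emit, MSB first, via sbmac_mii_senddata(); the 32-bit
 * preamble of ones comes from sbmac_mii_sync()):
 *
 *   <32x preamble> <ST=01> <OP=10 read / 01 write> <PHYAD[4:0]> <REGAD[4:0]>
 *   <turnaround> <DATA[15:0]>
 *
 * MII_COMMAND_START/READ/WRITE/ACK above are simply those 2-bit fields.
 */
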
/**********************************************************************
 *  SBMAC_MII_SYNC(sbm_mdio)
 *
 *  Synchronize with the MII - send a pattern of bits to the MII
 *  that will guarantee that it is ready to accept a command.
 *
 *  Input parameters:
 *         sbm_mdio - address of the MAC's MDIO register
 *
 *  Return value:
 *         nothing
 ********************************************************************* */

static void sbmac_mii_sync(void __iomem *sbm_mdio)
{
        int cnt;
        uint64_t bits;
        int mac_mdio_genc;

        mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

        bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;

        __raw_writeq(bits | mac_mdio_genc, sbm_mdio);

        for (cnt = 0; cnt < 32; cnt++) {
                __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
                __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
        }
}

/**********************************************************************
 *  SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
 *
 *  Send some bits to the MII.  The bits to be sent are right-
 *  justified in the 'data' parameter.
 *
 *  Input parameters:
 *         sbm_mdio - address of the MAC's MDIO register
 *         data     - data to send
 *         bitcnt   - number of bits to send
 ********************************************************************* */

static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
                               int bitcnt)
{
        int i;
        uint64_t bits;
        unsigned int curmask;
        int mac_mdio_genc;

        mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

        bits = M_MAC_MDIO_DIR_OUTPUT;
        __raw_writeq(bits | mac_mdio_genc, sbm_mdio);

        curmask = 1 << (bitcnt - 1);

        for (i = 0; i < bitcnt; i++) {
                if (data & curmask)
                        bits |= M_MAC_MDIO_OUT;
                else
                        bits &= ~M_MAC_MDIO_OUT;
                __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
                __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
                __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
                curmask >>= 1;
        }
}

/**********************************************************************
 *  SBMAC_MII_READ(bus, phyaddr, regidx)
 *
 *  Read a PHY register.
 *
 *  Input parameters:
 *         bus - MDIO bus handle
 *         phyaddr - PHY's address
 *         regidx - index of register to read
 *
 *  Return value:
 *         value read, or 0xffff if an error occurred.
 ********************************************************************* */

static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
        struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
        void __iomem *sbm_mdio = sc->sbm_mdio;
        int idx;
        int error;
        int regval;
        int mac_mdio_genc;

        /*
         * Synchronize ourselves so that the PHY knows the next
         * thing coming down is a command
         */
        sbmac_mii_sync(sbm_mdio);

        /*
         * Send the data to the PHY.  The sequence is
         * a "start" command (2 bits)
         * a "read" command (2 bits)
         * the PHY addr (5 bits)
         * the register index (5 bits)
         */
        sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
        sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
        sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
        sbmac_mii_senddata(sbm_mdio, regidx, 5);

        mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

        /*
         * Switch the port around without a clock transition.
         */
        __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

        /*
         * Send out a clock pulse to signal we want the status
         */
        __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
                     sbm_mdio);
        __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

        /*
         * If an error occurred, the PHY will signal '1' back
         */
        error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;

        /*
         * Issue an 'idle' clock pulse, but keep the direction
         * the same.
         */
        __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
                     sbm_mdio);
        __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

        regval = 0;

        for (idx = 0; idx < 16; idx++) {
                regval <<= 1;

                if (error == 0) {
                        if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
                                regval |= 1;
                }

                __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
                             sbm_mdio);
                __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
        }

        /* Switch back to output */
        __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);

        if (error == 0)
                return regval;
        return 0xffff;
}

/**********************************************************************
 *  SBMAC_MII_WRITE(bus, phyaddr, regidx, regval)
 *
 *  Write a value to a PHY register.
 *
 *  Input parameters:
 *         bus - MDIO bus handle
 *         phyaddr - PHY to use
 *         regidx - register within the PHY
 *         regval - data to write to register
 *
 *  Return value:
 *         0 for success
 ********************************************************************* */

static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
                           u16 regval)
{
        struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
        void __iomem *sbm_mdio = sc->sbm_mdio;
        int mac_mdio_genc;

        sbmac_mii_sync(sbm_mdio);

        sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
        sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2);
        sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
        sbmac_mii_senddata(sbm_mdio, regidx, 5);
        sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2);
        sbmac_mii_senddata(sbm_mdio, regval, 16);

        mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

        __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);

        return 0;
}

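/*
 * These two accessors are what the PHY abstraction layer invokes through
 * the driver's struct mii_bus (the mii_bus member of struct sbmac_softc).
 * A rough sketch of the expected wiring, presumably done in the init path
 * that is not part of this excerpt, shown only to make the bus->priv cast
 * above clear:
 *
 *      sc->mii_bus.read  = sbmac_mii_read;
 *      sc->mii_bus.write = sbmac_mii_write;
 *      sc->mii_bus.priv  = sc;
 */
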
/**********************************************************************
 *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
 *
 *  Initialize a DMA channel context.  Since there are potentially
 *  eight DMA channels per MAC, it's nice to do this in a standard
 *  way.
 *
 *  Input parameters:
 *         d - struct sbmacdma (DMA channel context)
 *         s - struct sbmac_softc (pointer to a MAC)
 *         chan - channel number (0..1 right now)
 *         txrx - Identifies DMA_TX or DMA_RX for channel direction
 *         maxdescr - number of descriptors
 *
 *  Return value:
 *         nothing
 ********************************************************************* */

static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
                          int txrx, int maxdescr)
{
#ifdef CONFIG_SBMAC_COALESCE
        int int_pktcnt, int_timeout;
#endif

        /*
         * Save away interesting stuff in the structure
         */

        d->sbdma_eth       = s;
        d->sbdma_channel   = chan;
        d->sbdma_txdir     = txrx;

#if 0
        /* RMON clearing */
        s->sbe_idx = (s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
#endif

        __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR);
        __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR);

        /*
         * initialize register pointers
         */

        d->sbdma_config0 =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
        d->sbdma_config1 =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
        d->sbdma_dscrbase =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
        d->sbdma_dscrcnt =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
        d->sbdma_curdscr =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
        if (d->sbdma_txdir)
                d->sbdma_oodpktlost = NULL;
        else
                d->sbdma_oodpktlost =
                        s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);

        /*
         * Allocate memory for the ring
         */

        d->sbdma_maxdescr = maxdescr;

        d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1,
                                               sizeof(*d->sbdma_dscrtable),
                                               GFP_KERNEL);

        /*
         * The descriptor table must be aligned to at least 16 bytes or the
         * MAC will corrupt it.
         */
        d->sbdma_dscrtable = (struct sbdmadscr *)
                ALIGN((unsigned long)d->sbdma_dscrtable_unaligned,
                      sizeof(*d->sbdma_dscrtable));

        d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;

        d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);

        /*
         * And context table
         */

        d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr,
                                    sizeof(*d->sbdma_ctxtable), GFP_KERNEL);

#ifdef CONFIG_SBMAC_COALESCE
        /*
         * Setup Rx/Tx DMA coalescing defaults
         */

        int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
        if (int_pktcnt) {
                d->sbdma_int_pktcnt = int_pktcnt;
        } else {
                d->sbdma_int_pktcnt = 1;
        }

        int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
        if (int_timeout) {
                d->sbdma_int_timeout = int_timeout;
        } else {
                d->sbdma_int_timeout = 0;
        }
#endif
}

/**********************************************************************
 *  SBDMA_CHANNEL_START(d)
 *
 *  Initialize the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *         d - DMA channel to init (context must be previously init'd)
 *         rxtx - DMA_RX or DMA_TX depending on what type of channel
 *
 *  Return value:
 *         nothing
 ********************************************************************* */

static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
{
        /*
         * Turn on the DMA channel
         */

#ifdef CONFIG_SBMAC_COALESCE
        __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
                     0, d->sbdma_config1);
        __raw_writeq(M_DMA_EOP_INT_EN |
                     V_DMA_RINGSZ(d->sbdma_maxdescr) |
                     V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
                     0, d->sbdma_config0);
#else
        __raw_writeq(0, d->sbdma_config1);
        __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
                     0, d->sbdma_config0);
#endif

        __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);

        /*
         * Initialize ring pointers
         */

        d->sbdma_addptr = d->sbdma_dscrtable;
        d->sbdma_remptr = d->sbdma_dscrtable;
}

/**********************************************************************
 *  SBDMA_CHANNEL_STOP(d)
 *
 *  Shut down the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *         d - DMA channel to stop (context must be previously init'd)
 *
 *  Return value:
 *         nothing
 ********************************************************************* */

static void sbdma_channel_stop(struct sbmacdma *d)
{
        /*
         * Turn off the DMA channel
         */

        __raw_writeq(0, d->sbdma_config1);

        __raw_writeq(0, d->sbdma_dscrbase);

        __raw_writeq(0, d->sbdma_config0);

        /*
         * Zero ring pointers
         */

        d->sbdma_addptr = NULL;
        d->sbdma_remptr = NULL;
}

static inline void sbdma_align_skb(struct sk_buff *skb,
                                   unsigned int power2, unsigned int offset)
{
        unsigned char *addr = skb->data;
        unsigned char *newaddr = PTR_ALIGN(addr, power2);

        skb_reserve(skb, newaddr - addr + offset);
}

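/*
 * A small worked example of the helper above, assuming power2 = 32 and
 * offset = NET_IP_ALIGN = 2 (both values depend on the configuration):
 * if skb->data initially ends in ...0x14, PTR_ALIGN() rounds it up to the
 * next 32-byte boundary (...0x20) and skb_reserve() then advances data by
 * (0x20 - 0x14) + 2 bytes, so the buffer handed to the DMA engine starts
 * at a known offset from a cache-line boundary and the 14-byte Ethernet
 * header leaves the IP header 4-byte aligned.
 */
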
/**********************************************************************
 *  SBDMA_ADD_RCVBUFFER(d,sb)
 *
 *  Add a buffer to the specified DMA channel.  For receive channels,
 *  this queues a buffer for inbound packets.
 *
 *  Input parameters:
 *         sc - softc structure
 *         d - DMA channel descriptor
 *         sb - sk_buff to add, or NULL if we should allocate one
 *
 *  Return value:
 *         0 if the buffer was added successfully
 *         -ENOSPC if the ring is full, -ENOBUFS if no sk_buff could
 *         be allocated
 ********************************************************************* */

static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
                               struct sk_buff *sb)
{
        struct net_device *dev = sc->sbm_dev;
        struct sbdmadscr *dsc;
        struct sbdmadscr *nextdsc;
        struct sk_buff *sb_new = NULL;
        int pktsize = ENET_PACKET_SIZE;

        /* get pointer to our current place in the ring */

        dsc = d->sbdma_addptr;
        nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

        /*
         * figure out if the ring is full - if the next descriptor
         * is the same as the one that we're going to remove from
         * the ring, the ring is full
         */

        if (nextdsc == d->sbdma_remptr) {
                return -ENOSPC;
        }

        /*
         * Allocate a sk_buff if we don't already have one.
         * If we do have an sk_buff, reset it so that it's empty.
         *
         * Note: sk_buffs don't seem to be guaranteed to have any sort
         * of alignment when they are allocated.  Therefore, allocate enough
         * extra space to make sure that:
         *
         *    1. the data does not start in the middle of a cache line.
         *    2. The data does not end in the middle of a cache line
         *    3. The buffer can be aligned such that the IP addresses are
         *       naturally aligned.
         *
         *  Remember, the SOCs MAC writes whole cache lines at a time,
         *  without reading the old contents first.  So, if the sk_buff's
         *  data portion starts in the middle of a cache line, the SOC
         *  DMA will trash the beginning (and ending) portions.
         */

        if (sb == NULL) {
                sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
                                               SMP_CACHE_BYTES * 2 +
                                               NET_IP_ALIGN);
                if (sb_new == NULL) {
                        pr_info("%s: sk_buff allocation failed\n",
                                d->sbdma_eth->sbm_dev->name);
                        return -ENOBUFS;
                }

                sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
        }
        else {
                sb_new = sb;
                /*
                 * nothing special to reinit buffer, it's already aligned
                 * and sb->data already points to a good place.
                 */
        }

        /*
         * fill in the descriptor
         */

#ifdef CONFIG_SBMAC_COALESCE
        /*
         * Do not interrupt per DMA transfer.
         */
        dsc->dscr_a = virt_to_phys(sb_new->data) |
                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
#else
        dsc->dscr_a = virt_to_phys(sb_new->data) |
                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
                M_DMA_DSCRA_INTERRUPT;
#endif

        /* receiving: no options */
        dsc->dscr_b = 0;

        /*
         * fill in the context
         */

        d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;

        /*
         * point at next packet
         */

        d->sbdma_addptr = nextdsc;

        /*
         * Give the buffer to the DMA engine.
         */

        __raw_writeq(1, d->sbdma_dscrcnt);

        return 0;                                       /* we did it */
}

/**********************************************************************
 *  SBDMA_ADD_TXBUFFER(d,sb)
 *
 *  Add a transmit buffer to the specified DMA channel, causing a
 *  transmit to start.
 *
 *  Input parameters:
 *         d - DMA channel descriptor
 *         sb - sk_buff to add
 *
 *  Return value:
 *         0 transmit queued successfully
 *         otherwise error code
 ********************************************************************* */

static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb)
{
        struct sbdmadscr *dsc;
        struct sbdmadscr *nextdsc;
        uint64_t phys;
        uint64_t ncb;
        int length;

        /* get pointer to our current place in the ring */

        dsc = d->sbdma_addptr;
        nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

        /*
         * figure out if the ring is full - if the next descriptor
         * is the same as the one that we're going to remove from
         * the ring, the ring is full
         */

        if (nextdsc == d->sbdma_remptr) {
                return -ENOSPC;
        }

        /*
         * Under Linux, it's not necessary to copy/coalesce buffers
         * like it is on NetBSD.  We think they're all contiguous,
         * but that may not be true for GBE.
         */

        length = sb->len;

        /*
         * fill in the descriptor.  Note that the number of cache
         * blocks in the descriptor is the number of blocks
         * *spanned*, so we need to add in the offset (if any)
         * while doing the calculation.
         */

        phys = virt_to_phys(sb->data);
        ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));

        dsc->dscr_a = phys |
                V_DMA_DSCRA_A_SIZE(ncb) |
#ifndef CONFIG_SBMAC_COALESCE
                M_DMA_DSCRA_INTERRUPT |
#endif
                M_DMA_ETHTX_SOP;

        /* transmitting: set outbound options and length */

        dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
                V_DMA_DSCRB_PKT_SIZE(length);

        /*
         * fill in the context
         */

        d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;

        /*
         * point at next packet
         */

        d->sbdma_addptr = nextdsc;

        /*
         * Give the buffer to the DMA engine.
         */

        __raw_writeq(1, d->sbdma_dscrcnt);

        return 0;                                       /* we did it */
}

/**********************************************************************
 *  SBDMA_EMPTYRING(d)
 *
 *  Free all allocated sk_buffs on the specified DMA channel;
 *
 *  Input parameters:
 *         d - DMA channel
 *
 *  Return value:
 *         nothing
 ********************************************************************* */

static void sbdma_emptyring(struct sbmacdma *d)
{
        int idx;
        struct sk_buff *sb;

        for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
                sb = d->sbdma_ctxtable[idx];
                if (sb) {
                        dev_kfree_skb(sb);
                        d->sbdma_ctxtable[idx] = NULL;
                }
        }
}

/**********************************************************************
 *  SBDMA_FILLRING(d)
 *
 *  Fill the specified DMA channel (must be receive channel)
 *  with sk_buffs
 *
 *  Input parameters:
 *         sc - softc structure
 *         d - DMA channel
 *
 *  Return value:
 *         nothing
 ********************************************************************* */

static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
{
        int idx;

        for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
                if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
                        break;
        }
}

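/*
 * Note on the "- 1" above: the ring-full test in sbdma_add_rcvbuffer()
 * (nextdsc == sbdma_remptr) deliberately keeps one descriptor unused so
 * that an empty ring and a full ring can be told apart.  A ring of
 * SBMAC_MAX_RXDESCR entries can therefore hold at most
 * SBMAC_MAX_RXDESCR - 1 posted receive buffers.
 */
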
#ifdef CONFIG_NET_POLL_CONTROLLER
static void sbmac_netpoll(struct net_device *netdev)
{
        struct sbmac_softc *sc = netdev_priv(netdev);
        int irq = sc->sbm_dev->irq;

        __raw_writeq(0, sc->sbm_imr);

        sbmac_intr(irq, netdev);

#ifdef CONFIG_SBMAC_COALESCE
        __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
                     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
                     sc->sbm_imr);
#else
        __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
                     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
}
#endif

/**********************************************************************
|
2007-04-26 07:23:22 +00:00
|
|
|
* SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
|
2005-10-19 14:40:02 +00:00
|
|
|
*
|
|
|
|
* Process "completed" receive buffers on the specified DMA channel.
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
2005-10-19 14:40:02 +00:00
|
|
|
* Input parameters:
|
2007-04-26 07:23:22 +00:00
|
|
|
* sc - softc structure
|
|
|
|
* d - DMA channel context
|
|
|
|
* work_to_do - no. of packets to process before enabling interrupt
|
|
|
|
* again (for NAPI)
|
|
|
|
* poll - 1: using polling (for NAPI)
|
2005-10-19 14:40:02 +00:00
|
|
|
*
|
2005-04-16 22:20:36 +00:00
|
|
|
* Return value:
|
|
|
|
* nothing
|
|
|
|
********************************************************************* */
|
|
|
|
|
2007-09-20 18:14:01 +00:00
|
|
|
static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
|
|
|
|
int work_to_do, int poll)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-10-04 00:41:50 +00:00
|
|
|
struct net_device *dev = sc->sbm_dev;
|
2005-04-16 22:20:36 +00:00
|
|
|
int curidx;
|
|
|
|
int hwidx;
|
2007-09-20 18:14:01 +00:00
|
|
|
struct sbdmadscr *dsc;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct sk_buff *sb;
|
|
|
|
int len;
|
2007-04-26 07:23:22 +00:00
|
|
|
int work_done = 0;
|
|
|
|
int dropped = 0;
|
2005-10-19 14:40:02 +00:00
|
|
|
|
2007-04-26 07:23:22 +00:00
|
|
|
prefetch(d);
|
|
|
|
|
|
|
|
again:
|
|
|
|
/* Check if the HW dropped any frames */
|
2007-10-04 00:41:50 +00:00
|
|
|
dev->stats.rx_fifo_errors
|
2007-04-26 07:23:22 +00:00
|
|
|
+= __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
|
|
|
|
__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
|
|
|
|
|
|
|
|
while (work_to_do-- > 0) {
|
2005-10-19 14:40:02 +00:00
|
|
|
/*
|
2005-04-16 22:20:36 +00:00
|
|
|
* figure out where we are (as an index) and where
|
|
|
|
* the hardware is (also as an index)
|
|
|
|
*
|
2005-10-19 14:40:02 +00:00
|
|
|
* This could be done faster if (for example) the
|
2005-04-16 22:20:36 +00:00
|
|
|
* descriptor table was page-aligned and contiguous in
|
|
|
|
* both virtual and physical memory -- you could then
|
|
|
|
* just compare the low-order bits of the virtual address
|
|
|
|
* (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
|
|
|
|
*/
|
2005-10-19 14:40:02 +00:00
|
|
|
|
2007-04-26 07:23:22 +00:00
|
|
|
dsc = d->sbdma_remptr;
|
|
|
|
curidx = dsc - d->sbdma_dscrtable;
|
|
|
|
|
|
|
|
prefetch(dsc);
|
|
|
|
prefetch(&d->sbdma_ctxtable[curidx]);
|
|
|
|
|
2007-09-20 18:14:01 +00:00
|
|
|
hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
|
|
|
|
			 d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable);

		/*
		 * If they're the same, that means we've processed all
		 * of the descriptors up to (but not including) the one that
		 * the hardware is working on right now.
		 */

		if (curidx == hwidx)
			goto done;

		/*
		 * Otherwise, get the packet's sk_buff ptr back
		 */

		sb = d->sbdma_ctxtable[curidx];
		d->sbdma_ctxtable[curidx] = NULL;

		len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;

		/*
		 * Check packet status.  If good, process it.
		 * If not, silently drop it and put it back on the
		 * receive ring.
		 */

		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {

			/*
			 * Add a new buffer to replace the old one.  If we fail
			 * to allocate a buffer, we're going to drop this
			 * packet and put it right back on the receive ring.
			 */

			if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
				     -ENOBUFS)) {
				dev->stats.rx_dropped++;
				/* Re-add old buffer */
				sbdma_add_rcvbuffer(sc, d, sb);
				/* No point in continuing at the moment */
				printk(KERN_ERR "dropped packet (1)\n");
				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
				goto done;
			} else {
				/*
				 * Set length into the packet
				 */
				skb_put(sb,len);

				/*
				 * Buffer has been replaced on the
				 * receive ring.  Pass the buffer to
				 * the kernel
				 */
				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
				/* Check hw IPv4/TCP checksum if supported */
				if (sc->rx_hw_checksum == ENABLE) {
					if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
					    !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
						sb->ip_summed = CHECKSUM_UNNECESSARY;
						/* don't need to set sb->csum */
					} else {
						sb->ip_summed = CHECKSUM_NONE;
					}
				}
				prefetch(sb->data);
				prefetch((const void *)(((char *)sb->data)+32));
				if (poll)
					dropped = netif_receive_skb(sb);
				else
					dropped = netif_rx(sb);

				if (dropped == NET_RX_DROP) {
					dev->stats.rx_dropped++;
					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
					goto done;
				}
				else {
					dev->stats.rx_bytes += len;
					dev->stats.rx_packets++;
				}
			}
		} else {
			/*
			 * Packet was mangled somehow.  Just drop it and
			 * put it back on the receive ring.
			 */
			dev->stats.rx_errors++;
			sbdma_add_rcvbuffer(sc, d, sb);
		}

		/*
		 * .. and advance to the next buffer.
		 */

		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
		work_done++;
	}
	if (!poll) {
		work_to_do = 32;
		goto again; /* collect fifo drop statistics again */
	}
done:
	return work_done;
}
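/*
 * Note on the receive path above (editorial summary, not an original
 * driver comment): in NAPI mode (poll != 0) completed packets are fed
 * directly to netif_receive_skb(), while the non-polling path defers to
 * netif_rx().  The 4 bytes subtracted from the descriptor packet size
 * are presumably the Ethernet FCS that the MAC leaves on the buffer.
 */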


/**********************************************************************
 *  SBDMA_TX_PROCESS(sc,d)
 *
 *  Process "completed" transmit buffers on the specified DMA channel.
 *  This is normally called within the interrupt service routine.
 *  Note that this isn't really ideal for priority channels, since
 *  it processes all of the packets on a given channel before
 *  returning.
 *
 *  Input parameters:
 *      sc - softc structure
 *       d - DMA channel context
 *    poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *       nothing
 ********************************************************************* */

static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			     int poll)
{
	struct net_device *dev = sc->sbm_dev;
	int curidx;
	int hwidx;
	struct sbdmadscr *dsc;
	struct sk_buff *sb;
	unsigned long flags;
	int packets_handled = 0;

	spin_lock_irqsave(&(sc->sbm_lock), flags);

	if (d->sbdma_remptr == d->sbdma_addptr)
		goto end_unlock;

	hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
		 d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable);

	for (;;) {
		/*
		 * figure out where we are (as an index) and where
		 * the hardware is (also as an index)
		 *
		 * This could be done faster if (for example) the
		 * descriptor table was page-aligned and contiguous in
		 * both virtual and physical memory -- you could then
		 * just compare the low-order bits of the virtual address
		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
		 */

		curidx = d->sbdma_remptr - d->sbdma_dscrtable;

		/*
		 * If they're the same, that means we've processed all
		 * of the descriptors up to (but not including) the one that
		 * the hardware is working on right now.
		 */

		if (curidx == hwidx)
			break;

		/*
		 * Otherwise, get the packet's sk_buff ptr back
		 */

		dsc = &(d->sbdma_dscrtable[curidx]);
		sb = d->sbdma_ctxtable[curidx];
		d->sbdma_ctxtable[curidx] = NULL;

		/*
		 * Stats
		 */

		dev->stats.tx_bytes += sb->len;
		dev->stats.tx_packets++;

		/*
		 * for transmits, we just free buffers.
		 */

		dev_kfree_skb_irq(sb);

		/*
		 * .. and advance to the next buffer.
		 */

		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);

		packets_handled++;
	}

	/*
	 * Decide if we should wake up the protocol or not.
	 * Other drivers seem to do this when we reach a low
	 * watermark on the transmit queue.
	 */

	if (packets_handled)
		netif_wake_queue(d->sbdma_eth->sbm_dev);

end_unlock:
	spin_unlock_irqrestore(&(sc->sbm_lock), flags);

}
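/*
 * Illustrative example of the index math used above (not from the
 * original source): if __raw_readq(d->sbdma_curdscr), masked with
 * M_DMA_CURDSCR_ADDR, reads back d->sbdma_dscrtable_phys plus
 * 5 * sizeof(struct sbdmadscr), then hwidx == 5 and the loop reclaims
 * descriptors from the current remptr index up to, but not including,
 * index 5 -- the one the hardware is still working on.
 */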


/**********************************************************************
 *  SBMAC_INITCTX(s)
 *
 *  Initialize an Ethernet context structure - this is called
 *  once per MAC on the 1250.  Memory is allocated here, so don't
 *  call it again from inside the ioctl routines that bring the
 *  interface up/down
 *
 *  Input parameters:
 *       s - sbmac context structure
 *
 *  Return value:
 *       0
 ********************************************************************* */

static int sbmac_initctx(struct sbmac_softc *s)
{

	/*
	 * figure out the addresses of some ports
	 */

	s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
	s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
	s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
	s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG;
	s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG;
	s->sbm_isr = s->sbm_base + R_MAC_STATUS;
	s->sbm_imr = s->sbm_base + R_MAC_INT_MASK;
	s->sbm_mdio = s->sbm_base + R_MAC_MDIO;

	/*
	 * Initialize the DMA channels.  Right now, only one per MAC is used
	 * Note: Only do this _once_, as it allocates memory from the kernel!
	 */

	sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
	sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);

	/*
	 * initial state is OFF
	 */

	s->sbm_state = sbmac_state_off;

	return 0;
}

static void sbdma_uninitctx(struct sbmacdma *d)
{
	if (d->sbdma_dscrtable_unaligned) {
		kfree(d->sbdma_dscrtable_unaligned);
		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
	}

	if (d->sbdma_ctxtable) {
		kfree(d->sbdma_ctxtable);
		d->sbdma_ctxtable = NULL;
	}
}


static void sbmac_uninitctx(struct sbmac_softc *sc)
{
	sbdma_uninitctx(&(sc->sbm_txdma));
	sbdma_uninitctx(&(sc->sbm_rxdma));
}
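/*
 * Usage sketch (illustrative, not an original driver comment):
 * sbmac_initctx() is called once from sbmac_init() at attach time, and
 * sbmac_uninitctx() undoes it -- for example on a register_netdev()
 * failure -- by freeing the descriptor and context tables that
 * sbdma_initctx() allocated.
 */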


/**********************************************************************
 *  SBMAC_CHANNEL_START(s)
 *
 *  Start packet processing on this MAC.
 *
 *  Input parameters:
 *       s - sbmac structure
 *
 *  Return value:
 *       nothing
 ********************************************************************* */

static void sbmac_channel_start(struct sbmac_softc *s)
{
	uint64_t reg;
	void __iomem *port;
	uint64_t cfg,fifo,framecfg;
	int idx, th_value;

	/*
	 * Don't do this if running
	 */

	if (s->sbm_state == sbmac_state_on)
		return;

	/*
	 * Bring the controller out of reset, but leave it off.
	 */

	__raw_writeq(0, s->sbm_macenable);

	/*
	 * Ignore all received packets
	 */

	__raw_writeq(0, s->sbm_rxfilter);

	/*
	 * Calculate values for various control registers.
	 */

	cfg = M_MAC_RETRY_EN |
		M_MAC_TX_HOLD_SOP_EN |
		V_MAC_TX_PAUSE_CNT_16K |
		M_MAC_AP_STAT_EN |
		M_MAC_FAST_SYNC |
		M_MAC_SS_EN |
		0;

	/*
	 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars
	 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
	 * Use a larger RD_THRSH for gigabit
	 */
	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
		th_value = 28;
	else
		th_value = 64;

	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
		((s->sbm_speed == sbmac_speed_1000)
		 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
		V_MAC_TX_RL_THRSH(4) |
		V_MAC_RX_PL_THRSH(4) |
		V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
		V_MAC_RX_PL_THRSH(4) |
		V_MAC_RX_RL_THRSH(8) |
		0;

	framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
		V_MAC_MAX_FRAMESZ_DEFAULT |
		V_MAC_BACKOFF_SEL(1);

	/*
	 * Clear out the hash address map
	 */

	port = s->sbm_base + R_MAC_HASH_BASE;
	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Clear out the exact-match table
	 */

	port = s->sbm_base + R_MAC_ADDR_BASE;
	for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Clear out the DMA Channel mapping table registers
	 */

	port = s->sbm_base + R_MAC_CHUP0_BASE;
	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	port = s->sbm_base + R_MAC_CHLO0_BASE;
	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Program the hardware address.  It goes into the hardware-address
	 * register as well as the first filter register.
	 */

	reg = sbmac_addr2reg(s->sbm_hwaddr);

	port = s->sbm_base + R_MAC_ADDR_BASE;
	__raw_writeq(reg, port);
	port = s->sbm_base + R_MAC_ETHERNET_ADDR;

#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
	/*
	 * Pass1 SOCs do not receive packets addressed to the
	 * destination address in the R_MAC_ETHERNET_ADDR register.
	 * Set the value to zero.
	 */
	__raw_writeq(0, port);
#else
	__raw_writeq(reg, port);
#endif

	/*
	 * Set the receive filter for no packets, and write values
	 * to the various config registers
	 */

	__raw_writeq(0, s->sbm_rxfilter);
	__raw_writeq(0, s->sbm_imr);
	__raw_writeq(framecfg, s->sbm_framecfg);
	__raw_writeq(fifo, s->sbm_fifocfg);
	__raw_writeq(cfg, s->sbm_maccfg);

	/*
	 * Initialize DMA channels (rings should be ok now)
	 */

	sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
	sbdma_channel_start(&(s->sbm_txdma), DMA_TX);

	/*
	 * Configure the speed, duplex, and flow control
	 */

	sbmac_set_speed(s,s->sbm_speed);
	sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);

	/*
	 * Fill the receive ring
	 */

	sbdma_fillring(s, &(s->sbm_rxdma));

	/*
	 * Turn on the rest of the bits in the enable register
	 */

#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
	__raw_writeq(M_MAC_RXDMA_EN0 |
		     M_MAC_TXDMA_EN0, s->sbm_macenable);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
	__raw_writeq(M_MAC_RXDMA_EN0 |
		     M_MAC_TXDMA_EN0 |
		     M_MAC_RX_ENABLE |
		     M_MAC_TX_ENABLE, s->sbm_macenable);
#else
#error invalid SiByte MAC configuration
#endif

#ifdef CONFIG_SBMAC_COALESCE
	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
		     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
#else
	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
		     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
#endif

	/*
	 * Enable receiving unicasts and broadcasts
	 */

	__raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);

	/*
	 * we're running now.
	 */

	s->sbm_state = sbmac_state_on;

	/*
	 * Program multicast addresses
	 */

	sbmac_setmulti(s);

	/*
	 * If channel was in promiscuous mode before, turn that on
	 */

	if (s->sbm_devflags & IFF_PROMISC) {
		sbmac_promiscuous_mode(s,1);
	}

}
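/*
 * Note (editorial summary, not an original driver comment): with
 * CONFIG_SBMAC_COALESCE the interrupt mask written above requests
 * end-of-packet count/timer interrupts, so several completed descriptors
 * can be batched per interrupt; without it, every channel event
 * (M_MAC_INT_CHANNEL) raises its own interrupt.
 */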


/**********************************************************************
 *  SBMAC_CHANNEL_STOP(s)
 *
 *  Stop packet processing on this MAC.
 *
 *  Input parameters:
 *       s - sbmac structure
 *
 *  Return value:
 *       nothing
 ********************************************************************* */

static void sbmac_channel_stop(struct sbmac_softc *s)
{
	/* don't do this if already stopped */

	if (s->sbm_state == sbmac_state_off)
		return;

	/* don't accept any packets, disable all interrupts */

	__raw_writeq(0, s->sbm_rxfilter);
	__raw_writeq(0, s->sbm_imr);

	/* Turn off ticker */

	/* XXX */

	/* turn off receiver and transmitter */

	__raw_writeq(0, s->sbm_macenable);

	/* We're stopped now. */

	s->sbm_state = sbmac_state_off;

	/*
	 * Stop DMA channels (rings should be ok now)
	 */

	sbdma_channel_stop(&(s->sbm_rxdma));
	sbdma_channel_stop(&(s->sbm_txdma));

	/* Empty the receive and transmit rings */

	sbdma_emptyring(&(s->sbm_rxdma));
	sbdma_emptyring(&(s->sbm_txdma));

}

/**********************************************************************
 *  SBMAC_SET_CHANNEL_STATE(state)
 *
 *  Set the channel's state ON or OFF
 *
 *  Input parameters:
 *       state - new state
 *
 *  Return value:
 *       old state
 ********************************************************************* */
static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc,
						enum sbmac_state state)
{
	enum sbmac_state oldstate = sc->sbm_state;

	/*
	 * If same as previous state, return
	 */

	if (state == oldstate) {
		return oldstate;
	}

	/*
	 * If new state is ON, turn channel on
	 */

	if (state == sbmac_state_on) {
		sbmac_channel_start(sc);
	}
	else {
		sbmac_channel_stop(sc);
	}

	/*
	 * Return previous state
	 */

	return oldstate;
}
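/*
 * Usage sketch (illustrative, not from the original source): callers that
 * need to reconfigure the MAC while it may be running typically do
 *
 *	enum sbmac_state old = sbmac_set_channel_state(sc, sbmac_state_off);
 *	... change speed / duplex / filters ...
 *	sbmac_set_channel_state(sc, old);
 *
 * relying on the returned previous state to restore the channel.
 */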


/**********************************************************************
 *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
 *
 *  Turn on or off promiscuous mode
 *
 *  Input parameters:
 *       sc - softc
 *       onoff - 1 to turn on, 0 to turn off
 *
 *  Return value:
 *       nothing
 ********************************************************************* */

static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
{
	uint64_t reg;

	if (sc->sbm_state != sbmac_state_on)
		return;

	if (onoff) {
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= M_MAC_ALLPKT_EN;
		__raw_writeq(reg, sc->sbm_rxfilter);
	}
	else {
		reg = __raw_readq(sc->sbm_rxfilter);
		reg &= ~M_MAC_ALLPKT_EN;
		__raw_writeq(reg, sc->sbm_rxfilter);
	}
}

/**********************************************************************
 *  SBMAC_SETIPHDR_OFFSET(sc)
 *
 *  Set the iphdr offset as 15 assuming ethernet encapsulation
 *
 *  Input parameters:
 *       sc - softc
 *
 *  Return value:
 *       nothing
 ********************************************************************* */

static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
{
	uint64_t reg;

	/* Hard code the offset to 15 for now */
	reg = __raw_readq(sc->sbm_rxfilter);
	reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
	__raw_writeq(reg, sc->sbm_rxfilter);

	/* BCM1250 pass1 didn't have hardware checksum.  Everything
	   later does. */
	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
		sc->rx_hw_checksum = DISABLE;
	} else {
		sc->rx_hw_checksum = ENABLE;
	}
}


/**********************************************************************
 *  SBMAC_ADDR2REG(ptr)
 *
 *  Convert six bytes into the 64-bit register value that
 *  we typically write into the SBMAC's address/mcast registers
 *
 *  Input parameters:
 *       ptr - pointer to 6 bytes
 *
 *  Return value:
 *       register value
 ********************************************************************* */

static uint64_t sbmac_addr2reg(unsigned char *ptr)
{
	uint64_t reg = 0;

	ptr += 6;

	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);

	return reg;
}
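/*
 * Worked example (illustrative, not from the original source): for the
 * station address 40:00:00:00:01:00 the bytes are folded in last-byte-first,
 * so sbmac_addr2reg() returns 0x0000000100000040 -- the first address byte
 * (0x40) lands in the least-significant byte of the register value.
 */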


/**********************************************************************
 *  SBMAC_SET_SPEED(s,speed)
 *
 *  Configure LAN speed for the specified MAC.
 *  Warning: must be called when MAC is off!
 *
 *  Input parameters:
 *       s - sbmac structure
 *       speed - speed to set MAC to (see enum sbmac_speed)
 *
 *  Return value:
 *       1 if successful
 *       0 indicates invalid parameters
 ********************************************************************* */

static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed)
{
	uint64_t cfg;
	uint64_t framecfg;

	/*
	 * Save new current values
	 */

	s->sbm_speed = speed;

	if (s->sbm_state == sbmac_state_on)
		return 0;	/* save for next restart */

	/*
	 * Read current register values
	 */

	cfg = __raw_readq(s->sbm_maccfg);
	framecfg = __raw_readq(s->sbm_framecfg);

	/*
	 * Mask out the stuff we want to change
	 */

	cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
	framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
		      M_MAC_SLOT_SIZE);

	/*
	 * Now add in the new bits
	 */

	switch (speed) {
	case sbmac_speed_10:
		framecfg |= V_MAC_IFG_RX_10 |
			V_MAC_IFG_TX_10 |
			K_MAC_IFG_THRSH_10 |
			V_MAC_SLOT_SIZE_10;
		cfg |= V_MAC_SPEED_SEL_10MBPS;
		break;

	case sbmac_speed_100:
		framecfg |= V_MAC_IFG_RX_100 |
			V_MAC_IFG_TX_100 |
			V_MAC_IFG_THRSH_100 |
			V_MAC_SLOT_SIZE_100;
		cfg |= V_MAC_SPEED_SEL_100MBPS;
		break;

	case sbmac_speed_1000:
		framecfg |= V_MAC_IFG_RX_1000 |
			V_MAC_IFG_TX_1000 |
			V_MAC_IFG_THRSH_1000 |
			V_MAC_SLOT_SIZE_1000;
		cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
		break;

	default:
		return 0;
	}

	/*
	 * Send the bits back to the hardware
	 */

	__raw_writeq(framecfg, s->sbm_framecfg);
	__raw_writeq(cfg, s->sbm_maccfg);

	return 1;
}
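/*
 * Note (editorial summary, not an original driver comment): if the MAC is
 * currently on, the new speed is only recorded in s->sbm_speed and the
 * function returns 0; the hardware registers are rewritten the next time
 * the channel is restarted via sbmac_channel_start().
 */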

/**********************************************************************
 *  SBMAC_SET_DUPLEX(s,duplex,fc)
 *
 *  Set Ethernet duplex and flow control options for this MAC
 *  Warning: must be called when MAC is off!
 *
 *  Input parameters:
 *       s - sbmac structure
 *       duplex - duplex setting (see enum sbmac_duplex)
 *       fc - flow control setting (see enum sbmac_fc)
 *
 *  Return value:
 *       1 if ok
 *       0 if an invalid parameter combination was specified
 ********************************************************************* */

static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
			    enum sbmac_fc fc)
{
	uint64_t cfg;

	/*
	 * Save new current values
	 */

	s->sbm_duplex = duplex;
	s->sbm_fc = fc;

	if (s->sbm_state == sbmac_state_on)
		return 0;	/* save for next restart */

	/*
	 * Read current register values
	 */

	cfg = __raw_readq(s->sbm_maccfg);

	/*
	 * Mask off the stuff we're about to change
	 */

	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);

	switch (duplex) {
	case sbmac_duplex_half:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
			break;

		case sbmac_fc_collision:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
			break;

		case sbmac_fc_carrier:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
			break;

		case sbmac_fc_frame:		/* not valid in half duplex */
		default:			/* invalid selection */
			return 0;
		}
		break;

	case sbmac_duplex_full:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= V_MAC_FC_CMD_DISABLED;
			break;

		case sbmac_fc_frame:
			cfg |= V_MAC_FC_CMD_ENABLED;
			break;

		case sbmac_fc_collision:	/* not valid in full duplex */
		case sbmac_fc_carrier:		/* not valid in full duplex */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Send the bits back to the hardware
	 */

	__raw_writeq(cfg, s->sbm_maccfg);

	return 1;
}
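/*
 * Summary of the combinations accepted above (editorial note): half duplex
 * allows flow control disabled/collision/carrier, full duplex allows flow
 * control disabled/frame; any other combination returns 0 and leaves the
 * hardware registers untouched.
 */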



/**********************************************************************
 *  SBMAC_INTR()
 *
 *  Interrupt handler for MAC interrupts
 *
 *  Input parameters:
 *       MAC structure
 *
 *  Return value:
 *       nothing
 ********************************************************************* */
static irqreturn_t sbmac_intr(int irq,void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct sbmac_softc *sc = netdev_priv(dev);
	uint64_t isr;
	int handled = 0;

	/*
	 * Read the ISR (this clears the bits in the real
	 * register, except for counter addr)
	 */

	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;

	if (isr == 0)
		return IRQ_RETVAL(0);
	handled = 1;

	/*
	 * Transmits on channel 0
	 */

	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);

	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
		if (netif_rx_schedule_prep(dev, &sc->napi)) {
			__raw_writeq(0, sc->sbm_imr);
			__netif_rx_schedule(dev, &sc->napi);
			/* Depend on the exit from poll to reenable intr */
		}
		else {
			/* may leave some packets behind */
			sbdma_rx_process(sc,&(sc->sbm_rxdma),
					 SBMAC_MAX_RXDESCR * 2, 0);
		}
	}
	return IRQ_RETVAL(handled);
}
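/*
 * Note (assumption about code outside this excerpt): the interrupt mask is
 * cleared here before __netif_rx_schedule(), and the NAPI poll routine
 * (sbmac_poll, not shown here) is expected to re-enable it once it has
 * drained the receive ring, as the "Depend on the exit from poll" comment
 * above indicates.
 */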


/**********************************************************************
 *  SBMAC_START_TX(skb,dev)
 *
 *  Start output on the specified interface.  Basically, we
 *  queue as many buffers as we can until the ring fills up, or
 *  we run off the end of the queue, whichever comes first.
 *
 *  Input parameters:
 *
 *
 *  Return value:
 *       nothing
 ********************************************************************* */
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	/* lock eth irq */
	spin_lock_irq (&sc->sbm_lock);

	/*
	 * Put the buffer on the transmit ring.  If we
	 * don't have room, stop the queue.
	 */

	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
		/* XXX save skb that we could not send */
		netif_stop_queue(dev);
		spin_unlock_irq(&sc->sbm_lock);

		return 1;
	}

	dev->trans_start = jiffies;

	spin_unlock_irq (&sc->sbm_lock);

	return 0;
}
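/*
 * Note (editorial summary, not an original driver comment): a non-zero
 * return here tells the stack the skb was not consumed; the queue has
 * already been stopped, so the packet is retried once sbdma_tx_process()
 * reclaims descriptors and wakes the queue again.
 */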

/**********************************************************************
 *  SBMAC_SETMULTI(sc)
 *
 *  Reprogram the multicast table into the hardware, given
 *  the list of multicasts associated with the interface
 *  structure.
 *
 *  Input parameters:
 *       sc - softc
 *
 *  Return value:
 *       nothing
 ********************************************************************* */

static void sbmac_setmulti(struct sbmac_softc *sc)
{
	uint64_t reg;
	void __iomem *port;
	int idx;
	struct dev_mc_list *mclist;
	struct net_device *dev = sc->sbm_dev;

	/*
	 * Clear out entire multicast table.  We do this by nuking
	 * the entire hash table and all the direct matches except
	 * the first one, which is used for our station address
	 */

	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
		__raw_writeq(0, port);
	}

	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
		port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
		__raw_writeq(0, port);
	}

	/*
	 * Clear the filter to say we don't want any multicasts.
	 */

	reg = __raw_readq(sc->sbm_rxfilter);
	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
	__raw_writeq(reg, sc->sbm_rxfilter);

	if (dev->flags & IFF_ALLMULTI) {
		/*
		 * Enable ALL multicasts.  Do this by inverting the
		 * multicast enable bit.
		 */
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
		__raw_writeq(reg, sc->sbm_rxfilter);
		return;
	}


	/*
	 * Program new multicast entries.  For now, only use the
	 * perfect filter.  In the future we'll need to use the
	 * hash filter if the perfect filter overflows
	 */

	/* XXX only using perfect filter for now, need to use hash
	 * XXX if the table overflows */

	idx = 1;	/* skip station address */
	mclist = dev->mc_list;
	while (mclist && (idx < MAC_ADDR_COUNT)) {
		reg = sbmac_addr2reg(mclist->dmi_addr);
		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
		__raw_writeq(reg, port);
		idx++;
		mclist = mclist->next;
	}

	/*
	 * Enable the "accept multicast bits" if we programmed at least one
	 * multicast.
	 */

	if (idx > 1) {
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= M_MAC_MCAST_EN;
		__raw_writeq(reg, sc->sbm_rxfilter);
	}
}
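/*
 * Note (editorial summary, not an original driver comment): only the
 * perfect-match filter is used, so at most MAC_ADDR_COUNT - 1 multicast
 * addresses can be programmed (slot 0 holds the station address); excess
 * entries are silently ignored until hash-filter support is added.
 */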

#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
/**********************************************************************
 *  SBMAC_PARSE_XDIGIT(str)
 *
 *  Parse a hex digit, returning its value
 *
 *  Input parameters:
 *       str - character
 *
 *  Return value:
 *       hex value, or -1 if invalid
 ********************************************************************* */

static int sbmac_parse_xdigit(char str)
{
	int digit;

	if ((str >= '0') && (str <= '9'))
		digit = str - '0';
	else if ((str >= 'a') && (str <= 'f'))
		digit = str - 'a' + 10;
	else if ((str >= 'A') && (str <= 'F'))
		digit = str - 'A' + 10;
	else
		return -1;

	return digit;
}

/**********************************************************************
 *  SBMAC_PARSE_HWADDR(str,hwaddr)
 *
 *  Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
 *  Ethernet address.
 *
 *  Input parameters:
 *       str - string
 *       hwaddr - pointer to hardware address
 *
 *  Return value:
 *       0 if ok, else -1
 ********************************************************************* */

static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
{
	int digit1,digit2;
	int idx = 6;

	while (*str && (idx > 0)) {
		digit1 = sbmac_parse_xdigit(*str);
		if (digit1 < 0)
			return -1;
		str++;
		if (!*str)
			return -1;

		if ((*str == ':') || (*str == '-')) {
			digit2 = digit1;
			digit1 = 0;
		}
		else {
			digit2 = sbmac_parse_xdigit(*str);
			if (digit2 < 0)
				return -1;
			str++;
		}

		*hwaddr++ = (digit1 << 4) | digit2;
		idx--;

		if (*str == '-')
			str++;
		if (*str == ':')
			str++;
	}
	return 0;
}
#endif
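/*
 * Worked example (illustrative, not from the original source):
 * sbmac_parse_hwaddr("40:00:00:00:01:00", hwaddr) fills hwaddr[] with
 * { 0x40, 0x00, 0x00, 0x00, 0x01, 0x00 } and returns 0; a stray character
 * such as 'g' makes sbmac_parse_xdigit() return -1 and the whole parse
 * fails with -1.
 */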

static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
{
	if (new_mtu > ENET_PACKET_SIZE)
		return -EINVAL;
	_dev->mtu = new_mtu;
	pr_info("changing the mtu to %d\n", new_mtu);
	return 0;
}
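/*
 * Note (editorial summary, not an original driver comment): the only bound
 * enforced here is the upper limit ENET_PACKET_SIZE; the new MTU is stored
 * directly in dev->mtu and no hardware reprogramming is done at this point.
 */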

/**********************************************************************
 *  SBMAC_INIT(dev)
 *
 *  Attach routine - init hardware and hook ourselves into linux
 *
 *  Input parameters:
 *       dev - net_device structure
 *
 *  Return value:
 *       status
 ********************************************************************* */

static int sbmac_init(struct platform_device *pldev, long long base)
{
	struct net_device *dev = pldev->dev.driver_data;
	int idx = pldev->id;
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned char *eaddr;
	uint64_t ea_reg;
	int i;
	int err;
	DECLARE_MAC_BUF(mac);

	sc->sbm_dev = dev;
	sc->sbe_idx = idx;

	eaddr = sc->sbm_hwaddr;

	/*
	 * Read the ethernet address.  The firmware left this programmed
	 * for us in the ethernet address register for each mac.
	 */

	ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
	__raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
	for (i = 0; i < 6; i++) {
		eaddr[i] = (uint8_t) (ea_reg & 0xFF);
		ea_reg >>= 8;
	}

	for (i = 0; i < 6; i++) {
		dev->dev_addr[i] = eaddr[i];
	}

	/*
	 * Initialize context (get pointers to registers and stuff), then
	 * allocate the memory for the descriptor tables.
	 */

	sbmac_initctx(sc);

	/*
	 * Set up Linux device callins
	 */

	spin_lock_init(&(sc->sbm_lock));

	dev->open = sbmac_open;
	dev->hard_start_xmit = sbmac_start_tx;
	dev->stop = sbmac_close;
	dev->set_multicast_list = sbmac_set_rx_mode;
	dev->do_ioctl = sbmac_mii_ioctl;
	dev->tx_timeout = sbmac_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &sc->napi, sbmac_poll, 16);

	dev->change_mtu = sb1250_change_mtu;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sbmac_netpoll;
#endif

	dev->irq = UNIT_INT(idx);

	/* This is needed for PASS2 for Rx H/W checksum feature */
	sbmac_set_iphdr_offset(sc);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR "%s.%d: unable to register netdev\n",
		       sbmac_string, idx);
		sbmac_uninitctx(sc);
		return err;
	}

	pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);

	if (sc->rx_hw_checksum == ENABLE)
		pr_info("%s: enabling TCP rcv checksum\n", dev->name);

	/*
	 * Display Ethernet address (this is called during the config
	 * process so we need to finish off the config message that
	 * was being displayed)
	 */
	pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %s\n",
		dev->name, base, print_mac(mac, eaddr));

	sc->mii_bus.name = sbmac_mdio_string;
	snprintf(sc->mii_bus.id, MII_BUS_ID_SIZE, "%x", idx);
	sc->mii_bus.priv = sc;
	sc->mii_bus.read = sbmac_mii_read;
	sc->mii_bus.write = sbmac_mii_write;
	sc->mii_bus.irq = sc->phy_irq;
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		sc->mii_bus.irq[i] = SBMAC_PHY_INT;

	sc->mii_bus.dev = &pldev->dev;
	dev_set_drvdata(&pldev->dev, &sc->mii_bus);

	return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
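/*
 * sbmac_open() - the netdevice open handler.  Clear any stale interrupt
 * status, claim the MAC's IRQ, register the MDIO bus, attach to the PHY,
 * turn the channel on and enable NAPI polling.
 */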
static int sbmac_open(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	int err;

	if (debug > 1)
		pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq);

	/*
	 * map/route interrupt (clear status first, in case something
	 * weird is pending; we haven't initialized the mac registers
	 * yet)
	 */
	__raw_readq(sc->sbm_isr);
	err = request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev);
	if (err) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
		       dev->irq);
		goto out_err;
	}

	/*
	 * Probe PHY address
	 */
	err = mdiobus_register(&sc->mii_bus);
	if (err) {
		printk(KERN_ERR "%s: unable to register MDIO bus\n",
		       dev->name);
		goto out_unirq;
	}

	sc->sbm_speed = sbmac_speed_none;
	sc->sbm_duplex = sbmac_duplex_none;
	sc->sbm_fc = sbmac_fc_none;
	sc->sbm_pause = -1;
	sc->sbm_link = 0;

	/*
	 * Attach to the PHY
	 */
	err = sbmac_mii_probe(dev);
	if (err)
		goto out_unregister;

	/*
	 * Turn on the channel
	 */
	sbmac_set_channel_state(sc, sbmac_state_on);

	netif_start_queue(dev);

	sbmac_set_rx_mode(dev);

	phy_start(sc->phy_dev);

	napi_enable(&sc->napi);

	return 0;

out_unregister:
	mdiobus_unregister(&sc->mii_bus);

out_unirq:
	free_irq(dev->irq, dev);

out_err:
	return err;
}

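/*
 * sbmac_mii_probe() - locate the first PHY on the MDIO bus, connect to it
 * through the PHY abstraction layer and mask off link modes the
 * controller does not support.
 */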
static int sbmac_mii_probe(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	struct phy_device *phy_dev;
	int i;

	for (i = 0; i < PHY_MAX_ADDR; i++) {
		phy_dev = sc->mii_bus.phy_map[i];
		if (phy_dev)
			break;
	}
	if (!phy_dev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENXIO;
	}

	phy_dev = phy_connect(dev, phy_dev->dev.bus_id, &sbmac_mii_poll, 0,
			      PHY_INTERFACE_MODE_GMII);
	if (IS_ERR(phy_dev)) {
		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
		return PTR_ERR(phy_dev);
	}

	/* Remove any features not supported by the controller */
	phy_dev->supported &= SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_MII |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause;
	phy_dev->advertising = phy_dev->supported;

	pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		dev->name, phy_dev->drv->name,
		phy_dev->dev.bus_id, phy_dev->irq);

	sc->phy_dev = phy_dev;

	return 0;
}

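/*
 * sbmac_mii_poll() - PHY state-change callback.  Mirror the PHY's
 * link/speed/duplex/pause state into the softc and restart the channel
 * when a parameter that affects the MAC configuration has changed.
 */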
static void sbmac_mii_poll(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	struct phy_device *phy_dev = sc->phy_dev;
	unsigned long flags;
	enum sbmac_fc fc;
	int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg;

	link_chg = (sc->sbm_link != phy_dev->link);
	speed_chg = (sc->sbm_speed != phy_dev->speed);
	duplex_chg = (sc->sbm_duplex != phy_dev->duplex);
	pause_chg = (sc->sbm_pause != phy_dev->pause);

	if (!link_chg && !speed_chg && !duplex_chg && !pause_chg)
		return;			/* Hmmm... */

	if (!phy_dev->link) {
		if (link_chg) {
			sc->sbm_link = phy_dev->link;
			sc->sbm_speed = sbmac_speed_none;
			sc->sbm_duplex = sbmac_duplex_none;
			sc->sbm_fc = sbmac_fc_disabled;
			sc->sbm_pause = -1;
			pr_info("%s: link unavailable\n", dev->name);
		}
		return;
	}

	if (phy_dev->duplex == DUPLEX_FULL) {
		if (phy_dev->pause)
			fc = sbmac_fc_frame;
		else
			fc = sbmac_fc_disabled;
	} else
		fc = sbmac_fc_collision;
	fc_chg = (sc->sbm_fc != fc);

	pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed,
		phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H');

	spin_lock_irqsave(&sc->sbm_lock, flags);

	sc->sbm_speed = phy_dev->speed;
	sc->sbm_duplex = phy_dev->duplex;
	sc->sbm_fc = fc;
	sc->sbm_pause = phy_dev->pause;
	sc->sbm_link = phy_dev->link;

	if ((speed_chg || duplex_chg || fc_chg) &&
	    sc->sbm_state != sbmac_state_off) {
		/*
		 * something changed, restart the channel
		 */
		if (debug > 1)
			pr_debug("%s: restarting channel "
				 "because PHY state changed\n", dev->name);
		sbmac_channel_stop(sc);
		sbmac_channel_start(sc);
	}

	spin_unlock_irqrestore(&sc->sbm_lock, flags);
}

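/*
 * sbmac_tx_timeout() - netdevice watchdog handler: reset the transmit
 * timestamp, count the error and log a warning.
 */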
static void sbmac_tx_timeout (struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	spin_lock_irq (&sc->sbm_lock);

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	spin_unlock_irq (&sc->sbm_lock);

	printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
}

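/*
 * sbmac_set_rx_mode() - bring promiscuous mode in line with dev->flags
 * and reprogram the multicast filter.
 */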
static void sbmac_set_rx_mode(struct net_device *dev)
{
	unsigned long flags;
	struct sbmac_softc *sc = netdev_priv(dev);

	spin_lock_irqsave(&sc->sbm_lock, flags);
	if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
		/*
		 * Promiscuous changed.
		 */
		if (dev->flags & IFF_PROMISC) {
			sbmac_promiscuous_mode(sc,1);
		}
		else {
			sbmac_promiscuous_mode(sc,0);
		}
	}
	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	/*
	 * Program the multicasts.  Do this every time.
	 */
	sbmac_setmulti(sc);
}

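/*
 * sbmac_mii_ioctl() - hand MII ioctls to the PHY layer once the
 * interface is running and a PHY is attached.
 */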
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	if (!netif_running(dev) || !sc->phy_dev)
		return -EINVAL;

	return phy_mii_ioctl(sc->phy_dev, if_mii(rq), cmd);
}

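/*
 * sbmac_close() - the netdevice stop handler; undoes sbmac_open():
 * disables NAPI, stops and disconnects the PHY, shuts the channel down,
 * unregisters the MDIO bus, releases the IRQ and empties the DMA rings.
 */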
static int sbmac_close(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	napi_disable(&sc->napi);

	phy_stop(sc->phy_dev);

	sbmac_set_channel_state(sc, sbmac_state_off);

	netif_stop_queue(dev);

	if (debug > 1)
		pr_debug("%s: Shutting down ethercard\n", dev->name);

	phy_disconnect(sc->phy_dev);
	sc->phy_dev = NULL;

	mdiobus_unregister(&sc->mii_bus);

	free_irq(dev->irq, dev);

	sbdma_emptyring(&(sc->sbm_txdma));
	sbdma_emptyring(&(sc->sbm_rxdma));

	return 0;
}

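/*
 * sbmac_poll() - NAPI poll handler.  Process up to 'budget' received
 * packets plus completed transmits; once the work is done, complete the
 * poll and re-enable the MAC's receive/transmit interrupts.
 */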
static int sbmac_poll(struct napi_struct *napi, int budget)
{
	struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
	struct net_device *dev = sc->sbm_dev;
	int work_done;

	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);

#ifdef CONFIG_SBMAC_COALESCE
		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
			     sc->sbm_imr);
#else
		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
	}

	return work_done;
}

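/*
 * sbmac_probe() - platform driver probe.  Map the MAC's registers, skip
 * the unit if the firmware left its address register zero, otherwise
 * allocate the netdevice and run the common initialization.
 */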
static int __init sbmac_probe(struct platform_device *pldev)
{
	struct net_device *dev;
	struct sbmac_softc *sc;
	void __iomem *sbm_base;
	struct resource *res;
	u64 sbmac_orig_hwaddr;
	int err;

	res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
	BUG_ON(!res);
	sbm_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!sbm_base) {
		printk(KERN_ERR "%s: unable to map device registers\n",
		       pldev->dev.bus_id);
		err = -ENOMEM;
		goto out_out;
	}

	/*
	 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
	 * value for us by the firmware if we're going to use this MAC.
	 * If we find a zero, skip this MAC.
	 */
	sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
	pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", pldev->dev.bus_id,
		 sbmac_orig_hwaddr ? "" : "not ", (long long)res->start);
	if (sbmac_orig_hwaddr == 0) {
		err = 0;
		goto out_unmap;
	}

	/*
	 * Okay, cool.  Initialize this MAC.
	 */
	dev = alloc_etherdev(sizeof(struct sbmac_softc));
	if (!dev) {
		printk(KERN_ERR "%s: unable to allocate etherdev\n",
		       pldev->dev.bus_id);
		err = -ENOMEM;
		goto out_unmap;
	}

	pldev->dev.driver_data = dev;
	SET_NETDEV_DEV(dev, &pldev->dev);

	sc = netdev_priv(dev);
	sc->sbm_base = sbm_base;

	err = sbmac_init(pldev, res->start);
	if (err)
		goto out_kfree;

	return 0;

out_kfree:
	free_netdev(dev);
	__raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR);

out_unmap:
	iounmap(sbm_base);

out_out:
	return err;
}

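/*
 * sbmac_remove() - platform driver remove; tear down what sbmac_probe()
 * set up.
 */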
static int __exit sbmac_remove(struct platform_device *pldev)
{
	struct net_device *dev = pldev->dev.driver_data;
	struct sbmac_softc *sc = netdev_priv(dev);

	unregister_netdev(dev);
	sbmac_uninitctx(sc);
	iounmap(sc->sbm_base);
	free_netdev(dev);

	return 0;
}


static struct platform_device **sbmac_pldev;
static int sbmac_max_units;

#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
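/*
 * sbmac_setup_hwaddr() - for bringup without firmware support, write a
 * compile-time MAC address into a unit's address register so the probe
 * will pick that unit up.
 */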
static void __init sbmac_setup_hwaddr(int idx, char *addr)
{
	void __iomem *sbm_base;
	unsigned long start, end;
	uint8_t eaddr[6];
	uint64_t val;

	if (idx >= sbmac_max_units)
		return;

	start = A_MAC_CHANNEL_BASE(idx);
	end = A_MAC_CHANNEL_BASE(idx + 1) - 1;

	sbm_base = ioremap_nocache(start, end - start + 1);
	if (!sbm_base) {
		printk(KERN_ERR "%s: unable to map device registers\n",
		       sbmac_string);
		return;
	}

	sbmac_parse_hwaddr(addr, eaddr);
	val = sbmac_addr2reg(eaddr);
	__raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
	val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);

	iounmap(sbm_base);
}
#endif

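/*
 * sbmac_platform_probe_one() - register a platform device covering one
 * MAC channel's register window so sbmac_probe() gets called for it.
 */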
static int __init sbmac_platform_probe_one(int idx)
{
	struct platform_device *pldev;
	struct {
		struct resource r;
		char name[strlen(sbmac_pretty) + 4];
	} *res;
	int err;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		printk(KERN_ERR "%s.%d: unable to allocate memory\n",
		       sbmac_string, idx);
		err = -ENOMEM;
		goto out_err;
	}

	/*
	 * This is the base address of the MAC.
	 */
	snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
	res->r.name = res->name;
	res->r.flags = IORESOURCE_MEM;
	res->r.start = A_MAC_CHANNEL_BASE(idx);
	res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;

	pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
	if (IS_ERR(pldev)) {
		printk(KERN_ERR "%s.%d: unable to register platform device\n",
		       sbmac_string, idx);
		err = PTR_ERR(pldev);
		goto out_kfree;
	}

	if (!pldev->dev.driver) {
		err = 0;		/* No hardware at this address. */
		goto out_unregister;
	}

	sbmac_pldev[idx] = pldev;
	return 0;

out_unregister:
	platform_device_unregister(pldev);

out_kfree:
	kfree(res);

out_err:
	return err;
}

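/*
 * sbmac_platform_probe() - determine how many MACs this SOC provides,
 * optionally pre-set their addresses, then register a platform device
 * per unit.
 */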
static void __init sbmac_platform_probe(void)
{
	int i;

	/* Set the number of available units based on the SOC type. */
	switch (soc_type) {
	case K_SYS_SOC_TYPE_BCM1250:
	case K_SYS_SOC_TYPE_BCM1250_ALT:
		sbmac_max_units = 3;
		break;
	case K_SYS_SOC_TYPE_BCM1120:
	case K_SYS_SOC_TYPE_BCM1125:
	case K_SYS_SOC_TYPE_BCM1125H:
	case K_SYS_SOC_TYPE_BCM1250_ALT2:	/* Hybrid */
		sbmac_max_units = 2;
		break;
	case K_SYS_SOC_TYPE_BCM1x55:
	case K_SYS_SOC_TYPE_BCM1x80:
		sbmac_max_units = 4;
		break;
	default:
		return;				/* none */
	}

	/*
	 * For bringup when not using the firmware, we can pre-fill
	 * the MAC addresses using the environment variables
	 * specified in this file (or maybe from the config file?)
	 */
#ifdef SBMAC_ETH0_HWADDR
	sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
#endif
#ifdef SBMAC_ETH1_HWADDR
	sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
#endif
#ifdef SBMAC_ETH2_HWADDR
	sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
#endif
#ifdef SBMAC_ETH3_HWADDR
	sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
#endif

	sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
			      GFP_KERNEL);
	if (!sbmac_pldev) {
		printk(KERN_ERR "%s: unable to allocate memory\n",
		       sbmac_string);
		return;
	}

	/*
	 * Walk through the Ethernet controllers and find
	 * those who have their MAC addresses set.
	 */
	for (i = 0; i < sbmac_max_units; i++)
		if (sbmac_platform_probe_one(i))
			break;
}

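/*
 * sbmac_platform_cleanup() - unregister the platform devices created by
 * sbmac_platform_probe().
 */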
static void __exit sbmac_platform_cleanup(void)
{
	int i;

	for (i = 0; i < sbmac_max_units; i++)
		platform_device_unregister(sbmac_pldev[i]);
	kfree(sbmac_pldev);
}


static struct platform_driver sbmac_driver = {
	.probe = sbmac_probe,
	.remove = __exit_p(sbmac_remove),
	.driver = {
		.name = sbmac_string,
	},
};


static int __init sbmac_init_module(void)
{
	int err;

	err = platform_driver_register(&sbmac_driver);
	if (err)
		return err;

	sbmac_platform_probe();

	return err;
}

static void __exit sbmac_cleanup_module(void)
{
	sbmac_platform_cleanup();
	platform_driver_unregister(&sbmac_driver);
}

module_init(sbmac_init_module);
module_exit(sbmac_cleanup_module);