Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (37 commits)
  ucc_geth: use correct UCCE macros
  net_dma: acquire/release dma channels on ifup/ifdown
  cxgb3: Keep LRO off if disabled when interface is down
  sfc: SFT9001: Fix condition for LNPGA power-off
  dccp ccid-3: Fix RFC reference
  smsc911x: register irq with device name, not driver name
  smsc911x: fix smsc911x_reg_read compiler warning
  forcedeth: napi schedule lock fix
  net: fix section mismatch warnings in dccp/ccids/lib/tfrc.c
  forcedeth: remove mgmt unit for mcp79 chipset
  qlge: Remove dynamic alloc of rx ring control blocks.
  qlge: Fix schedule while atomic issue.
  qlge: Remove support for device ID 8000.
  qlge: Get rid of split addresses in hardware control blocks.
  qlge: Get rid of volatile usage for shadow register.
  forcedeth: version bump and copyright
  forcedeth: xmit lock fix
  netdev: missing validate_address hooks
  netdev: add missing set_mac_address hook
  netdev: gianfar: add MII ioctl handler
  ...
This commit is contained in:
Linus Torvalds 2009-01-12 16:22:31 -08:00
commit 23ead72912
59 changed files with 338 additions and 368 deletions

View File

@ -177,6 +177,7 @@ static const struct net_device_ops el2_netdev_ops = {
.ndo_get_stats = eip_get_stats, .ndo_get_stats = eip_get_stats,
.ndo_set_multicast_list = eip_set_multicast_list, .ndo_set_multicast_list = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll, .ndo_poll_controller = eip_poll,

View File

@ -3109,6 +3109,8 @@ static void acpi_set_WOL(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev); struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr; void __iomem *ioaddr = vp->ioaddr;
device_set_wakeup_enable(vp->gendev, vp->enable_wol);
if (vp->enable_wol) { if (vp->enable_wol) {
/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
EL3WINDOW(7); EL3WINDOW(7);

View File

@ -1821,6 +1821,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_open = cp_open, .ndo_open = cp_open,
.ndo_stop = cp_close, .ndo_stop = cp_close,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = cp_set_rx_mode, .ndo_set_multicast_list = cp_set_rx_mode,
.ndo_get_stats = cp_get_stats, .ndo_get_stats = cp_get_stats,
.ndo_do_ioctl = cp_ioctl, .ndo_do_ioctl = cp_ioctl,
@ -1832,6 +1833,7 @@ static const struct net_device_ops cp_netdev_ops = {
#ifdef BROKEN #ifdef BROKEN
.ndo_change_mtu = cp_change_mtu, .ndo_change_mtu = cp_change_mtu,
#endif #endif
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cp_poll_controller, .ndo_poll_controller = cp_poll_controller,
#endif #endif

View File

@ -917,6 +917,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_stop = rtl8139_close, .ndo_stop = rtl8139_close,
.ndo_get_stats = rtl8139_get_stats, .ndo_get_stats = rtl8139_get_stats,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_start_xmit = rtl8139_start_xmit, .ndo_start_xmit = rtl8139_start_xmit,
.ndo_set_multicast_list = rtl8139_set_rx_mode, .ndo_set_multicast_list = rtl8139_set_rx_mode,
.ndo_do_ioctl = netdev_ioctl, .ndo_do_ioctl = netdev_ioctl,
@ -924,7 +925,6 @@ static const struct net_device_ops rtl8139_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8139_poll_controller, .ndo_poll_controller = rtl8139_poll_controller,
#endif #endif
}; };
static int __devinit rtl8139_init_one (struct pci_dev *pdev, static int __devinit rtl8139_init_one (struct pci_dev *pdev,

View File

@ -63,6 +63,7 @@ const struct net_device_ops ei_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -68,6 +68,7 @@ const struct net_device_ops eip_netdev_ops = {
.ndo_get_stats = eip_get_stats, .ndo_get_stats = eip_get_stats,
.ndo_set_multicast_list = eip_set_multicast_list, .ndo_set_multicast_list = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll, .ndo_poll_controller = eip_poll,

View File

@ -1600,7 +1600,7 @@ config 8139_OLD_RX_RESET
old RX-reset behavior. If unsure, say N. old RX-reset behavior. If unsure, say N.
config R6040 config R6040
tristate "RDC R6040 Fast Ethernet Adapter support (EXPERIMENTAL)" tristate "RDC R6040 Fast Ethernet Adapter support"
depends on NET_PCI && PCI depends on NET_PCI && PCI
select CRC32 select CRC32
select MII select MII

View File

@ -460,6 +460,7 @@ static const struct net_device_ops ace_netdev_ops = {
.ndo_get_stats = ace_get_stats, .ndo_get_stats = ace_get_stats,
.ndo_start_xmit = ace_start_xmit, .ndo_start_xmit = ace_start_xmit,
.ndo_set_multicast_list = ace_set_multicast_list, .ndo_set_multicast_list = ace_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ace_set_mac_addr, .ndo_set_mac_address = ace_set_mac_addr,
.ndo_change_mtu = ace_change_mtu, .ndo_change_mtu = ace_change_mtu,
#if ACENIC_DO_VLAN #if ACENIC_DO_VLAN

View File

@ -646,6 +646,7 @@ static const struct net_device_ops etherh_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -1357,6 +1357,7 @@ static const struct net_device_ops ks8695_netdev_ops = {
.ndo_start_xmit = ks8695_start_xmit, .ndo_start_xmit = ks8695_start_xmit,
.ndo_tx_timeout = ks8695_timeout, .ndo_tx_timeout = ks8695_timeout,
.ndo_set_mac_address = ks8695_set_mac, .ndo_set_mac_address = ks8695_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = ks8695_set_multicast, .ndo_set_multicast_list = ks8695_set_multicast,
}; };

View File

@ -73,8 +73,8 @@
(BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
#define RX_PKT_OFFSET 30 #define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET + 64) #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
/* minimum number of free TX descriptors required to wake up TX process */ /* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
@ -682,7 +682,6 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
} }
rh = (struct rx_header *) skb->data; rh = (struct rx_header *) skb->data;
skb_reserve(skb, RX_PKT_OFFSET);
rh->len = 0; rh->len = 0;
rh->flags = 0; rh->flags = 0;
@ -693,13 +692,13 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
if (src_map != NULL) if (src_map != NULL)
src_map->skb = NULL; src_map->skb = NULL;
ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET)); ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
if (dest_idx == (B44_RX_RING_SIZE - 1)) if (dest_idx == (B44_RX_RING_SIZE - 1))
ctrl |= DESC_CTRL_EOT; ctrl |= DESC_CTRL_EOT;
dp = &bp->rx_ring[dest_idx]; dp = &bp->rx_ring[dest_idx];
dp->ctrl = cpu_to_le32(ctrl); dp->ctrl = cpu_to_le32(ctrl);
dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset); dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
if (bp->flags & B44_FLAG_RX_RING_HACK) if (bp->flags & B44_FLAG_RX_RING_HACK)
b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
@ -809,8 +808,8 @@ static int b44_rx(struct b44 *bp, int budget)
ssb_dma_unmap_single(bp->sdev, map, ssb_dma_unmap_single(bp->sdev, map,
skb_size, DMA_FROM_DEVICE); skb_size, DMA_FROM_DEVICE);
/* Leave out rx_header */ /* Leave out rx_header */
skb_put(skb, len + RX_PKT_OFFSET); skb_put(skb, len + RX_PKT_OFFSET);
skb_pull(skb, RX_PKT_OFFSET); skb_pull(skb, RX_PKT_OFFSET);
} else { } else {
struct sk_buff *copy_skb; struct sk_buff *copy_skb;

View File

@ -50,12 +50,17 @@ struct vlan_group;
struct adapter; struct adapter;
struct sge_qset; struct sge_qset;
enum { /* rx_offload flags */
T3_RX_CSUM = 1 << 0,
T3_LRO = 1 << 1,
};
struct port_info { struct port_info {
struct adapter *adapter; struct adapter *adapter;
struct vlan_group *vlan_grp; struct vlan_group *vlan_grp;
struct sge_qset *qs; struct sge_qset *qs;
u8 port_id; u8 port_id;
u8 rx_csum_offload; u8 rx_offload;
u8 nqsets; u8 nqsets;
u8 first_qset; u8 first_qset;
struct cphy phy; struct cphy phy;

View File

@ -546,7 +546,7 @@ static int setup_sge_qsets(struct adapter *adap)
pi->qs = &adap->sge.qs[pi->first_qset]; pi->qs = &adap->sge.qs[pi->first_qset];
for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
++j, ++qset_idx) { ++j, ++qset_idx) {
set_qset_lro(dev, qset_idx, pi->rx_csum_offload); set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
err = t3_sge_alloc_qset(adap, qset_idx, 1, err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 : (adap->flags & USING_MSIX) ? qset_idx + 1 :
irq_idx, irq_idx,
@ -1657,17 +1657,19 @@ static u32 get_rx_csum(struct net_device *dev)
{ {
struct port_info *p = netdev_priv(dev); struct port_info *p = netdev_priv(dev);
return p->rx_csum_offload; return p->rx_offload & T3_RX_CSUM;
} }
static int set_rx_csum(struct net_device *dev, u32 data) static int set_rx_csum(struct net_device *dev, u32 data)
{ {
struct port_info *p = netdev_priv(dev); struct port_info *p = netdev_priv(dev);
p->rx_csum_offload = data; if (data) {
if (!data) { p->rx_offload |= T3_RX_CSUM;
} else {
int i; int i;
p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
set_qset_lro(dev, i, 0); set_qset_lro(dev, i, 0);
} }
@ -1830,15 +1832,18 @@ static int cxgb3_set_flags(struct net_device *dev, u32 data)
int i; int i;
if (data & ETH_FLAG_LRO) { if (data & ETH_FLAG_LRO) {
if (!pi->rx_csum_offload) if (!(pi->rx_offload & T3_RX_CSUM))
return -EINVAL; return -EINVAL;
pi->rx_offload |= T3_LRO;
for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
set_qset_lro(dev, i, 1); set_qset_lro(dev, i, 1);
} else } else {
pi->rx_offload &= ~T3_LRO;
for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
set_qset_lro(dev, i, 0); set_qset_lro(dev, i, 0);
}
return 0; return 0;
} }
@ -1926,7 +1931,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
pi = adap2pinfo(adapter, i); pi = adap2pinfo(adapter, i);
if (t.qset_idx >= pi->first_qset && if (t.qset_idx >= pi->first_qset &&
t.qset_idx < pi->first_qset + pi->nqsets && t.qset_idx < pi->first_qset + pi->nqsets &&
!pi->rx_csum_offload) !(pi->rx_offload & T3_RX_CSUM))
return -EINVAL; return -EINVAL;
} }
@ -2946,7 +2951,7 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->port[i] = netdev; adapter->port[i] = netdev;
pi = netdev_priv(netdev); pi = netdev_priv(netdev);
pi->adapter = adapter; pi->adapter = adapter;
pi->rx_csum_offload = 1; pi->rx_offload = T3_RX_CSUM | T3_LRO;
pi->port_id = i; pi->port_id = i;
netif_carrier_off(netdev); netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev); netif_tx_stop_all_queues(netdev);
@ -2955,6 +2960,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->mem_end = mmio_start + mmio_len - 1; netdev->mem_end = mmio_start + mmio_len - 1;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->features |= NETIF_F_LLTX; netdev->features |= NETIF_F_LLTX;
netdev->features |= NETIF_F_LRO;
if (pci_using_dac) if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;

View File

@ -1932,7 +1932,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb_pull(skb, sizeof(*p) + pad); skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans(skb, adap->port[p->iff]); skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
pi = netdev_priv(skb->dev); pi = netdev_priv(skb->dev);
if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) && if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
!p->fragment) { !p->fragment) {
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;

View File

@ -390,7 +390,8 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
} }
static DEFINE_MUTEX(nvm_mutex); static DEFINE_MUTEX(nvm_mutex);
static pid_t nvm_owner = -1; static pid_t nvm_owner_pid = -1;
static char nvm_owner_name[TASK_COMM_LEN] = "";
/** /**
* e1000_acquire_swflag_ich8lan - Acquire software control flag * e1000_acquire_swflag_ich8lan - Acquire software control flag
@ -408,11 +409,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
might_sleep(); might_sleep();
if (!mutex_trylock(&nvm_mutex)) { if (!mutex_trylock(&nvm_mutex)) {
WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n", WARN(1, KERN_ERR "e1000e mutex contention. Owned by process "
nvm_owner); "%s (pid %d), required by process %s (pid %d)\n",
nvm_owner_name, nvm_owner_pid,
current->comm, current->pid);
mutex_lock(&nvm_mutex); mutex_lock(&nvm_mutex);
} }
nvm_owner = current->pid; nvm_owner_pid = current->pid;
strncpy(nvm_owner_name, current->comm, TASK_COMM_LEN);
while (timeout) { while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL); extcnf_ctrl = er32(EXTCNF_CTRL);
@ -430,7 +435,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl); ew32(EXTCNF_CTRL, extcnf_ctrl);
nvm_owner = -1; nvm_owner_pid = -1;
strcpy(nvm_owner_name, "");
mutex_unlock(&nvm_mutex); mutex_unlock(&nvm_mutex);
return -E1000_ERR_CONFIG; return -E1000_ERR_CONFIG;
} }
@ -454,7 +460,8 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl); ew32(EXTCNF_CTRL, extcnf_ctrl);
nvm_owner = -1; nvm_owner_pid = -1;
strcpy(nvm_owner_name, "");
mutex_unlock(&nvm_mutex); mutex_unlock(&nvm_mutex);
} }

View File

@ -169,6 +169,7 @@ static const struct net_device_ops e21_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -1599,6 +1599,7 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_start_xmit = enic_hard_start_xmit, .ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats, .ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = enic_set_multicast_list, .ndo_set_multicast_list = enic_set_multicast_list,
.ndo_change_mtu = enic_change_mtu, .ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register, .ndo_vlan_rx_register = enic_vlan_rx_register,

View File

@ -13,7 +13,7 @@
* Copyright (C) 2004 Andrew de Quincey (wol support) * Copyright (C) 2004 Andrew de Quincey (wol support)
* Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
* IRQ rate fixes, bigendian fixes, cleanups, verification) * IRQ rate fixes, bigendian fixes, cleanups, verification)
* Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
@ -39,7 +39,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic. * superfluous timer interrupts from the nic.
*/ */
#define FORCEDETH_VERSION "0.61" #define FORCEDETH_VERSION "0.62"
#define DRV_NAME "forcedeth" #define DRV_NAME "forcedeth"
#include <linux/module.h> #include <linux/module.h>
@ -2096,14 +2096,15 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
} }
spin_lock_irqsave(&np->lock, flags);
empty_slots = nv_get_empty_tx_slots(np); empty_slots = nv_get_empty_tx_slots(np);
if (unlikely(empty_slots <= entries)) { if (unlikely(empty_slots <= entries)) {
spin_lock_irqsave(&np->lock, flags);
netif_stop_queue(dev); netif_stop_queue(dev);
np->tx_stop = 1; np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags); spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
spin_unlock_irqrestore(&np->lock, flags);
start_tx = put_tx = np->put_tx.orig; start_tx = put_tx = np->put_tx.orig;
@ -2214,14 +2215,15 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
} }
spin_lock_irqsave(&np->lock, flags);
empty_slots = nv_get_empty_tx_slots(np); empty_slots = nv_get_empty_tx_slots(np);
if (unlikely(empty_slots <= entries)) { if (unlikely(empty_slots <= entries)) {
spin_lock_irqsave(&np->lock, flags);
netif_stop_queue(dev); netif_stop_queue(dev);
np->tx_stop = 1; np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags); spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
spin_unlock_irqrestore(&np->lock, flags);
start_tx = put_tx = np->put_tx.ex; start_tx = put_tx = np->put_tx.ex;
start_tx_ctx = np->put_tx_ctx; start_tx_ctx = np->put_tx_ctx;
@ -3403,10 +3405,10 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
#ifdef CONFIG_FORCEDETH_NAPI #ifdef CONFIG_FORCEDETH_NAPI
if (events & NVREG_IRQ_RX_ALL) { if (events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
netif_rx_schedule(&np->napi); netif_rx_schedule(&np->napi);
/* Disable furthur receive irq's */ /* Disable furthur receive irq's */
spin_lock(&np->lock);
np->irqmask &= ~NVREG_IRQ_RX_ALL; np->irqmask &= ~NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED) if (np->msi_flags & NV_MSI_X_ENABLED)
@ -3520,10 +3522,10 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
#ifdef CONFIG_FORCEDETH_NAPI #ifdef CONFIG_FORCEDETH_NAPI
if (events & NVREG_IRQ_RX_ALL) { if (events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
netif_rx_schedule(&np->napi); netif_rx_schedule(&np->napi);
/* Disable furthur receive irq's */ /* Disable furthur receive irq's */
spin_lock(&np->lock);
np->irqmask &= ~NVREG_IRQ_RX_ALL; np->irqmask &= ~NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED) if (np->msi_flags & NV_MSI_X_ENABLED)
@ -6167,19 +6169,19 @@ static struct pci_device_id pci_tbl[] = {
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
}, },
{0,}, {0,},
}; };

View File

@ -296,6 +296,20 @@ err_out:
return err; return err;
} }
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct gfar_private *priv = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
if (!priv->phydev)
return -ENODEV;
return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
/* Set up the ethernet device structure, private data, /* Set up the ethernet device structure, private data,
* and anything else we need before we start */ * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev, static int gfar_probe(struct of_device *ofdev,
@ -366,6 +380,7 @@ static int gfar_probe(struct of_device *ofdev,
dev->set_multicast_list = gfar_set_multi; dev->set_multicast_list = gfar_set_multi;
dev->ethtool_ops = &gfar_ethtool_ops; dev->ethtool_ops = &gfar_ethtool_ops;
dev->do_ioctl = gfar_ioctl;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
priv->rx_csum_enable = 1; priv->rx_csum_enable = 1;

View File

@ -576,6 +576,7 @@ static const struct net_device_ops hamachi_netdev_ops = {
.ndo_set_multicast_list = set_rx_mode, .ndo_set_multicast_list = set_rx_mode,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = hamachi_tx_timeout, .ndo_tx_timeout = hamachi_tx_timeout,
.ndo_do_ioctl = netdev_ioctl, .ndo_do_ioctl = netdev_ioctl,
}; };

View File

@ -717,11 +717,12 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
{ {
struct sixpack *sp = sp_get(tty); struct sixpack *sp = sp_get(tty);
struct net_device *dev = sp->dev; struct net_device *dev;
unsigned int tmp, err; unsigned int tmp, err;
if (!sp) if (!sp)
return -ENXIO; return -ENXIO;
dev = sp->dev;
switch(cmd) { switch(cmd) {
case SIOCGIFNAME: case SIOCGIFNAME:

View File

@ -166,6 +166,7 @@ static const struct net_device_ops hpp_netdev_ops = {
.ndo_get_stats = eip_get_stats, .ndo_get_stats = eip_get_stats,
.ndo_set_multicast_list = eip_set_multicast_list, .ndo_set_multicast_list = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll, .ndo_poll_controller = eip_poll,

View File

@ -103,6 +103,7 @@ static const struct net_device_ops hydra_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -594,7 +594,7 @@ static int au1k_irda_rx(struct net_device *dev)
update_rx_stats(dev, flags, count); update_rx_stats(dev, flags, count);
skb=alloc_skb(count+1,GFP_ATOMIC); skb=alloc_skb(count+1,GFP_ATOMIC);
if (skb == NULL) { if (skb == NULL) {
aup->stats.rx_dropped++; aup->netdev->stats.rx_dropped++;
continue; continue;
} }
skb_reserve(skb, 1); skb_reserve(skb, 1);

View File

@ -1194,13 +1194,13 @@ toshoboe_interrupt (int irq, void *dev_id)
txp = txpc; txp = txpc;
txpc++; txpc++;
txpc %= TX_SLOTS; txpc %= TX_SLOTS;
self->stats.tx_packets++; self->netdev->stats.tx_packets++;
if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS) if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX; self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
} }
self->stats.tx_packets--; self->netdev->stats.tx_packets--;
#else #else
self->stats.tx_packets++; self->netdev->stats.tx_packets++;
#endif #endif
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX); toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
} }
@ -1280,7 +1280,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
skb_put (skb, len); skb_put (skb, len);
skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs], skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
len); len);
self->stats.rx_packets++; self->netdev->stats.rx_packets++;
skb->dev = self->netdev; skb->dev = self->netdev;
skb_reset_mac_header(skb); skb_reset_mac_header(skb);
skb->protocol = htons (ETH_P_IRDA); skb->protocol = htons (ETH_P_IRDA);

View File

@ -486,6 +486,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -952,6 +952,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_get_stats = mlx4_en_get_stats, .ndo_get_stats = mlx4_en_get_stats,
.ndo_set_multicast_list = mlx4_en_set_multicast, .ndo_set_multicast_list = mlx4_en_set_multicast,
.ndo_set_mac_address = mlx4_en_set_mac, .ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu, .ndo_change_mtu = mlx4_en_change_mtu,
.ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_register = mlx4_en_vlan_rx_register, .ndo_vlan_rx_register = mlx4_en_vlan_rx_register,

View File

@ -202,6 +202,7 @@ static const struct net_device_ops ne_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -208,6 +208,7 @@ static const struct net_device_ops ne2k_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -1956,6 +1956,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_change_mtu = ns83820_change_mtu, .ndo_change_mtu = ns83820_change_mtu,
.ndo_set_multicast_list = ns83820_set_multicast, .ndo_set_multicast_list = ns83820_set_multicast,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = ns83820_tx_timeout, .ndo_tx_timeout = ns83820_tx_timeout,
#ifdef NS83820_VLAN_ACCEL_SUPPORT #ifdef NS83820_VLAN_ACCEL_SUPPORT
.ndo_vlan_rx_register = ns83820_vlan_rx_register, .ndo_vlan_rx_register = ns83820_vlan_rx_register,

View File

@ -28,11 +28,11 @@
} while (0) } while (0)
#define QLGE_VENDOR_ID 0x1077 #define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID1 0x8012 #define QLGE_DEVICE_ID 0x8012
#define QLGE_DEVICE_ID 0x8000
#define MAX_RX_RINGS 128 #define MAX_CPUS 8
#define MAX_TX_RINGS 128 #define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
#define NUM_TX_RING_ENTRIES 256 #define NUM_TX_RING_ENTRIES 256
#define NUM_RX_RING_ENTRIES 256 #define NUM_RX_RING_ENTRIES 256
@ -45,6 +45,7 @@
#define MAX_SPLIT_SIZE 1023 #define MAX_SPLIT_SIZE 1023
#define QLGE_SB_PAD 32 #define QLGE_SB_PAD 32
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ #define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ #define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) #define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
@ -961,8 +962,7 @@ struct ib_mac_iocb_rsp {
#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ #define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ #define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
__le32 data_len; /* */ __le32 data_len; /* */
__le32 data_addr_lo; /* */ __le64 data_addr; /* */
__le32 data_addr_hi; /* */
__le32 rss; /* */ __le32 rss; /* */
__le16 vlan_id; /* 12 bits */ __le16 vlan_id; /* 12 bits */
#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ #define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
@ -976,8 +976,7 @@ struct ib_mac_iocb_rsp {
#define IB_MAC_IOCB_RSP_HS 0x40 #define IB_MAC_IOCB_RSP_HS 0x40
#define IB_MAC_IOCB_RSP_HL 0x80 #define IB_MAC_IOCB_RSP_HL 0x80
__le32 hdr_len; /* */ __le32 hdr_len; /* */
__le32 hdr_addr_lo; /* */ __le64 hdr_addr; /* */
__le32 hdr_addr_hi; /* */
} __attribute((packed)); } __attribute((packed));
struct ib_ae_iocb_rsp { struct ib_ae_iocb_rsp {
@ -1042,10 +1041,8 @@ struct wqicb {
__le16 cq_id_rss; __le16 cq_id_rss;
#define Q_CQ_ID_RSS_RV 0x8000 #define Q_CQ_ID_RSS_RV 0x8000
__le16 rid; __le16 rid;
__le32 addr_lo; __le64 addr;
__le32 addr_hi; __le64 cnsmr_idx_addr;
__le32 cnsmr_idx_addr_lo;
__le32 cnsmr_idx_addr_hi;
} __attribute((packed)); } __attribute((packed));
/* /*
@ -1070,18 +1067,14 @@ struct cqicb {
#define LEN_CPP_64 0x0002 #define LEN_CPP_64 0x0002
#define LEN_CPP_128 0x0003 #define LEN_CPP_128 0x0003
__le16 rid; __le16 rid;
__le32 addr_lo; __le64 addr;
__le32 addr_hi; __le64 prod_idx_addr;
__le32 prod_idx_addr_lo;
__le32 prod_idx_addr_hi;
__le16 pkt_delay; __le16 pkt_delay;
__le16 irq_delay; __le16 irq_delay;
__le32 lbq_addr_lo; __le64 lbq_addr;
__le32 lbq_addr_hi;
__le16 lbq_buf_size; __le16 lbq_buf_size;
__le16 lbq_len; /* entry count */ __le16 lbq_len; /* entry count */
__le32 sbq_addr_lo; __le64 sbq_addr;
__le32 sbq_addr_hi;
__le16 sbq_buf_size; __le16 sbq_buf_size;
__le16 sbq_len; /* entry count */ __le16 sbq_len; /* entry count */
} __attribute((packed)); } __attribute((packed));
@ -1145,7 +1138,7 @@ struct tx_ring {
struct wqicb wqicb; /* structure used to inform chip of new queue */ struct wqicb wqicb; /* structure used to inform chip of new queue */
void *wq_base; /* pci_alloc:virtual addr for tx */ void *wq_base; /* pci_alloc:virtual addr for tx */
dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
u32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
u32 wq_size; /* size in bytes of queue area */ u32 wq_size; /* size in bytes of queue area */
u32 wq_len; /* number of entries in queue */ u32 wq_len; /* number of entries in queue */
@ -1181,7 +1174,7 @@ struct rx_ring {
u32 cq_size; u32 cq_size;
u32 cq_len; u32 cq_len;
u16 cq_id; u16 cq_id;
volatile __le32 *prod_idx_sh_reg; /* Shadowed producer register. */ __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
dma_addr_t prod_idx_sh_reg_dma; dma_addr_t prod_idx_sh_reg_dma;
void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
u32 cnsmr_idx; /* current sw idx */ u32 cnsmr_idx; /* current sw idx */
@ -1402,9 +1395,11 @@ struct ql_adapter {
int rx_ring_count; int rx_ring_count;
int ring_mem_size; int ring_mem_size;
void *ring_mem; void *ring_mem;
struct rx_ring *rx_ring;
struct rx_ring rx_ring[MAX_RX_RINGS];
struct tx_ring tx_ring[MAX_TX_RINGS];
int rx_csum; int rx_csum;
struct tx_ring *tx_ring;
u32 default_rx_queue; u32 default_rx_queue;
u16 rx_coalesce_usecs; /* cqicb->int_delay */ u16 rx_coalesce_usecs; /* cqicb->int_delay */
@ -1459,6 +1454,24 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
mmiowb(); mmiowb();
} }
/*
* Shadow Registers:
* Outbound queues have a consumer index that is maintained by the chip.
* Inbound queues have a producer index that is maintained by the chip.
* For lower overhead, these registers are "shadowed" to host memory
* which allows the device driver to track the queue progress without
* PCI reads. When an entry is placed on an inbound queue, the chip will
* update the relevant index register and then copy the value to the
* shadow register in host memory.
*/
static inline u32 ql_read_sh_reg(__le32 *addr)
{
u32 reg;
reg = le32_to_cpu(*addr);
rmb();
return reg;
}
extern char qlge_driver_name[]; extern char qlge_driver_name[];
extern const char qlge_driver_version[]; extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops; extern const struct ethtool_ops qlge_ethtool_ops;

View File

@ -435,14 +435,10 @@ void ql_dump_wqicb(struct wqicb *wqicb)
printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n", printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
le16_to_cpu(wqicb->cq_id_rss)); le16_to_cpu(wqicb->cq_id_rss));
printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid)); printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n", printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
le32_to_cpu(wqicb->addr_lo)); (unsigned long long) le64_to_cpu(wqicb->addr));
printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n", printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
le32_to_cpu(wqicb->addr_hi)); (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
} }
void ql_dump_tx_ring(struct tx_ring *tx_ring) void ql_dump_tx_ring(struct tx_ring *tx_ring)
@ -455,10 +451,11 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base); printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n", printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
(unsigned long long) tx_ring->wq_base_dma); (unsigned long long) tx_ring->wq_base_dma);
printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n", printk(KERN_ERR PFX
tx_ring->cnsmr_idx_sh_reg); "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n", tx_ring->cnsmr_idx_sh_reg,
(unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma); tx_ring->cnsmr_idx_sh_reg
? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size); printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len); printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n", printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
@ -510,30 +507,22 @@ void ql_dump_cqicb(struct cqicb *cqicb)
printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect); printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags); printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len)); printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n", printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
le32_to_cpu(cqicb->addr_lo)); (unsigned long long) le64_to_cpu(cqicb->addr));
printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n", printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
le32_to_cpu(cqicb->addr_hi)); (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
le32_to_cpu(cqicb->prod_idx_addr_lo));
printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
le32_to_cpu(cqicb->prod_idx_addr_hi));
printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n", printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
le16_to_cpu(cqicb->pkt_delay)); le16_to_cpu(cqicb->pkt_delay));
printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n", printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
le16_to_cpu(cqicb->irq_delay)); le16_to_cpu(cqicb->irq_delay));
printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n", printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
le32_to_cpu(cqicb->lbq_addr_lo)); (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
le32_to_cpu(cqicb->lbq_addr_hi));
printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n", printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
le16_to_cpu(cqicb->lbq_buf_size)); le16_to_cpu(cqicb->lbq_buf_size));
printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n", printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
le16_to_cpu(cqicb->lbq_len)); le16_to_cpu(cqicb->lbq_len));
printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n", printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
le32_to_cpu(cqicb->sbq_addr_lo)); (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
le32_to_cpu(cqicb->sbq_addr_hi));
printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n", printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
le16_to_cpu(cqicb->sbq_buf_size)); le16_to_cpu(cqicb->sbq_buf_size));
printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n", printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
@ -558,9 +547,10 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size); printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len); printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
printk(KERN_ERR PFX printk(KERN_ERR PFX
"rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n", "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg,
rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0); rx_ring->prod_idx_sh_reg
? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n", printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
(unsigned long long) rx_ring->prod_idx_sh_reg_dma); (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n", printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
@ -809,10 +799,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
printk(KERN_ERR PFX "data_len = %d\n", printk(KERN_ERR PFX "data_len = %d\n",
le32_to_cpu(ib_mac_rsp->data_len)); le32_to_cpu(ib_mac_rsp->data_len));
printk(KERN_ERR PFX "data_addr_hi = 0x%x\n", printk(KERN_ERR PFX "data_addr = 0x%llx\n",
le32_to_cpu(ib_mac_rsp->data_addr_hi)); (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
printk(KERN_ERR PFX "data_addr_lo = 0x%x\n",
le32_to_cpu(ib_mac_rsp->data_addr_lo));
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
printk(KERN_ERR PFX "rss = %x\n", printk(KERN_ERR PFX "rss = %x\n",
le32_to_cpu(ib_mac_rsp->rss)); le32_to_cpu(ib_mac_rsp->rss));
@ -828,10 +816,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
printk(KERN_ERR PFX "hdr length = %d.\n", printk(KERN_ERR PFX "hdr length = %d.\n",
le32_to_cpu(ib_mac_rsp->hdr_len)); le32_to_cpu(ib_mac_rsp->hdr_len));
printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n", printk(KERN_ERR PFX "hdr addr = 0x%llx.\n",
le32_to_cpu(ib_mac_rsp->hdr_addr_hi)); (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n",
le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
} }
} }
#endif #endif

View File

@ -76,7 +76,6 @@ MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = { static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
/* required last entry */ /* required last entry */
{0,} {0,}
}; };
@ -127,12 +126,12 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{ {
unsigned int seconds = 3; unsigned int wait_count = 30;
do { do {
if (!ql_sem_trylock(qdev, sem_mask)) if (!ql_sem_trylock(qdev, sem_mask))
return 0; return 0;
ssleep(1); udelay(100);
} while (--seconds); } while (--wait_count);
return -ETIMEDOUT; return -ETIMEDOUT;
} }
@ -1545,7 +1544,7 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{ {
struct ql_adapter *qdev = rx_ring->qdev; struct ql_adapter *qdev = rx_ring->qdev;
u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
struct ob_mac_iocb_rsp *net_rsp = NULL; struct ob_mac_iocb_rsp *net_rsp = NULL;
int count = 0; int count = 0;
@ -1571,7 +1570,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
} }
count++; count++;
ql_update_cq(rx_ring); ql_update_cq(rx_ring);
prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
} }
ql_write_cq_idx(rx_ring); ql_write_cq_idx(rx_ring);
if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) { if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
@ -1591,7 +1590,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{ {
struct ql_adapter *qdev = rx_ring->qdev; struct ql_adapter *qdev = rx_ring->qdev;
u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
struct ql_net_rsp_iocb *net_rsp; struct ql_net_rsp_iocb *net_rsp;
int count = 0; int count = 0;
@ -1624,7 +1623,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
} }
count++; count++;
ql_update_cq(rx_ring); ql_update_cq(rx_ring);
prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
if (count == budget) if (count == budget)
break; break;
} }
@ -1787,7 +1786,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* Check the default queue and wake handler if active. * Check the default queue and wake handler if active.
*/ */
rx_ring = &qdev->rx_ring[0]; rx_ring = &qdev->rx_ring[0];
if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) { if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n"); QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
ql_disable_completion_interrupt(qdev, intr_context->intr); ql_disable_completion_interrupt(qdev, intr_context->intr);
queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue, queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
@ -1801,7 +1800,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/ */
for (i = 1; i < qdev->rx_ring_count; i++) { for (i = 1; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i]; rx_ring = &qdev->rx_ring[i];
if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
rx_ring->cnsmr_idx) { rx_ring->cnsmr_idx) {
QPRINTK(qdev, INTR, INFO, QPRINTK(qdev, INTR, INFO,
"Waking handler for rx_ring[%d].\n", i); "Waking handler for rx_ring[%d].\n", i);
@ -2356,28 +2355,6 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
} }
} }
static void ql_free_ring_cb(struct ql_adapter *qdev)
{
kfree(qdev->ring_mem);
}
static int ql_alloc_ring_cb(struct ql_adapter *qdev)
{
/* Allocate space for tx/rx ring control blocks. */
qdev->ring_mem_size =
(qdev->tx_ring_count * sizeof(struct tx_ring)) +
(qdev->rx_ring_count * sizeof(struct rx_ring));
qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
if (qdev->ring_mem == NULL) {
return -ENOMEM;
} else {
qdev->rx_ring = qdev->ring_mem;
qdev->tx_ring = qdev->ring_mem +
(qdev->rx_ring_count * sizeof(struct rx_ring));
}
return 0;
}
static void ql_free_mem_resources(struct ql_adapter *qdev) static void ql_free_mem_resources(struct ql_adapter *qdev)
{ {
int i; int i;
@ -2467,12 +2444,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma); cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma); cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
cqicb->prod_idx_addr_hi =
cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
/* /*
* Set up the control block load flags. * Set up the control block load flags.
@ -2483,10 +2457,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->lbq_len) { if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */ cqicb->flags |= FLAGS_LL; /* Load lbq values */
*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma; *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
cqicb->lbq_addr_lo = cqicb->lbq_addr =
cpu_to_le32(rx_ring->lbq_base_indirect_dma); cpu_to_le64(rx_ring->lbq_base_indirect_dma);
cqicb->lbq_addr_hi =
cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
(u16) rx_ring->lbq_buf_size; (u16) rx_ring->lbq_buf_size;
cqicb->lbq_buf_size = cpu_to_le16(bq_len); cqicb->lbq_buf_size = cpu_to_le16(bq_len);
@ -2501,10 +2473,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->sbq_len) { if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */ cqicb->flags |= FLAGS_LS; /* Load sbq values */
*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma; *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
cqicb->sbq_addr_lo = cqicb->sbq_addr =
cpu_to_le32(rx_ring->sbq_base_indirect_dma); cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_addr_hi =
cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
cqicb->sbq_buf_size = cqicb->sbq_buf_size =
cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8); cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
bq_len = (rx_ring->sbq_len == 65536) ? 0 : bq_len = (rx_ring->sbq_len == 65536) ? 0 :
@ -2611,12 +2581,9 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
wqicb->rid = 0; wqicb->rid = 0;
wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma); wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma); wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
wqicb->cnsmr_idx_addr_hi =
cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
ql_init_tx_ring(qdev, tx_ring); ql_init_tx_ring(qdev, tx_ring);
@ -2746,14 +2713,14 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
* Outbound queue is for outbound completions only. * Outbound queue is for outbound completions only.
*/ */
intr_context->handler = qlge_msix_tx_isr; intr_context->handler = qlge_msix_tx_isr;
sprintf(intr_context->name, "%s-txq-%d", sprintf(intr_context->name, "%s-tx-%d",
qdev->ndev->name, i); qdev->ndev->name, i);
} else { } else {
/* /*
* Inbound queues handle unicast frames only. * Inbound queues handle unicast frames only.
*/ */
intr_context->handler = qlge_msix_rx_isr; intr_context->handler = qlge_msix_rx_isr;
sprintf(intr_context->name, "%s-rxq-%d", sprintf(intr_context->name, "%s-rx-%d",
qdev->ndev->name, i); qdev->ndev->name, i);
} }
} }
@ -3247,7 +3214,6 @@ static int qlge_close(struct net_device *ndev)
msleep(1); msleep(1);
ql_adapter_down(qdev); ql_adapter_down(qdev);
ql_release_adapter_resources(qdev); ql_release_adapter_resources(qdev);
ql_free_ring_cb(qdev);
return 0; return 0;
} }
@ -3273,8 +3239,8 @@ static int ql_configure_rings(struct ql_adapter *qdev)
* This limitation can be removed when requested. * This limitation can be removed when requested.
*/ */
if (cpu_cnt > 8) if (cpu_cnt > MAX_CPUS)
cpu_cnt = 8; cpu_cnt = MAX_CPUS;
/* /*
* rx_ring[0] is always the default queue. * rx_ring[0] is always the default queue.
@ -3294,9 +3260,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
*/ */
qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
if (ql_alloc_ring_cb(qdev))
return -ENOMEM;
for (i = 0; i < qdev->tx_ring_count; i++) { for (i = 0; i < qdev->tx_ring_count; i++) {
tx_ring = &qdev->tx_ring[i]; tx_ring = &qdev->tx_ring[i];
memset((void *)tx_ring, 0, sizeof(tx_ring)); memset((void *)tx_ring, 0, sizeof(tx_ring));
@ -3393,7 +3356,6 @@ static int qlge_open(struct net_device *ndev)
error_up: error_up:
ql_release_adapter_resources(qdev); ql_release_adapter_resources(qdev);
ql_free_ring_cb(qdev);
return err; return err;
} }

View File

@ -49,8 +49,8 @@
#include <asm/processor.h> #include <asm/processor.h>
#define DRV_NAME "r6040" #define DRV_NAME "r6040"
#define DRV_VERSION "0.20" #define DRV_VERSION "0.21"
#define DRV_RELDATE "07Jan2009" #define DRV_RELDATE "09Jan2009"
/* PHY CHIP Address */ /* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */ #define PHY1_ADDR 1 /* For MAC1 */
@ -457,22 +457,12 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H); iowrite16(adrp[2], ioaddr + MID_0H);
free_irq(dev->irq, dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
/* Free TX buffer */
r6040_free_txbufs(dev);
/* Free Descriptor memory */
pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
} }
static int r6040_close(struct net_device *dev) static int r6040_close(struct net_device *dev)
{ {
struct r6040_private *lp = netdev_priv(dev); struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
/* deleted timer */ /* deleted timer */
del_timer_sync(&lp->timer); del_timer_sync(&lp->timer);
@ -481,8 +471,28 @@ static int r6040_close(struct net_device *dev)
napi_disable(&lp->napi); napi_disable(&lp->napi);
netif_stop_queue(dev); netif_stop_queue(dev);
r6040_down(dev); r6040_down(dev);
free_irq(dev->irq, dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
/* Free TX buffer */
r6040_free_txbufs(dev);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
/* Free Descriptor memory */
if (lp->rx_ring) {
pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = 0;
}
if (lp->tx_ring) {
pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
lp->tx_ring = 0;
}
return 0; return 0;
} }
@ -1049,6 +1059,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_multicast_list = r6040_multicast_list, .ndo_set_multicast_list = r6040_multicast_list,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = r6040_ioctl, .ndo_do_ioctl = r6040_ioctl,
.ndo_tx_timeout = r6040_tx_timeout, .ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
@ -1143,8 +1154,10 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloader/BIOSes do not initialize /* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */ * MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) if (!(adrp[0] || adrp[1] || adrp[2])) {
printk(KERN_WARNING DRV_NAME ": MAC address not initialized\n"); printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n");
random_ether_addr(dev->dev_addr);
}
/* Link new device into r6040_root_dev */ /* Link new device into r6040_root_dev */
lp->pdev = pdev; lp->pdev = pdev;

View File

@ -1408,6 +1408,7 @@ static const struct net_device_ops sc92031_netdev_ops = {
.ndo_set_multicast_list = sc92031_set_multicast_list, .ndo_set_multicast_list = sc92031_set_multicast_list,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = sc92031_tx_timeout, .ndo_tx_timeout = sc92031_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sc92031_poll_controller, .ndo_poll_controller = sc92031_poll_controller,

View File

@ -636,10 +636,11 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
{ {
int reg; int reg;
if (efx->phy_type == PHY_TYPE_SFT9001B) { if (efx->phy_type == PHY_TYPE_SFT9001B)
device_remove_file(&efx->pci_dev->dev, device_remove_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach); &dev_attr_phy_short_reach);
} else {
if (efx->phy_type == PHY_TYPE_SFX7101) {
/* Power down the LNPGA */ /* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,

View File

@ -389,6 +389,7 @@ static const struct net_device_ops sis900_netdev_ops = {
.ndo_set_multicast_list = set_rx_mode, .ndo_set_multicast_list = set_rx_mode,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = mii_ioctl, .ndo_do_ioctl = mii_ioctl,
.ndo_tx_timeout = sis900_tx_timeout, .ndo_tx_timeout = sis900_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER

View File

@ -192,6 +192,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -196,6 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -144,6 +144,7 @@ static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
} }
BUG(); BUG();
return 0;
} }
static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
@ -1740,6 +1741,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_set_multicast_list = smsc911x_set_multicast_list, .ndo_set_multicast_list = smsc911x_set_multicast_list,
.ndo_do_ioctl = smsc911x_do_ioctl, .ndo_do_ioctl = smsc911x_do_ioctl,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = smsc911x_poll_controller, .ndo_poll_controller = smsc911x_poll_controller,
#endif #endif
@ -1967,7 +1969,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED, retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED,
SMSC_CHIPNAME, dev); dev->name, dev);
if (retval) { if (retval) {
SMSC_WARNING(PROBE, SMSC_WARNING(PROBE,
"Unable to claim requested irq: %d", dev->irq); "Unable to claim requested irq: %d", dev->irq);

View File

@ -1551,6 +1551,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_set_multicast_list = smsc9420_set_multicast_list, .ndo_set_multicast_list = smsc9420_set_multicast_list,
.ndo_do_ioctl = smsc9420_do_ioctl, .ndo_do_ioctl = smsc9420_do_ioctl,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = smsc9420_poll_controller, .ndo_poll_controller = smsc9420_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */ #endif /* CONFIG_NET_POLL_CONTROLLER */

View File

@ -442,40 +442,30 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{ {
struct ucc_fast_private *uccf; struct ucc_fast_private *uccf;
struct ucc_geth __iomem *ug_regs; struct ucc_geth __iomem *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf; uccf = ugeth->uccf;
ug_regs = ugeth->ug_regs; ug_regs = ugeth->ug_regs;
/* Enable interrupts for magic packet detection */ /* Enable interrupts for magic packet detection */
uccm = in_be32(uccf->p_uccm); setbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
uccm |= UCCE_MPD;
out_be32(uccf->p_uccm, uccm);
/* Enable magic packet detection */ /* Enable magic packet detection */
maccfg2 = in_be32(&ug_regs->maccfg2); setbits32(&ug_regs->maccfg2, MACCFG2_MPE);
maccfg2 |= MACCFG2_MPE;
out_be32(&ug_regs->maccfg2, maccfg2);
} }
static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{ {
struct ucc_fast_private *uccf; struct ucc_fast_private *uccf;
struct ucc_geth __iomem *ug_regs; struct ucc_geth __iomem *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf; uccf = ugeth->uccf;
ug_regs = ugeth->ug_regs; ug_regs = ugeth->ug_regs;
/* Disable interrupts for magic packet detection */ /* Disable interrupts for magic packet detection */
uccm = in_be32(uccf->p_uccm); clrbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
uccm &= ~UCCE_MPD;
out_be32(uccf->p_uccm, uccm);
/* Disable magic packet detection */ /* Disable magic packet detection */
maccfg2 = in_be32(&ug_regs->maccfg2); clrbits32(&ug_regs->maccfg2, MACCFG2_MPE);
maccfg2 &= ~MACCFG2_MPE;
out_be32(&ug_regs->maccfg2, maccfg2);
} }
#endif /* MAGIC_PACKET */ #endif /* MAGIC_PACKET */
@ -585,7 +575,8 @@ static void get_statistics(struct ucc_geth_private *ugeth,
/* Hardware only if user handed pointer and driver actually /* Hardware only if user handed pointer and driver actually
gathers hardware statistics */ gathers hardware statistics */
if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) { if (hardware_statistics &&
(in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
hardware_statistics->tx64 = in_be32(&ug_regs->tx64); hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
hardware_statistics->tx127 = in_be32(&ug_regs->tx127); hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
hardware_statistics->tx255 = in_be32(&ug_regs->tx255); hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
@ -1181,9 +1172,7 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
out_be32(uempr_register, value); out_be32(uempr_register, value);
/* Set UPSMR register */ /* Set UPSMR register */
value = in_be32(upsmr_register); setbits32(upsmr_register, automatic_flow_control_mode);
value |= automatic_flow_control_mode;
out_be32(upsmr_register, value);
value = in_be32(maccfg1_register); value = in_be32(maccfg1_register);
if (rx_flow_control_enable) if (rx_flow_control_enable)
@ -1200,14 +1189,11 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
u32 __iomem *upsmr_register, u32 __iomem *upsmr_register,
u16 __iomem *uescr_register) u16 __iomem *uescr_register)
{ {
u32 upsmr_value = 0;
u16 uescr_value = 0; u16 uescr_value = 0;
/* Enable hardware statistics gathering if requested */ /* Enable hardware statistics gathering if requested */
if (enable_hardware_statistics) { if (enable_hardware_statistics)
upsmr_value = in_be32(upsmr_register); setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
upsmr_value |= UPSMR_HSE;
out_be32(upsmr_register, upsmr_value);
}
/* Clear hardware statistics counters */ /* Clear hardware statistics counters */
uescr_value = in_be16(uescr_register); uescr_value = in_be16(uescr_register);
@ -1233,23 +1219,17 @@ static int init_firmware_statistics_gathering_mode(int
{ {
/* Note: this function does not check if */ /* Note: this function does not check if */
/* the parameters it receives are NULL */ /* the parameters it receives are NULL */
u16 temoder_value;
u32 remoder_value;
if (enable_tx_firmware_statistics) { if (enable_tx_firmware_statistics) {
out_be32(tx_rmon_base_ptr, out_be32(tx_rmon_base_ptr,
tx_firmware_statistics_structure_address); tx_firmware_statistics_structure_address);
temoder_value = in_be16(temoder_register); setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
out_be16(temoder_register, temoder_value);
} }
if (enable_rx_firmware_statistics) { if (enable_rx_firmware_statistics) {
out_be32(rx_rmon_base_ptr, out_be32(rx_rmon_base_ptr,
rx_firmware_statistics_structure_address); rx_firmware_statistics_structure_address);
remoder_value = in_be32(remoder_register); setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
out_be32(remoder_register, remoder_value);
} }
return 0; return 0;
@ -1316,15 +1296,12 @@ static int init_check_frame_length_mode(int length_check,
static int init_preamble_length(u8 preamble_length, static int init_preamble_length(u8 preamble_length,
u32 __iomem *maccfg2_register) u32 __iomem *maccfg2_register)
{ {
u32 value = 0;
if ((preamble_length < 3) || (preamble_length > 7)) if ((preamble_length < 3) || (preamble_length > 7))
return -EINVAL; return -EINVAL;
value = in_be32(maccfg2_register); clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
value &= ~MACCFG2_PREL_MASK; preamble_length << MACCFG2_PREL_SHIFT);
value |= (preamble_length << MACCFG2_PREL_SHIFT);
out_be32(maccfg2_register, value);
return 0; return 0;
} }
@ -1337,19 +1314,19 @@ static int init_rx_parameters(int reject_broadcast,
value = in_be32(upsmr_register); value = in_be32(upsmr_register);
if (reject_broadcast) if (reject_broadcast)
value |= UPSMR_BRO; value |= UCC_GETH_UPSMR_BRO;
else else
value &= ~UPSMR_BRO; value &= ~UCC_GETH_UPSMR_BRO;
if (receive_short_frames) if (receive_short_frames)
value |= UPSMR_RSH; value |= UCC_GETH_UPSMR_RSH;
else else
value &= ~UPSMR_RSH; value &= ~UCC_GETH_UPSMR_RSH;
if (promiscuous) if (promiscuous)
value |= UPSMR_PRO; value |= UCC_GETH_UPSMR_PRO;
else else
value &= ~UPSMR_PRO; value &= ~UCC_GETH_UPSMR_PRO;
out_be32(upsmr_register, value); out_be32(upsmr_register, value);
@ -1410,26 +1387,27 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
/* Set UPSMR */ /* Set UPSMR */
upsmr = in_be32(&uf_regs->upsmr); upsmr = in_be32(&uf_regs->upsmr);
upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM); upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UPSMR_RPM; upsmr |= UCC_GETH_UPSMR_RPM;
switch (ugeth->max_speed) { switch (ugeth->max_speed) {
case SPEED_10: case SPEED_10:
upsmr |= UPSMR_R10M; upsmr |= UCC_GETH_UPSMR_R10M;
/* FALLTHROUGH */ /* FALLTHROUGH */
case SPEED_100: case SPEED_100:
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
upsmr |= UPSMR_RMM; upsmr |= UCC_GETH_UPSMR_RMM;
} }
} }
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UPSMR_TBIM; upsmr |= UCC_GETH_UPSMR_TBIM;
} }
out_be32(&uf_regs->upsmr, upsmr); out_be32(&uf_regs->upsmr, upsmr);
@ -1517,9 +1495,9 @@ static void adjust_link(struct net_device *dev)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
if (phydev->speed == SPEED_10) if (phydev->speed == SPEED_10)
upsmr |= UPSMR_R10M; upsmr |= UCC_GETH_UPSMR_R10M;
else else
upsmr &= ~(UPSMR_R10M); upsmr &= ~UCC_GETH_UPSMR_R10M;
} }
break; break;
default: default:
@ -1602,10 +1580,8 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
uccf = ugeth->uccf; uccf = ugeth->uccf;
/* Mask GRACEFUL STOP TX interrupt bit and clear it */ /* Mask GRACEFUL STOP TX interrupt bit and clear it */
temp = in_be32(uccf->p_uccm); clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
temp &= ~UCCE_GRA; out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
out_be32(uccf->p_uccm, temp);
out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
/* Issue host command */ /* Issue host command */
cecr_subblock = cecr_subblock =
@ -1617,7 +1593,7 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
do { do {
msleep(10); msleep(10);
temp = in_be32(uccf->p_ucce); temp = in_be32(uccf->p_ucce);
} while (!(temp & UCCE_GRA) && --i); } while (!(temp & UCC_GETH_UCCE_GRA) && --i);
uccf->stopped_tx = 1; uccf->stopped_tx = 1;
@ -1975,12 +1951,9 @@ static void ucc_geth_set_multi(struct net_device *dev)
uf_regs = ugeth->uccf->uf_regs; uf_regs = ugeth->uccf->uf_regs;
if (dev->flags & IFF_PROMISC) { if (dev->flags & IFF_PROMISC) {
setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
} else { } else {
clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
p_82xx_addr_filt = p_82xx_addr_filt =
(struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
@ -2020,7 +1993,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{ {
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
struct phy_device *phydev = ugeth->phydev; struct phy_device *phydev = ugeth->phydev;
u32 tempval;
ugeth_vdbg("%s: IN", __func__); ugeth_vdbg("%s: IN", __func__);
@ -2037,9 +2009,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
out_be32(ugeth->uccf->p_ucce, 0xffffffff); out_be32(ugeth->uccf->p_ucce, 0xffffffff);
/* Disable Rx and Tx */ /* Disable Rx and Tx */
tempval = in_be32(&ug_regs->maccfg1); clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
out_be32(&ug_regs->maccfg1, tempval);
ucc_geth_memclean(ugeth); ucc_geth_memclean(ugeth);
} }
@ -2153,10 +2123,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Generate uccm_mask for receive */ /* Generate uccm_mask for receive */
uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
for (i = 0; i < ug_info->numQueuesRx; i++) for (i = 0; i < ug_info->numQueuesRx; i++)
uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
for (i = 0; i < ug_info->numQueuesTx; i++) for (i = 0; i < ug_info->numQueuesTx; i++)
uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
/* Initialize the general fast UCC block. */ /* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) { if (ucc_fast_init(uf_info, &ugeth->uccf)) {
if (netif_msg_probe(ugeth)) if (netif_msg_probe(ugeth))
@ -2185,7 +2155,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
struct ucc_geth __iomem *ug_regs; struct ucc_geth __iomem *ug_regs;
int ret_val = -EINVAL; int ret_val = -EINVAL;
u32 remoder = UCC_GETH_REMODER_INIT; u32 remoder = UCC_GETH_REMODER_INIT;
u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; u32 init_enet_pram_offset, cecr_subblock, command;
u32 ifstat, i, j, size, l2qt, l3qt, length; u32 ifstat, i, j, size, l2qt, l3qt, length;
u16 temoder = UCC_GETH_TEMODER_INIT; u16 temoder = UCC_GETH_TEMODER_INIT;
u16 test; u16 test;
@ -2281,10 +2251,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
&uf_regs->upsmr, &uf_regs->upsmr,
&ug_regs->uempr, &ug_regs->maccfg1); &ug_regs->uempr, &ug_regs->maccfg1);
maccfg1 = in_be32(&ug_regs->maccfg1); setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
maccfg1 |= MACCFG1_ENABLE_RX;
maccfg1 |= MACCFG1_ENABLE_TX;
out_be32(&ug_regs->maccfg1, maccfg1);
/* Set IPGIFG */ /* Set IPGIFG */
/* For more details see the hardware spec. */ /* For more details see the hardware spec. */
@ -3274,7 +3241,6 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
static int ucc_geth_poll(struct napi_struct *napi, int budget) static int ucc_geth_poll(struct napi_struct *napi, int budget)
{ {
struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
struct net_device *dev = ugeth->dev;
struct ucc_geth_info *ug_info; struct ucc_geth_info *ug_info;
int howmany, i; int howmany, i;
@ -3285,14 +3251,8 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
howmany += ucc_geth_rx(ugeth, i, budget - howmany); howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) { if (howmany < budget) {
struct ucc_fast_private *uccf;
u32 uccm;
netif_rx_complete(napi); netif_rx_complete(napi);
uccf = ugeth->uccf; setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
uccm = in_be32(uccf->p_uccm);
uccm |= UCCE_RX_EVENTS;
out_be32(uccf->p_uccm, uccm);
} }
return howmany; return howmany;
@ -3332,7 +3292,7 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
/* Tx event processing */ /* Tx event processing */
if (ucce & UCCE_TX_EVENTS) { if (ucce & UCCE_TX_EVENTS) {
spin_lock(&ugeth->lock); spin_lock(&ugeth->lock);
tx_mask = UCCE_TXBF_SINGLE_MASK; tx_mask = UCC_GETH_UCCE_TXB0;
for (i = 0; i < ug_info->numQueuesTx; i++) { for (i = 0; i < ug_info->numQueuesTx; i++) {
if (ucce & tx_mask) if (ucce & tx_mask)
ucc_geth_tx(dev, i); ucc_geth_tx(dev, i);
@ -3344,12 +3304,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
/* Errors and other events */ /* Errors and other events */
if (ucce & UCCE_OTHER) { if (ucce & UCCE_OTHER) {
if (ucce & UCCE_BSY) { if (ucce & UCC_GETH_UCCE_BSY)
dev->stats.rx_errors++; dev->stats.rx_errors++;
} if (ucce & UCC_GETH_UCCE_TXE)
if (ucce & UCCE_TXE) {
dev->stats.tx_errors++; dev->stats.tx_errors++;
}
} }
return IRQ_HANDLED; return IRQ_HANDLED;

View File

@ -162,92 +162,27 @@ struct ucc_geth {
boundary */ boundary */
/* UCC GETH Event Register */ /* UCC GETH Event Register */
#define UCCE_MPD 0x80000000 /* Magic packet #define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \
detection */ UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \
#define UCCE_SCAR 0x40000000 UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \
#define UCCE_GRA 0x20000000 /* Tx graceful UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0)
stop
complete */
#define UCCE_CBPR 0x10000000
#define UCCE_BSY 0x08000000
#define UCCE_RXC 0x04000000
#define UCCE_TXC 0x02000000
#define UCCE_TXE 0x01000000
#define UCCE_TXB7 0x00800000
#define UCCE_TXB6 0x00400000
#define UCCE_TXB5 0x00200000
#define UCCE_TXB4 0x00100000
#define UCCE_TXB3 0x00080000
#define UCCE_TXB2 0x00040000
#define UCCE_TXB1 0x00020000
#define UCCE_TXB0 0x00010000
#define UCCE_RXB7 0x00008000
#define UCCE_RXB6 0x00004000
#define UCCE_RXB5 0x00002000
#define UCCE_RXB4 0x00001000
#define UCCE_RXB3 0x00000800
#define UCCE_RXB2 0x00000400
#define UCCE_RXB1 0x00000200
#define UCCE_RXB0 0x00000100
#define UCCE_RXF7 0x00000080
#define UCCE_RXF6 0x00000040
#define UCCE_RXF5 0x00000020
#define UCCE_RXF4 0x00000010
#define UCCE_RXF3 0x00000008
#define UCCE_RXF2 0x00000004
#define UCCE_RXF1 0x00000002
#define UCCE_RXF0 0x00000001
#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0) #define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \
#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0) UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \
UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \
UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0)
#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\ #define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \
UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0) UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \
#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\ UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \
UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0) UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0)
#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\
UCCE_RXC | UCCE_TXC | UCCE_TXE)
#define UCCE_RX_EVENTS (UCCE_RXF | UCCE_BSY) #define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \
#define UCCE_TX_EVENTS (UCCE_TXB | UCCE_TXE) UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \
UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE)
/* UCC GETH UPSMR (Protocol Specific Mode Register) */ #define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY)
#define UPSMR_ECM 0x04000000 /* Enable CAM #define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE)
Miss or
Enable
Filtering
Miss */
#define UPSMR_HSE 0x02000000 /* Hardware
Statistics
Enable */
#define UPSMR_PRO 0x00400000 /* Promiscuous*/
#define UPSMR_CAP 0x00200000 /* CAM polarity
*/
#define UPSMR_RSH 0x00100000 /* Receive
Short Frames
*/
#define UPSMR_RPM 0x00080000 /* Reduced Pin
Mode
interfaces */
#define UPSMR_R10M 0x00040000 /* RGMII/RMII
10 Mode */
#define UPSMR_RLPB 0x00020000 /* RMII
Loopback
Mode */
#define UPSMR_TBIM 0x00010000 /* Ten-bit
Interface
Mode */
#define UPSMR_RMM 0x00001000 /* RMII/RGMII
Mode */
#define UPSMR_CAM 0x00000400 /* CAM Address
Matching */
#define UPSMR_BRO 0x00000200 /* Broadcast
Address */
#define UPSMR_RES1 0x00002000 /* Reserved
feild - must
be 1 */
/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ /* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control #define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
@ -945,9 +880,10 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_REMODER_INIT 0 /* bits that must be #define UCC_GETH_REMODER_INIT 0 /* bits that must be
set */ set */
#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */ #define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */
#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value
for this /* Initial value for UPSMR */
register */ #define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0 #define UCC_GETH_MACCFG1_INIT 0
#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) #define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)

View File

@ -622,6 +622,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_get_stats = rhine_get_stats, .ndo_get_stats = rhine_get_stats,
.ndo_set_multicast_list = rhine_set_rx_mode, .ndo_set_multicast_list = rhine_set_rx_mode,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl, .ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = rhine_tx_timeout, .ndo_tx_timeout = rhine_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER

View File

@ -855,6 +855,7 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_start_xmit = velocity_xmit, .ndo_start_xmit = velocity_xmit,
.ndo_get_stats = velocity_get_stats, .ndo_get_stats = velocity_get_stats,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = velocity_set_multi, .ndo_set_multicast_list = velocity_set_multi,
.ndo_change_mtu = velocity_change_mtu, .ndo_change_mtu = velocity_change_mtu,
.ndo_do_ioctl = velocity_ioctl, .ndo_do_ioctl = velocity_ioctl,

View File

@ -155,6 +155,7 @@ static const struct net_device_ops wd_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -1369,7 +1369,7 @@ EXPORT_SYMBOL_GPL(lbs_start_card);
void lbs_stop_card(struct lbs_private *priv) void lbs_stop_card(struct lbs_private *priv)
{ {
struct net_device *dev = priv->dev; struct net_device *dev;
struct cmd_ctrl_node *cmdnode; struct cmd_ctrl_node *cmdnode;
unsigned long flags; unsigned long flags;
@ -1377,9 +1377,10 @@ void lbs_stop_card(struct lbs_private *priv)
if (!priv) if (!priv)
goto out; goto out;
dev = priv->dev;
netif_stop_queue(priv->dev); netif_stop_queue(dev);
netif_carrier_off(priv->dev); netif_carrier_off(dev);
lbs_debugfs_remove_one(priv); lbs_debugfs_remove_one(priv);
if (priv->mesh_tlv) { if (priv->mesh_tlv) {

View File

@ -362,6 +362,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_multicast_list = set_rx_mode, .ndo_set_multicast_list = set_rx_mode,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl, .ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = yellowfin_tx_timeout, .ndo_tx_timeout = yellowfin_tx_timeout,
}; };

View File

@ -147,6 +147,7 @@ static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_get_stats = ei_get_stats, .ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list, .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll, .ndo_poll_controller = ei_poll,

View File

@ -916,7 +916,7 @@ static struct ethtool_ops qeth_l2_osn_ops = {
.get_drvinfo = qeth_core_get_drvinfo, .get_drvinfo = qeth_core_get_drvinfo,
}; };
static struct net_device_ops qeth_l2_netdev_ops = { static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_l2_open, .ndo_open = qeth_l2_open,
.ndo_stop = qeth_l2_stop, .ndo_stop = qeth_l2_stop,
.ndo_get_stats = qeth_get_stats, .ndo_get_stats = qeth_get_stats,

View File

@ -2894,7 +2894,7 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
return 0; return 0;
} }
static struct net_device_ops qeth_l3_netdev_ops = { static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_l3_open, .ndo_open = qeth_l3_open,
.ndo_stop = qeth_l3_stop, .ndo_stop = qeth_l3_stop,
.ndo_get_stats = qeth_get_stats, .ndo_get_stats = qeth_get_stats,
@ -2909,6 +2909,22 @@ static struct net_device_ops qeth_l3_netdev_ops = {
.ndo_tx_timeout = qeth_tx_timeout, .ndo_tx_timeout = qeth_tx_timeout,
}; };
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_open = qeth_l3_open,
.ndo_stop = qeth_l3_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
static int qeth_l3_setup_netdev(struct qeth_card *card) static int qeth_l3_setup_netdev(struct qeth_card *card)
{ {
if (card->info.type == QETH_CARD_TYPE_OSAE) { if (card->info.type == QETH_CARD_TYPE_OSAE) {
@ -2919,12 +2935,12 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
#endif #endif
if (!card->dev) if (!card->dev)
return -ENODEV; return -ENODEV;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
} else { } else {
card->dev = alloc_etherdev(0); card->dev = alloc_etherdev(0);
if (!card->dev) if (!card->dev)
return -ENODEV; return -ENODEV;
qeth_l3_netdev_ops.ndo_neigh_setup = card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
qeth_l3_neigh_setup;
/*IPv6 address autoconfiguration stuff*/ /*IPv6 address autoconfiguration stuff*/
qeth_l3_get_unique_id(card); qeth_l3_get_unique_id(card);
@ -2937,6 +2953,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
if (!card->dev) if (!card->dev)
return -ENODEV; return -ENODEV;
card->dev->flags |= IFF_NOARP; card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
qeth_l3_iqd_read_initial_mac(card); qeth_l3_iqd_read_initial_mac(card);
} else } else
return -ENODEV; return -ENODEV;
@ -2944,7 +2961,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->ml_priv = card; card->dev->ml_priv = card;
card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu; card->dev->mtu = card->info.initial_mtu;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
card->dev->features |= NETIF_F_HW_VLAN_TX | card->dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_RX |

View File

@ -270,8 +270,18 @@ struct dma_device {
/* --- public DMA engine API --- */ /* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void); void dmaengine_get(void);
void dmaengine_put(void); void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len); void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,

View File

@ -323,8 +323,8 @@ struct input_dev;
* *
* @rf_hw: [private] State of the hardware radio switch (OFF/ON) * @rf_hw: [private] State of the hardware radio switch (OFF/ON)
* *
* @debufs_dentry: [private] Used to hook up a debugfs entry. This * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
* shows up in the debugfs root as wimax:DEVICENAME. * shows up in the debugfs root as wimax\:DEVICENAME.
* *
* Description: * Description:
* This structure defines a common interface to access all WiMAX * This structure defines a common interface to access all WiMAX

View File

@ -85,12 +85,13 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
static inline int ebt_dev_check(char *entry, const struct net_device *device) static inline int ebt_dev_check(char *entry, const struct net_device *device)
{ {
int i = 0; int i = 0;
const char *devname = device->name; const char *devname;
if (*entry == '\0') if (*entry == '\0')
return 0; return 0;
if (!device) if (!device)
return 1; return 1;
devname = device->name;
/* 1 is the wildcard token */ /* 1 is the wildcard token */
while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i]) while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
i++; i++;

View File

@ -1087,6 +1087,11 @@ int dev_open(struct net_device *dev)
*/ */
dev->flags |= IFF_UP; dev->flags |= IFF_UP;
/*
* Enable NET_DMA
*/
dmaengine_get();
/* /*
* Initialize multicasting status * Initialize multicasting status
*/ */
@ -1164,6 +1169,11 @@ int dev_close(struct net_device *dev)
*/ */
call_netdevice_notifiers(NETDEV_DOWN, dev); call_netdevice_notifiers(NETDEV_DOWN, dev);
/*
* Shutdown NET_DMA
*/
dmaengine_put();
return 0; return 0;
} }
@ -5151,9 +5161,6 @@ static int __init net_dev_init(void)
hotcpu_notifier(dev_cpu_callback, 0); hotcpu_notifier(dev_cpu_callback, 0);
dst_init(); dst_init();
dev_mcast_init(); dev_mcast_init();
#ifdef CONFIG_NET_DMA
dmaengine_get();
#endif
rc = 0; rc = 0;
out: out:
return rc; return rc;

View File

@ -29,7 +29,7 @@ config IP_DCCP_CCID3
http://www.ietf.org/rfc/rfc4342.txt http://www.ietf.org/rfc/rfc4342.txt
The TFRC congestion control algorithms were initially described in The TFRC congestion control algorithms were initially described in
RFC 5448. RFC 5348.
This text was extracted from RFC 4340 (sec. 10.2), This text was extracted from RFC 4340 (sec. 10.2),
http://www.ietf.org/rfc/rfc4340.txt http://www.ietf.org/rfc/rfc4340.txt

View File

@ -36,7 +36,7 @@ out:
return rc; return rc;
} }
void __exit tfrc_lib_exit(void) void tfrc_lib_exit(void)
{ {
tfrc_rx_packet_history_exit(); tfrc_rx_packet_history_exit();
tfrc_tx_packet_history_exit(); tfrc_tx_packet_history_exit();

View File

@ -124,7 +124,7 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff) struct tipc_node_map *nm_diff)
{ {
int stop = sizeof(nm_a->map) / sizeof(u32); int stop = ARRAY_SIZE(nm_a->map);
int w; int w;
int b; int b;
u32 map; u32 map;

View File

@ -63,7 +63,6 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
if (len > skb_tailroom(skb)) if (len > skb_tailroom(skb))
len = skb_tailroom(skb); len = skb_tailroom(skb);
skb->truesize += len;
__skb_put(skb, len); __skb_put(skb, len);
len += plen; len += plen;