Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (166 commits)
  [PATCH] net: au1000_eth: PHY framework conversion
  [PATCH] 3c5zz ethernet: fix section warnings
  [PATCH] smc ethernet: fix section mismatch warnings
  [PATCH] hp ethernet: fix section mismatches
  [PATCH] Section mismatch in drivers/net/ne.o during modpost
  [PATCH] e1000: prevent statistics from getting garbled during reset
  [PATCH] smc911x Kconfig fix
  [PATCH] forcedeth: new device ids
  [PATCH] forcedeth config: version
  [PATCH] forcedeth config: module parameters
  [PATCH] forcedeth config: diagnostics
  [PATCH] forcedeth config: move functions
  [PATCH] forcedeth config: statistics
  [PATCH] forcedeth config: csum
  [PATCH] forcedeth config: wol
  [PATCH] forcedeth config: phy
  [PATCH] forcedeth config: flow control
  [PATCH] forcedeth config: ring sizes
  [PATCH] forcedeth config: tso cleanup
  [DOC] Update bonding documentation with sysfs info
  ...
commit 2090af7180
@ -14,8 +14,8 @@ Copyright (C) 2004-2006, Intel Corporation

README.ipw2200

Version: 1.0.8
Date  : October 20, 2005
Version: 1.1.2
Date   : March 30, 2006

Index

@ -103,7 +103,7 @@ file.

1.1. Overview of Features
-----------------------------------------------
The current release (1.0.8) supports the following features:
The current release (1.1.2) supports the following features:

+ BSS mode (Infrastructure, Managed)
+ IBSS mode (Ad-Hoc)
@ -247,8 +247,8 @@ and can set the contents via echo. For example:
% cat /sys/bus/pci/drivers/ipw2200/debug_level

Will report the current debug level of the driver's logging subsystem
(only available if CONFIG_IPW_DEBUG was configured when the driver was
built).
(only available if CONFIG_IPW2200_DEBUG was configured when the driver
was built).

You can set the debug level via:
@ -1,7 +1,7 @@

Linux Ethernet Bonding Driver HOWTO

Latest update: 21 June 2005
Latest update: 24 April 2006

Initial release : Thomas Davis <tadavis at lbl.gov>
Corrections, HA extensions : 2000/10/03-15 :
@ -12,6 +12,8 @@ Corrections, HA extensions : 2000/10/03-15 :
- Jay Vosburgh <fubar at us dot ibm dot com>

Reorganized and updated Feb 2005 by Jay Vosburgh
Added Sysfs information: 2006/04/24
- Mitch Williams <mitch.a.williams at intel.com>

Introduction
============
@ -38,61 +40,62 @@ Table of Contents
|
||||
2. Bonding Driver Options
|
||||
|
||||
3. Configuring Bonding Devices
|
||||
3.1 Configuration with sysconfig support
|
||||
3.1.1 Using DHCP with sysconfig
|
||||
3.1.2 Configuring Multiple Bonds with sysconfig
|
||||
3.2 Configuration with initscripts support
|
||||
3.2.1 Using DHCP with initscripts
|
||||
3.2.2 Configuring Multiple Bonds with initscripts
|
||||
3.3 Configuring Bonding Manually
|
||||
3.1 Configuration with Sysconfig Support
|
||||
3.1.1 Using DHCP with Sysconfig
|
||||
3.1.2 Configuring Multiple Bonds with Sysconfig
|
||||
3.2 Configuration with Initscripts Support
|
||||
3.2.1 Using DHCP with Initscripts
|
||||
3.2.2 Configuring Multiple Bonds with Initscripts
|
||||
3.3 Configuring Bonding Manually with Ifenslave
|
||||
3.3.1 Configuring Multiple Bonds Manually
|
||||
3.4 Configuring Bonding Manually via Sysfs
|
||||
|
||||
5. Querying Bonding Configuration
|
||||
5.1 Bonding Configuration
|
||||
5.2 Network Configuration
|
||||
4. Querying Bonding Configuration
|
||||
4.1 Bonding Configuration
|
||||
4.2 Network Configuration
|
||||
|
||||
6. Switch Configuration
|
||||
5. Switch Configuration
|
||||
|
||||
7. 802.1q VLAN Support
|
||||
6. 802.1q VLAN Support
|
||||
|
||||
8. Link Monitoring
|
||||
8.1 ARP Monitor Operation
|
||||
8.2 Configuring Multiple ARP Targets
|
||||
8.3 MII Monitor Operation
|
||||
7. Link Monitoring
|
||||
7.1 ARP Monitor Operation
|
||||
7.2 Configuring Multiple ARP Targets
|
||||
7.3 MII Monitor Operation
|
||||
|
||||
9. Potential Trouble Sources
|
||||
9.1 Adventures in Routing
|
||||
9.2 Ethernet Device Renaming
|
||||
9.3 Painfully Slow Or No Failed Link Detection By Miimon
|
||||
8. Potential Trouble Sources
|
||||
8.1 Adventures in Routing
|
||||
8.2 Ethernet Device Renaming
|
||||
8.3 Painfully Slow Or No Failed Link Detection By Miimon
|
||||
|
||||
10. SNMP agents
|
||||
9. SNMP agents
|
||||
|
||||
11. Promiscuous mode
|
||||
10. Promiscuous mode
|
||||
|
||||
12. Configuring Bonding for High Availability
|
||||
12.1 High Availability in a Single Switch Topology
|
||||
12.2 High Availability in a Multiple Switch Topology
|
||||
12.2.1 HA Bonding Mode Selection for Multiple Switch Topology
|
||||
12.2.2 HA Link Monitoring for Multiple Switch Topology
|
||||
11. Configuring Bonding for High Availability
|
||||
11.1 High Availability in a Single Switch Topology
|
||||
11.2 High Availability in a Multiple Switch Topology
|
||||
11.2.1 HA Bonding Mode Selection for Multiple Switch Topology
|
||||
11.2.2 HA Link Monitoring for Multiple Switch Topology
|
||||
|
||||
13. Configuring Bonding for Maximum Throughput
|
||||
13.1 Maximum Throughput in a Single Switch Topology
|
||||
13.1.1 MT Bonding Mode Selection for Single Switch Topology
|
||||
13.1.2 MT Link Monitoring for Single Switch Topology
|
||||
13.2 Maximum Throughput in a Multiple Switch Topology
|
||||
13.2.1 MT Bonding Mode Selection for Multiple Switch Topology
|
||||
13.2.2 MT Link Monitoring for Multiple Switch Topology
|
||||
12. Configuring Bonding for Maximum Throughput
|
||||
12.1 Maximum Throughput in a Single Switch Topology
|
||||
12.1.1 MT Bonding Mode Selection for Single Switch Topology
|
||||
12.1.2 MT Link Monitoring for Single Switch Topology
|
||||
12.2 Maximum Throughput in a Multiple Switch Topology
|
||||
12.2.1 MT Bonding Mode Selection for Multiple Switch Topology
|
||||
12.2.2 MT Link Monitoring for Multiple Switch Topology
|
||||
|
||||
14. Switch Behavior Issues
|
||||
14.1 Link Establishment and Failover Delays
|
||||
14.2 Duplicated Incoming Packets
|
||||
13. Switch Behavior Issues
|
||||
13.1 Link Establishment and Failover Delays
|
||||
13.2 Duplicated Incoming Packets
|
||||
|
||||
15. Hardware Specific Considerations
|
||||
15.1 IBM BladeCenter
|
||||
14. Hardware Specific Considerations
|
||||
14.1 IBM BladeCenter
|
||||
|
||||
16. Frequently Asked Questions
|
||||
15. Frequently Asked Questions
|
||||
|
||||
17. Resources and Links
|
||||
16. Resources and Links
|
||||
|
||||
|
||||
1. Bonding Driver Installation
|
||||
@ -156,6 +159,9 @@ you're trying to build it for. Some distros (e.g., Red Hat from 7.1
onwards) do not have /usr/include/linux symbolically linked to the
default kernel source include directory.

SECOND IMPORTANT NOTE:
If you plan to configure bonding using sysfs, you do not need
to use ifenslave.

2. Bonding Driver Options
=========================
@ -270,7 +276,7 @@ mode
|
||||
In bonding version 2.6.2 or later, when a failover
|
||||
occurs in active-backup mode, bonding will issue one
|
||||
or more gratuitous ARPs on the newly active slave.
|
||||
One gratutious ARP is issued for the bonding master
|
||||
One gratuitous ARP is issued for the bonding master
|
||||
interface and each VLAN interfaces configured above
|
||||
it, provided that the interface has at least one IP
|
||||
address configured. Gratuitous ARPs issued for VLAN
|
||||
@ -377,7 +383,7 @@ mode
|
||||
When a link is reconnected or a new slave joins the
|
||||
bond the receive traffic is redistributed among all
|
||||
active slaves in the bond by initiating ARP Replies
|
||||
with the selected mac address to each of the
|
||||
with the selected MAC address to each of the
|
||||
clients. The updelay parameter (detailed below) must
|
||||
be set to a value equal or greater than the switch's
|
||||
forwarding delay so that the ARP Replies sent to the
|
||||
@ -498,11 +504,12 @@ not exist, and the layer2 policy is the only policy.
|
||||
3. Configuring Bonding Devices
|
||||
==============================
|
||||
|
||||
There are, essentially, two methods for configuring bonding:
|
||||
with support from the distro's network initialization scripts, and
|
||||
without. Distros generally use one of two packages for the network
|
||||
initialization scripts: initscripts or sysconfig. Recent versions of
|
||||
these packages have support for bonding, while older versions do not.
|
||||
You can configure bonding using either your distro's network
|
||||
initialization scripts, or manually using either ifenslave or the
|
||||
sysfs interface. Distros generally use one of two packages for the
|
||||
network initialization scripts: initscripts or sysconfig. Recent
|
||||
versions of these packages have support for bonding, while older
|
||||
versions do not.
|
||||
|
||||
We will first describe the options for configuring bonding for
|
||||
distros using versions of initscripts and sysconfig with full or
|
||||
@ -530,7 +537,7 @@ $ grep ifenslave /sbin/ifup
|
||||
If this returns any matches, then your initscripts or
|
||||
sysconfig has support for bonding.
|
||||
|
||||
3.1 Configuration with sysconfig support
|
||||
3.1 Configuration with Sysconfig Support
|
||||
----------------------------------------
|
||||
|
||||
This section applies to distros using a version of sysconfig
|
||||
@ -538,7 +545,7 @@ with bonding support, for example, SuSE Linux Enterprise Server 9.
|
||||
|
||||
SuSE SLES 9's networking configuration system does support
|
||||
bonding, however, at this writing, the YaST system configuration
|
||||
frontend does not provide any means to work with bonding devices.
|
||||
front end does not provide any means to work with bonding devices.
|
||||
Bonding devices can be managed by hand, however, as follows.
|
||||
|
||||
First, if they have not already been configured, configure the
|
||||
@ -660,7 +667,7 @@ format can be found in an example ifcfg template file:
|
||||
Note that the template does not document the various BONDING_
|
||||
settings described above, but does describe many of the other options.
|
||||
|
||||
3.1.1 Using DHCP with sysconfig
|
||||
3.1.1 Using DHCP with Sysconfig
|
||||
-------------------------------
|
||||
|
||||
Under sysconfig, configuring a device with BOOTPROTO='dhcp'
|
||||
@ -670,7 +677,7 @@ attempt to obtain the device address from DHCP prior to adding any of
|
||||
the slave devices. Without active slaves, the DHCP requests are not
|
||||
sent to the network.
|
||||
|
||||
3.1.2 Configuring Multiple Bonds with sysconfig
|
||||
3.1.2 Configuring Multiple Bonds with Sysconfig
|
||||
-----------------------------------------------
|
||||
|
||||
The sysconfig network initialization system is capable of
|
||||
@ -685,7 +692,7 @@ ifcfg-bondX files.
|
||||
options in the ifcfg-bondX file, it is not necessary to add them to
|
||||
the system /etc/modules.conf or /etc/modprobe.conf configuration file.
|
||||
|
||||
3.2 Configuration with initscripts support
|
||||
3.2 Configuration with Initscripts Support
|
||||
------------------------------------------
|
||||
|
||||
This section applies to distros using a version of initscripts
|
||||
@ -756,7 +763,7 @@ options for your configuration.
|
||||
will restart the networking subsystem and your bond link should be now
|
||||
up and running.
|
||||
|
||||
3.2.1 Using DHCP with initscripts
|
||||
3.2.1 Using DHCP with Initscripts
|
||||
---------------------------------
|
||||
|
||||
Recent versions of initscripts (the version supplied with
|
||||
@ -768,7 +775,7 @@ above, except replace the line "BOOTPROTO=none" with "BOOTPROTO=dhcp"
|
||||
and add a line consisting of "TYPE=Bonding". Note that the TYPE value
|
||||
is case sensitive.
|
||||
|
||||
3.2.2 Configuring Multiple Bonds with initscripts
|
||||
3.2.2 Configuring Multiple Bonds with Initscripts
|
||||
-------------------------------------------------
|
||||
|
||||
At this writing, the initscripts package does not directly
|
||||
@ -784,8 +791,8 @@ Fedora Core kernels, and has been seen on RHEL 4 as well. On kernels
|
||||
exhibiting this problem, it will be impossible to configure multiple
|
||||
bonds with differing parameters.
|
||||
|
||||
3.3 Configuring Bonding Manually
|
||||
--------------------------------
|
||||
3.3 Configuring Bonding Manually with Ifenslave
|
||||
-----------------------------------------------
|
||||
|
||||
This section applies to distros whose network initialization
|
||||
scripts (the sysconfig or initscripts package) do not have specific
|
||||
@ -889,11 +896,139 @@ install bond1 /sbin/modprobe --ignore-install bonding -o bond1 \
This may be repeated any number of times, specifying a new and
unique name in place of bond1 for each subsequent instance.

3.4 Configuring Bonding Manually via Sysfs
------------------------------------------

5. Querying Bonding Configuration
Starting with version 3.0, Channel Bonding may be configured
via the sysfs interface.  This interface allows dynamic configuration
of all bonds in the system without unloading the module.  It also
allows for adding and removing bonds at runtime.  Ifenslave is no
longer required, though it is still supported.

Use of the sysfs interface allows you to use multiple bonds
with different configurations without having to reload the module.
It also allows you to use multiple, differently configured bonds when
bonding is compiled into the kernel.

You must have the sysfs filesystem mounted to configure
bonding this way.  The examples in this document assume that you
are using the standard mount point for sysfs, e.g. /sys.  If your
sysfs filesystem is mounted elsewhere, you will need to adjust the
example paths accordingly.
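
If sysfs is not already mounted on your system, it can typically be
mounted by hand with the standard mount command (the conventional
/sys mount point is assumed here):

# mount -t sysfs sysfs /sys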

Creating and Destroying Bonds
-----------------------------
To add a new bond foo:
# echo +foo > /sys/class/net/bonding_masters

To remove an existing bond bar:
# echo -bar > /sys/class/net/bonding_masters

To show all existing bonds:
# cat /sys/class/net/bonding_masters

NOTE: due to 4K size limitation of sysfs files, this list may be
truncated if you have more than a few hundred bonds.  This is unlikely
to occur under normal operating conditions.

Adding and Removing Slaves
--------------------------
Interfaces may be enslaved to a bond using the file
/sys/class/net/<bond>/bonding/slaves.  The semantics for this file
are the same as for the bonding_masters file.

To enslave interface eth0 to bond bond0:
# ifconfig bond0 up
# echo +eth0 > /sys/class/net/bond0/bonding/slaves

To free slave eth0 from bond bond0:
# echo -eth0 > /sys/class/net/bond0/bonding/slaves

NOTE: The bond must be up before slaves can be added.  All
slaves are freed when the interface is brought down.

When an interface is enslaved to a bond, symlinks between the
two are created in the sysfs filesystem.  In this case, you would get
/sys/class/net/bond0/slave_eth0 pointing to /sys/class/net/eth0, and
/sys/class/net/eth0/master pointing to /sys/class/net/bond0.

This means that you can tell quickly whether or not an
interface is enslaved by looking for the master symlink.  Thus:
# echo -eth0 > /sys/class/net/eth0/master/bonding/slaves
will free eth0 from whatever bond it is enslaved to, regardless of
the name of the bond interface.
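
As an illustration, one quick way to check whether eth0 is currently
enslaved is to read the master symlink directly; if the link does not
exist, the command simply fails and eth0 is not a slave:

# readlink /sys/class/net/eth0/master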

Changing a Bond's Configuration
-------------------------------
Each bond may be configured individually by manipulating the
files located in /sys/class/net/<bond name>/bonding

The names of these files correspond directly with the command-
line parameters described elsewhere in this file, and, with the
exception of arp_ip_target, they accept the same values.  To see the
current setting, simply cat the appropriate file.

A few examples will be given here; for specific usage
guidelines for each parameter, see the appropriate section in this
document.

To configure bond0 for balance-alb mode:
# ifconfig bond0 down
# echo 6 > /sys/class/net/bond0/bonding/mode
 - or -
# echo balance-alb > /sys/class/net/bond0/bonding/mode
NOTE: The bond interface must be down before the mode can be
changed.

To enable MII monitoring on bond0 with a 1 second interval:
# echo 1000 > /sys/class/net/bond0/bonding/miimon
NOTE: If ARP monitoring is enabled, it will be disabled when MII
monitoring is enabled, and vice-versa.

To add ARP targets:
# echo +192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
# echo +192.168.0.101 > /sys/class/net/bond0/bonding/arp_ip_target
NOTE: up to 10 target addresses may be specified.
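
The list of targets currently configured can normally be read back
from the same file, for example:

# cat /sys/class/net/bond0/bonding/arp_ip_target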

To remove an ARP target:
# echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target

Example Configuration
---------------------
We begin with the same example that is shown in section 3.3,
executed with sysfs, and without using ifenslave.

To make a simple bond of two e100 devices (presumed to be eth0
and eth1), and have it persist across reboots, edit the appropriate
file (/etc/init.d/boot.local or /etc/rc.d/rc.local), and add the
following:

modprobe bonding
modprobe e100
echo balance-alb > /sys/class/net/bond0/bonding/mode
ifconfig bond0 192.168.1.1 netmask 255.255.255.0 up
echo 100 > /sys/class/net/bond0/bonding/miimon
echo +eth0 > /sys/class/net/bond0/bonding/slaves
echo +eth1 > /sys/class/net/bond0/bonding/slaves

To add a second bond, with two e1000 interfaces in
active-backup mode, using ARP monitoring, add the following lines to
your init script:

modprobe e1000
echo +bond1 > /sys/class/net/bonding_masters
echo active-backup > /sys/class/net/bond1/bonding/mode
ifconfig bond1 192.168.2.1 netmask 255.255.255.0 up
echo +192.168.2.100 > /sys/class/net/bond1/bonding/arp_ip_target
echo 2000 > /sys/class/net/bond1/bonding/arp_interval
echo +eth2 > /sys/class/net/bond1/bonding/slaves
echo +eth3 > /sys/class/net/bond1/bonding/slaves

4. Querying Bonding Configuration
|
||||
=================================
|
||||
|
||||
5.1 Bonding Configuration
|
||||
4.1 Bonding Configuration
|
||||
-------------------------
|
||||
|
||||
Each bonding device has a read-only file residing in the
|
||||
@ -923,7 +1058,7 @@ generally as follows:
|
||||
The precise format and contents will change depending upon the
|
||||
bonding configuration, state, and version of the bonding driver.
|
||||
|
||||
5.2 Network configuration
|
||||
4.2 Network configuration
|
||||
-------------------------
|
||||
|
||||
The network configuration can be inspected using the ifconfig
|
||||
@ -958,7 +1093,7 @@ eth1 Link encap:Ethernet HWaddr 00:C0:F0:1F:37:B4
|
||||
collisions:0 txqueuelen:100
|
||||
Interrupt:9 Base address:0x1400
|
||||
|
||||
6. Switch Configuration
|
||||
5. Switch Configuration
|
||||
=======================
|
||||
|
||||
For this section, "switch" refers to whatever system the
|
||||
@ -991,7 +1126,7 @@ transmit policy for an EtherChannel group; all three will interoperate
|
||||
with another EtherChannel group.
|
||||
|
||||
|
||||
7. 802.1q VLAN Support
|
||||
6. 802.1q VLAN Support
|
||||
======================
|
||||
|
||||
It is possible to configure VLAN devices over a bond interface
|
||||
@ -1042,7 +1177,7 @@ underlying device -- i.e. the bonding interface -- to promiscuous
|
||||
mode, which might not be what you want.
|
||||
|
||||
|
||||
8. Link Monitoring
|
||||
7. Link Monitoring
|
||||
==================
|
||||
|
||||
The bonding driver at present supports two schemes for
|
||||
@ -1053,7 +1188,7 @@ monitor.
|
||||
bonding driver itself, it is not possible to enable both ARP and MII
|
||||
monitoring simultaneously.
|
||||
|
||||
8.1 ARP Monitor Operation
|
||||
7.1 ARP Monitor Operation
|
||||
-------------------------
|
||||
|
||||
The ARP monitor operates as its name suggests: it sends ARP
|
||||
@ -1071,7 +1206,7 @@ those slaves will stay down. If networking monitoring (tcpdump, etc)
|
||||
shows the ARP requests and replies on the network, then it may be that
|
||||
your device driver is not updating last_rx and trans_start.
|
||||
|
||||
8.2 Configuring Multiple ARP Targets
|
||||
7.2 Configuring Multiple ARP Targets
|
||||
------------------------------------
|
||||
|
||||
While ARP monitoring can be done with just one target, it can
|
||||
@ -1094,7 +1229,7 @@ alias bond0 bonding
|
||||
options bond0 arp_interval=60 arp_ip_target=192.168.0.100
|
||||
|
||||
|
||||
8.3 MII Monitor Operation
|
||||
7.3 MII Monitor Operation
|
||||
-------------------------
|
||||
|
||||
The MII monitor monitors only the carrier state of the local
|
||||
@ -1120,14 +1255,14 @@ does not support or had some error in processing both the MII register
|
||||
and ethtool requests), then the MII monitor will assume the link is
|
||||
up.
|
||||
|
||||
9. Potential Sources of Trouble
|
||||
8. Potential Sources of Trouble
|
||||
===============================
|
||||
|
||||
9.1 Adventures in Routing
|
||||
8.1 Adventures in Routing
|
||||
-------------------------
|
||||
|
||||
When bonding is configured, it is important that the slave
|
||||
devices not have routes that supercede routes of the master (or,
|
||||
devices not have routes that supersede routes of the master (or,
|
||||
generally, not have routes at all). For example, suppose the bonding
|
||||
device bond0 has two slaves, eth0 and eth1, and the routing table is
|
||||
as follows:
|
||||
@ -1154,11 +1289,11 @@ by the state of the routing table.
|
||||
|
||||
The solution here is simply to insure that slaves do not have
|
||||
routes of their own, and if for some reason they must, those routes do
|
||||
not supercede routes of their master. This should generally be the
|
||||
not supersede routes of their master. This should generally be the
|
||||
case, but unusual configurations or errant manual or automatic static
|
||||
route additions may cause trouble.
|
||||
|
||||
9.2 Ethernet Device Renaming
|
||||
8.2 Ethernet Device Renaming
|
||||
----------------------------
|
||||
|
||||
On systems with network configuration scripts that do not
|
||||
@ -1207,7 +1342,7 @@ modprobe with --ignore-install to cause the normal action to then take
|
||||
place. Full documentation on this can be found in the modprobe.conf
|
||||
and modprobe manual pages.
|
||||
|
||||
9.3. Painfully Slow Or No Failed Link Detection By Miimon
|
||||
8.3. Painfully Slow Or No Failed Link Detection By Miimon
|
||||
---------------------------------------------------------
|
||||
|
||||
By default, bonding enables the use_carrier option, which
|
||||
@ -1235,7 +1370,7 @@ carrier state. It has no way to determine the state of devices on or
|
||||
beyond other ports of a switch, or if a switch is refusing to pass
|
||||
traffic while still maintaining carrier on.
|
||||
|
||||
10. SNMP agents
|
||||
9. SNMP agents
|
||||
===============
|
||||
|
||||
If running SNMP agents, the bonding driver should be loaded
|
||||
@ -1281,7 +1416,7 @@ ifDescr, the association between the IP address and IfIndex remains
|
||||
and SNMP functions such as Interface_Scan_Next will report that
|
||||
association.
|
||||
|
||||
11. Promiscuous mode
|
||||
10. Promiscuous mode
|
||||
====================
|
||||
|
||||
When running network monitoring tools, e.g., tcpdump, it is
|
||||
@ -1308,7 +1443,7 @@ sending to peers that are unassigned or if the load is unbalanced.
|
||||
the active slave changes (e.g., due to a link failure), the
|
||||
promiscuous setting will be propagated to the new active slave.
|
||||
|
||||
12. Configuring Bonding for High Availability
|
||||
11. Configuring Bonding for High Availability
|
||||
=============================================
|
||||
|
||||
High Availability refers to configurations that provide
|
||||
@ -1318,7 +1453,7 @@ goal is to provide the maximum availability of network connectivity
|
||||
(i.e., the network always works), even though other configurations
|
||||
could provide higher throughput.
|
||||
|
||||
12.1 High Availability in a Single Switch Topology
|
||||
11.1 High Availability in a Single Switch Topology
|
||||
--------------------------------------------------
|
||||
|
||||
If two hosts (or a host and a single switch) are directly
|
||||
@ -1332,7 +1467,7 @@ the load will be rebalanced across the remaining devices.
|
||||
See Section 13, "Configuring Bonding for Maximum Throughput"
|
||||
for information on configuring bonding with one peer device.
|
||||
|
||||
12.2 High Availability in a Multiple Switch Topology
|
||||
11.2 High Availability in a Multiple Switch Topology
|
||||
----------------------------------------------------
|
||||
|
||||
With multiple switches, the configuration of bonding and the
|
||||
@ -1359,7 +1494,7 @@ switches (ISL, or inter switch link), and multiple ports connecting to
|
||||
the outside world ("port3" on each switch). There is no technical
|
||||
reason that this could not be extended to a third switch.
|
||||
|
||||
12.2.1 HA Bonding Mode Selection for Multiple Switch Topology
|
||||
11.2.1 HA Bonding Mode Selection for Multiple Switch Topology
|
||||
-------------------------------------------------------------
|
||||
|
||||
In a topology such as the example above, the active-backup and
|
||||
@ -1381,7 +1516,7 @@ broadcast: This mode is really a special purpose mode, and is suitable
|
||||
necessary for some specific one-way traffic to reach both
|
||||
independent networks, then the broadcast mode may be suitable.
|
||||
|
||||
12.2.2 HA Link Monitoring Selection for Multiple Switch Topology
|
||||
11.2.2 HA Link Monitoring Selection for Multiple Switch Topology
|
||||
----------------------------------------------------------------
|
||||
|
||||
The choice of link monitoring ultimately depends upon your
|
||||
@ -1402,10 +1537,10 @@ regardless of which switch is active, the ARP monitor has a suitable
|
||||
target to query.
|
||||
|
||||
|
||||
13. Configuring Bonding for Maximum Throughput
|
||||
12. Configuring Bonding for Maximum Throughput
|
||||
==============================================
|
||||
|
||||
13.1 Maximizing Throughput in a Single Switch Topology
|
||||
12.1 Maximizing Throughput in a Single Switch Topology
|
||||
------------------------------------------------------
|
||||
|
||||
In a single switch configuration, the best method to maximize
|
||||
@ -1476,7 +1611,7 @@ destination to make load balancing decisions. The behavior of each
|
||||
mode is described below.
|
||||
|
||||
|
||||
13.1.1 MT Bonding Mode Selection for Single Switch Topology
|
||||
12.1.1 MT Bonding Mode Selection for Single Switch Topology
|
||||
-----------------------------------------------------------
|
||||
|
||||
This configuration is the easiest to set up and to understand,
|
||||
@ -1607,7 +1742,7 @@ balance-alb: This mode is everything that balance-tlb is, and more.
|
||||
device driver must support changing the hardware address while
|
||||
the device is open.
|
||||
|
||||
13.1.2 MT Link Monitoring for Single Switch Topology
|
||||
12.1.2 MT Link Monitoring for Single Switch Topology
|
||||
----------------------------------------------------
|
||||
|
||||
The choice of link monitoring may largely depend upon which
|
||||
@ -1616,7 +1751,7 @@ support the use of the ARP monitor, and are thus restricted to using
|
||||
the MII monitor (which does not provide as high a level of end to end
|
||||
assurance as the ARP monitor).
|
||||
|
||||
13.2 Maximum Throughput in a Multiple Switch Topology
|
||||
12.2 Maximum Throughput in a Multiple Switch Topology
|
||||
-----------------------------------------------------
|
||||
|
||||
Multiple switches may be utilized to optimize for throughput
|
||||
@ -1651,7 +1786,7 @@ a single 72 port switch.
|
||||
can be equipped with an additional network device connected to an
|
||||
external network; this host then additionally acts as a gateway.
|
||||
|
||||
13.2.1 MT Bonding Mode Selection for Multiple Switch Topology
|
||||
12.2.1 MT Bonding Mode Selection for Multiple Switch Topology
|
||||
-------------------------------------------------------------
|
||||
|
||||
In actual practice, the bonding mode typically employed in
|
||||
@ -1664,7 +1799,7 @@ packets has arrived). When employed in this fashion, the balance-rr
|
||||
mode allows individual connections between two hosts to effectively
|
||||
utilize greater than one interface's bandwidth.
|
||||
|
||||
13.2.2 MT Link Monitoring for Multiple Switch Topology
|
||||
12.2.2 MT Link Monitoring for Multiple Switch Topology
|
||||
------------------------------------------------------
|
||||
|
||||
Again, in actual practice, the MII monitor is most often used
|
||||
@ -1674,10 +1809,10 @@ advantages over the MII monitor are mitigated by the volume of probes
|
||||
needed as the number of systems involved grows (remember that each
|
||||
host in the network is configured with bonding).
|
||||
|
||||
14. Switch Behavior Issues
|
||||
13. Switch Behavior Issues
|
||||
==========================
|
||||
|
||||
14.1 Link Establishment and Failover Delays
|
||||
13.1 Link Establishment and Failover Delays
|
||||
-------------------------------------------
|
||||
|
||||
Some switches exhibit undesirable behavior with regard to the
|
||||
@ -1712,7 +1847,7 @@ switches take a long time to go into backup mode, it may be desirable
|
||||
to not activate a backup interface immediately after a link goes down.
|
||||
Failover may be delayed via the downdelay bonding module option.
|
||||
|
||||
14.2 Duplicated Incoming Packets
|
||||
13.2 Duplicated Incoming Packets
|
||||
--------------------------------
|
||||
|
||||
It is not uncommon to observe a short burst of duplicated
|
||||
@ -1751,14 +1886,14 @@ behavior, it can be induced by clearing the MAC forwarding table (on
|
||||
most Cisco switches, the privileged command "clear mac address-table
|
||||
dynamic" will accomplish this).
|
||||
|
||||
15. Hardware Specific Considerations
|
||||
14. Hardware Specific Considerations
|
||||
====================================
|
||||
|
||||
This section contains additional information for configuring
|
||||
bonding on specific hardware platforms, or for interfacing bonding
|
||||
with particular switches or other devices.
|
||||
|
||||
15.1 IBM BladeCenter
|
||||
14.1 IBM BladeCenter
|
||||
--------------------
|
||||
|
||||
This applies to the JS20 and similar systems.
|
||||
@ -1861,7 +1996,7 @@ bonding driver.
|
||||
avoid fail-over delay issues when using bonding.
|
||||
|
||||
|
||||
16. Frequently Asked Questions
|
||||
15. Frequently Asked Questions
|
||||
==============================
|
||||
|
||||
1. Is it SMP safe?
|
||||
@ -1925,7 +2060,7 @@ not have special switch requirements, but do need device drivers that
|
||||
support specific features (described in the appropriate section under
|
||||
module parameters, above).
|
||||
|
||||
In 802.3ad mode, it works with with systems that support IEEE
|
||||
In 802.3ad mode, it works with systems that support IEEE
|
||||
802.3ad Dynamic Link Aggregation. Most managed and many unmanaged
|
||||
switches currently available support 802.3ad.
|
||||
|
||||
|
@ -1425,6 +1425,8 @@ P: Jesse Brandeburg
|
||||
M: jesse.brandeburg@intel.com
|
||||
P: Jeff Kirsher
|
||||
M: jeffrey.t.kirsher@intel.com
|
||||
P: Auke Kok
|
||||
M: auke-jan.h.kok@intel.com
|
||||
W: http://sourceforge.net/projects/e1000/
|
||||
S: Supported
|
||||
|
||||
@ -1437,6 +1439,8 @@ P: Jesse Brandeburg
|
||||
M: jesse.brandeburg@intel.com
|
||||
P: Jeff Kirsher
|
||||
M: jeffrey.t.kirsher@intel.com
|
||||
P: Auke Kok
|
||||
M: auke-jan.h.kok@intel.com
|
||||
W: http://sourceforge.net/projects/e1000/
|
||||
S: Supported
|
||||
|
||||
@ -1449,6 +1453,8 @@ P: John Ronciak
|
||||
M: john.ronciak@intel.com
|
||||
P: Jesse Brandeburg
|
||||
M: jesse.brandeburg@intel.com
|
||||
P: Auke Kok
|
||||
M: auke-jan.h.kok@intel.com
|
||||
W: http://sourceforge.net/projects/e1000/
|
||||
S: Supported
|
||||
|
||||
|
@ -909,7 +909,7 @@ MODULE_PARM_DESC(irq, "EtherLink IRQ number");
* here also causes the module to be unloaded
*/

int init_module(void)
int __init init_module(void)
{
dev_3c501 = el1_probe(-1);
if (IS_ERR(dev_3c501))
@ -688,7 +688,7 @@ MODULE_LICENSE("GPL");
|
||||
|
||||
/* This is set up so that only a single autoprobe takes place per call.
|
||||
ISA device autoprobes on a running machine are not recommended. */
|
||||
int
|
||||
int __init
|
||||
init_module(void)
|
||||
{
|
||||
struct net_device *dev;
|
||||
|
@ -1633,7 +1633,7 @@ MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
|
||||
MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
|
||||
MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
int this_dev, found = 0;
|
||||
|
||||
|
@ -932,7 +932,7 @@ module_param(irq, int, 0);
|
||||
MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
|
||||
MODULE_PARM_DESC(irq, "(ignored)");
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
if (io == 0)
|
||||
printk("3c507: You should not use auto-probing with insmod!\n");
|
||||
|
@ -1277,7 +1277,7 @@ MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
|
||||
MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
int this_dev,found = 0;
|
||||
|
||||
|
@ -1646,7 +1646,7 @@ static struct net_device *this_device;
|
||||
* insmod multiple modules for now but it's a hack.
|
||||
*/
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
this_device = mc32_probe(-1);
|
||||
if (IS_ERR(this_device))
|
||||
|
@ -19,11 +19,11 @@
|
||||
See the file COPYING in this distribution for more information.
|
||||
|
||||
Contributors:
|
||||
|
||||
|
||||
Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
|
||||
PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
|
||||
LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
|
||||
|
||||
|
||||
TODO:
|
||||
* Test Tx checksumming thoroughly
|
||||
* Implement dev->tx_timeout
|
||||
@ -461,7 +461,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
static inline void cp_set_rxbufsize (struct cp_private *cp)
|
||||
{
|
||||
unsigned int mtu = cp->dev->mtu;
|
||||
|
||||
|
||||
if (mtu > ETH_DATA_LEN)
|
||||
/* MTU + ethernet header + FCS + optional VLAN tag */
|
||||
cp->rx_buf_sz = mtu + ETH_HLEN + 8;
|
||||
@ -510,7 +510,7 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
|
||||
static inline unsigned int cp_rx_csum_ok (u32 status)
|
||||
{
|
||||
unsigned int protocol = (status >> 16) & 0x3;
|
||||
|
||||
|
||||
if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
|
||||
return 1;
|
||||
else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
|
||||
@ -1061,7 +1061,7 @@ static void cp_init_hw (struct cp_private *cp)
|
||||
cpw8(Config3, PARMEnable);
|
||||
cp->wol_enabled = 0;
|
||||
|
||||
cpw8(Config5, cpr8(Config5) & PMEStatus);
|
||||
cpw8(Config5, cpr8(Config5) & PMEStatus);
|
||||
|
||||
cpw32_f(HiTxRingAddr, 0);
|
||||
cpw32_f(HiTxRingAddr + 4, 0);
|
||||
@ -1351,7 +1351,7 @@ static void netdev_get_wol (struct cp_private *cp,
|
||||
WAKE_MCAST | WAKE_UCAST;
|
||||
/* We don't need to go on if WOL is disabled */
|
||||
if (!cp->wol_enabled) return;
|
||||
|
||||
|
||||
options = cpr8 (Config3);
|
||||
if (options & LinkUp) wol->wolopts |= WAKE_PHY;
|
||||
if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
|
||||
@ -1919,7 +1919,7 @@ static int cp_resume (struct pci_dev *pdev)
|
||||
mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
|
||||
|
||||
spin_unlock_irqrestore (&cp->lock, flags);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_PM */
|
||||
|
@ -165,7 +165,7 @@ static int multicast_filter_limit = 32;
|
||||
static int debug = -1;
|
||||
|
||||
/*
|
||||
* Receive ring size
|
||||
* Receive ring size
|
||||
* Warning: 64K ring has hardware issues and may lock up.
|
||||
*/
|
||||
#if defined(CONFIG_SH_DREAMCAST)
|
||||
@ -257,7 +257,7 @@ static struct pci_device_id rtl8139_pci_tbl[] = {
|
||||
{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
|
||||
{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
|
||||
{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
|
||||
{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
|
||||
{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
|
||||
|
||||
#ifdef CONFIG_SH_SECUREEDGE5410
|
||||
/* Bogus 8139 silicon reports 8129 without external PROM :-( */
|
||||
@ -1824,7 +1824,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
|
||||
int tmp_work;
|
||||
#endif
|
||||
|
||||
if (netif_msg_rx_err (tp))
|
||||
if (netif_msg_rx_err (tp))
|
||||
printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
|
||||
dev->name, rx_status);
|
||||
tp->stats.rx_errors++;
|
||||
@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
|
||||
RTL_R16 (RxBufAddr),
|
||||
RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
|
||||
|
||||
while (netif_running(dev) && received < budget
|
||||
while (netif_running(dev) && received < budget
|
||||
&& (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
|
||||
u32 ring_offset = cur_rx % RX_BUF_LEN;
|
||||
u32 rx_status;
|
||||
@ -2031,7 +2031,7 @@ no_early_rx:
|
||||
|
||||
netif_receive_skb (skb);
|
||||
} else {
|
||||
if (net_ratelimit())
|
||||
if (net_ratelimit())
|
||||
printk (KERN_WARNING
|
||||
"%s: Memory squeeze, dropping packet.\n",
|
||||
dev->name);
|
||||
@ -2158,13 +2158,13 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
|
||||
status = RTL_R16 (IntrStatus);
|
||||
|
||||
/* shared irq? */
|
||||
if (unlikely((status & rtl8139_intr_mask) == 0))
|
||||
if (unlikely((status & rtl8139_intr_mask) == 0))
|
||||
goto out;
|
||||
|
||||
handled = 1;
|
||||
|
||||
/* h/w no longer present (hotplug?) or major error, bail */
|
||||
if (unlikely(status == 0xFFFF))
|
||||
if (unlikely(status == 0xFFFF))
|
||||
goto out;
|
||||
|
||||
/* close possible race's with dev_close */
|
||||
|
@ -447,6 +447,7 @@ config MIPS_GT96100ETH
|
||||
config MIPS_AU1X00_ENET
|
||||
bool "MIPS AU1000 Ethernet support"
|
||||
depends on NET_ETHERNET && SOC_AU1X00
|
||||
select PHYLIB
|
||||
select CRC32
|
||||
help
|
||||
If you have an Alchemy Semi AU1X00 based system
|
||||
@ -865,6 +866,22 @@ config DM9000
|
||||
<file:Documentation/networking/net-modules.txt>. The module will be
|
||||
called dm9000.
|
||||
|
||||
config SMC911X
|
||||
tristate "SMSC LAN911[5678] support"
|
||||
select CRC32
|
||||
select MII
|
||||
depends on NET_ETHERNET && ARCH_PXA
|
||||
help
|
||||
This is a driver for SMSC's LAN911x series of Ethernet chipsets
|
||||
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
|
||||
Say Y if you want it compiled into the kernel,
|
||||
and read the Ethernet-HOWTO, available from
|
||||
<http://www.linuxdoc.org/docs.html#howto>.
|
||||
|
||||
This driver is also available as a module. The module will be
|
||||
called smc911x. If you want to compile it as a module, say M
|
||||
here and read <file:Documentation/modules.txt>
|
||||
|
||||
config NET_VENDOR_RACAL
|
||||
bool "Racal-Interlan (Micom) NI cards"
|
||||
depends on NET_ETHERNET && ISA
|
||||
@ -2311,6 +2328,23 @@ config S2IO_NAPI
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config MYRI10GE
|
||||
tristate "Myricom Myri-10G Ethernet support"
|
||||
depends on PCI
|
||||
select FW_LOADER
|
||||
select CRC32
|
||||
---help---
|
||||
This driver supports Myricom Myri-10G Dual Protocol interface in
|
||||
Ethernet mode. If the eeprom on your board is not recent enough,
|
||||
you will need a newer firmware image.
|
||||
You may get this image or more information, at:
|
||||
|
||||
<http://www.myri.com/Myri-10G/>
|
||||
|
||||
To compile this driver as a module, choose M here and read
|
||||
<file:Documentation/networking/net-modules.txt>. The module
|
||||
will be called myri10ge.
|
||||
|
||||
endmenu
|
||||
|
||||
source "drivers/net/tokenring/Kconfig"
|
||||
|
@ -192,7 +192,9 @@ obj-$(CONFIG_R8169) += r8169.o
|
||||
obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
|
||||
obj-$(CONFIG_IBMVETH) += ibmveth.o
|
||||
obj-$(CONFIG_S2IO) += s2io.o
|
||||
obj-$(CONFIG_MYRI10GE) += myri10ge/
|
||||
obj-$(CONFIG_SMC91X) += smc91x.o
|
||||
obj-$(CONFIG_SMC911X) += smc911x.o
|
||||
obj-$(CONFIG_DM9000) += dm9000.o
|
||||
obj-$(CONFIG_FEC_8XX) += fec_8xx/
|
||||
|
||||
|
File diff suppressed because it is too large
@ -40,120 +40,6 @@
|
||||
|
||||
#define MULTICAST_FILTER_LIMIT 64
|
||||
|
||||
/* FIXME
|
||||
* The PHY defines should be in a separate file.
|
||||
*/
|
||||
|
||||
/* MII register offsets */
|
||||
#define MII_CONTROL 0x0000
|
||||
#define MII_STATUS 0x0001
|
||||
#define MII_PHY_ID0 0x0002
|
||||
#define MII_PHY_ID1 0x0003
|
||||
#define MII_ANADV 0x0004
|
||||
#define MII_ANLPAR 0x0005
|
||||
#define MII_AEXP 0x0006
|
||||
#define MII_ANEXT 0x0007
|
||||
#define MII_LSI_PHY_CONFIG 0x0011
|
||||
/* Status register */
|
||||
#define MII_LSI_PHY_STAT 0x0012
|
||||
#define MII_AMD_PHY_STAT MII_LSI_PHY_STAT
|
||||
#define MII_INTEL_PHY_STAT 0x0011
|
||||
|
||||
#define MII_AUX_CNTRL 0x0018
|
||||
/* mii registers specific to AMD 79C901 */
|
||||
#define MII_STATUS_SUMMARY = 0x0018
|
||||
|
||||
/* MII Control register bit definitions. */
|
||||
#define MII_CNTL_FDX 0x0100
|
||||
#define MII_CNTL_RST_AUTO 0x0200
|
||||
#define MII_CNTL_ISOLATE 0x0400
|
||||
#define MII_CNTL_PWRDWN 0x0800
|
||||
#define MII_CNTL_AUTO 0x1000
|
||||
#define MII_CNTL_F100 0x2000
|
||||
#define MII_CNTL_LPBK 0x4000
|
||||
#define MII_CNTL_RESET 0x8000
|
||||
|
||||
/* MII Status register bit */
|
||||
#define MII_STAT_EXT 0x0001
|
||||
#define MII_STAT_JAB 0x0002
|
||||
#define MII_STAT_LINK 0x0004
|
||||
#define MII_STAT_CAN_AUTO 0x0008
|
||||
#define MII_STAT_FAULT 0x0010
|
||||
#define MII_STAT_AUTO_DONE 0x0020
|
||||
#define MII_STAT_CAN_T 0x0800
|
||||
#define MII_STAT_CAN_T_FDX 0x1000
|
||||
#define MII_STAT_CAN_TX 0x2000
|
||||
#define MII_STAT_CAN_TX_FDX 0x4000
|
||||
#define MII_STAT_CAN_T4 0x8000
|
||||
|
||||
|
||||
#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */
|
||||
#define MII_ID1_MODEL 0x03F0 /* model number */
|
||||
#define MII_ID1_REV 0x000F /* model number */
|
||||
|
||||
/* MII NWAY Register Bits ...
|
||||
valid for the ANAR (Auto-Negotiation Advertisement) and
|
||||
ANLPAR (Auto-Negotiation Link Partner) registers */
|
||||
#define MII_NWAY_NODE_SEL 0x001f
|
||||
#define MII_NWAY_CSMA_CD 0x0001
|
||||
#define MII_NWAY_T 0x0020
|
||||
#define MII_NWAY_T_FDX 0x0040
|
||||
#define MII_NWAY_TX 0x0080
|
||||
#define MII_NWAY_TX_FDX 0x0100
|
||||
#define MII_NWAY_T4 0x0200
|
||||
#define MII_NWAY_PAUSE 0x0400
|
||||
#define MII_NWAY_RF 0x2000 /* Remote Fault */
|
||||
#define MII_NWAY_ACK 0x4000 /* Remote Acknowledge */
|
||||
#define MII_NWAY_NP 0x8000 /* Next Page (Enable) */
|
||||
|
||||
/* mii stsout register bits */
|
||||
#define MII_STSOUT_LINK_FAIL 0x4000
|
||||
#define MII_STSOUT_SPD 0x0080
|
||||
#define MII_STSOUT_DPLX 0x0040
|
||||
|
||||
/* mii stsics register bits */
|
||||
#define MII_STSICS_SPD 0x8000
|
||||
#define MII_STSICS_DPLX 0x4000
|
||||
#define MII_STSICS_LINKSTS 0x0001
|
||||
|
||||
/* mii stssum register bits */
|
||||
#define MII_STSSUM_LINK 0x0008
|
||||
#define MII_STSSUM_DPLX 0x0004
|
||||
#define MII_STSSUM_AUTO 0x0002
|
||||
#define MII_STSSUM_SPD 0x0001
|
||||
|
||||
/* lsi phy status register */
|
||||
#define MII_LSI_PHY_STAT_FDX 0x0040
|
||||
#define MII_LSI_PHY_STAT_SPD 0x0080
|
||||
|
||||
/* amd phy status register */
|
||||
#define MII_AMD_PHY_STAT_FDX 0x0800
|
||||
#define MII_AMD_PHY_STAT_SPD 0x0400
|
||||
|
||||
/* intel phy status register */
|
||||
#define MII_INTEL_PHY_STAT_FDX 0x0200
|
||||
#define MII_INTEL_PHY_STAT_SPD 0x4000
|
||||
|
||||
/* Auxilliary Control/Status Register */
|
||||
#define MII_AUX_FDX 0x0001
|
||||
#define MII_AUX_100 0x0002
|
||||
#define MII_AUX_F100 0x0004
|
||||
#define MII_AUX_ANEG 0x0008
|
||||
|
||||
typedef struct mii_phy {
|
||||
struct mii_phy * next;
|
||||
struct mii_chip_info * chip_info;
|
||||
u16 status;
|
||||
u32 *mii_control_reg;
|
||||
u32 *mii_data_reg;
|
||||
} mii_phy_t;
|
||||
|
||||
struct phy_ops {
|
||||
int (*phy_init) (struct net_device *, int);
|
||||
int (*phy_reset) (struct net_device *, int);
|
||||
int (*phy_status) (struct net_device *, int, u16 *, u16 *);
|
||||
};
|
||||
|
||||
/*
|
||||
* Data Buffer Descriptor. Data buffers must be aligned on 32 byte
|
||||
* boundary for both, receive and transmit.
|
||||
@ -200,7 +86,6 @@ typedef struct mac_reg {
|
||||
|
||||
|
||||
struct au1000_private {
|
||||
|
||||
db_dest_t *pDBfree;
|
||||
db_dest_t db[NUM_RX_BUFFS+NUM_TX_BUFFS];
|
||||
volatile rx_dma_t *rx_dma_ring[NUM_RX_DMA];
|
||||
@ -213,8 +98,15 @@ struct au1000_private {
|
||||
u32 tx_full;
|
||||
|
||||
int mac_id;
|
||||
mii_phy_t *mii;
|
||||
struct phy_ops *phy_ops;
|
||||
|
||||
int mac_enabled; /* whether MAC is currently enabled and running (req. for mdio) */
|
||||
|
||||
int old_link; /* used by au1000_adjust_link */
|
||||
int old_speed;
|
||||
int old_duplex;
|
||||
|
||||
struct phy_device *phy_dev;
|
||||
struct mii_bus mii_bus;
|
||||
|
||||
/* These variables are just for quick access to certain regs addresses. */
|
||||
volatile mac_reg_t *mac; /* mac registers */
|
||||
@ -223,14 +115,6 @@ struct au1000_private {
|
||||
u32 vaddr; /* virtual address of rx/tx buffers */
|
||||
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
|
||||
|
||||
u8 *hash_table;
|
||||
u32 hash_mode;
|
||||
u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
|
||||
int phy_addr; /* phy address */
|
||||
u32 options; /* User-settable misc. driver options. */
|
||||
u32 drv_flags;
|
||||
int want_autoneg;
|
||||
struct net_device_stats stats;
|
||||
struct timer_list timer;
|
||||
spinlock_t lock; /* Serialise access to device */
|
||||
};
|
||||
|
@ -4877,7 +4877,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent)
|
||||
{
|
||||
static int cas_version_printed = 0;
|
||||
unsigned long casreg_base, casreg_len;
|
||||
unsigned long casreg_len;
|
||||
struct net_device *dev;
|
||||
struct cas *cp;
|
||||
int i, err, pci_using_dac;
|
||||
@ -4972,7 +4972,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
|
||||
pci_using_dac = 0;
|
||||
}
|
||||
|
||||
casreg_base = pci_resource_start(pdev, 0);
|
||||
casreg_len = pci_resource_len(pdev, 0);
|
||||
|
||||
cp = netdev_priv(dev);
|
||||
@ -5024,7 +5023,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
|
||||
cp->timer_ticks = 0;
|
||||
|
||||
/* give us access to cassini registers */
|
||||
cp->regs = ioremap(casreg_base, casreg_len);
|
||||
cp->regs = pci_iomap(pdev, 0, casreg_len);
|
||||
if (cp->regs == 0UL) {
|
||||
printk(KERN_ERR PFX "Cannot map device registers, "
|
||||
"aborting.\n");
|
||||
@ -5123,7 +5122,7 @@ err_out_iounmap:
|
||||
cas_shutdown(cp);
|
||||
mutex_unlock(&cp->pm_mutex);
|
||||
|
||||
iounmap(cp->regs);
|
||||
pci_iounmap(pdev, cp->regs);
|
||||
|
||||
|
||||
err_out_free_res:
|
||||
@ -5171,7 +5170,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
|
||||
#endif
|
||||
pci_free_consistent(pdev, sizeof(struct cas_init_block),
|
||||
cp->init_block, cp->block_dvma);
|
||||
iounmap(cp->regs);
|
||||
pci_iounmap(pdev, cp->regs);
|
||||
free_netdev(dev);
|
||||
pci_release_regions(pdev);
|
||||
pci_disable_device(pdev);
|
||||
|
@ -2780,6 +2780,80 @@ static void e100_shutdown(struct pci_dev *pdev)
|
||||
DPRINTK(PROBE,ERR, "Error enabling wake\n");
|
||||
}
|
||||
|
||||
/* ------------------ PCI Error Recovery infrastructure -------------- */
|
||||
/**
|
||||
* e100_io_error_detected - called when PCI error is detected.
|
||||
* @pdev: Pointer to PCI device
|
||||
* @state: The current pci conneection state
|
||||
*/
|
||||
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
|
||||
/* Similar to calling e100_down(), but avoids adpater I/O. */
|
||||
netdev->stop(netdev);
|
||||
|
||||
/* Detach; put netif into state similar to hotplug unplug. */
|
||||
netif_poll_enable(netdev);
|
||||
netif_device_detach(netdev);
|
||||
|
||||
/* Request a slot reset. */
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
}
|
||||
|
||||
/**
|
||||
* e100_io_slot_reset - called after the pci bus has been reset.
|
||||
* @pdev: Pointer to PCI device
|
||||
*
|
||||
* Restart the card from scratch.
|
||||
*/
|
||||
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct nic *nic = netdev_priv(netdev);
|
||||
|
||||
if (pci_enable_device(pdev)) {
|
||||
printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
}
|
||||
pci_set_master(pdev);
|
||||
|
||||
/* Only one device per card can do a reset */
|
||||
if (0 != PCI_FUNC(pdev->devfn))
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
e100_hw_reset(nic);
|
||||
e100_phy_init(nic);
|
||||
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
/**
|
||||
* e100_io_resume - resume normal operations
|
||||
* @pdev: Pointer to PCI device
|
||||
*
|
||||
* Resume normal operations after an error recovery
|
||||
* sequence has been completed.
|
||||
*/
|
||||
static void e100_io_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct nic *nic = netdev_priv(netdev);
|
||||
|
||||
/* ack any pending wake events, disable PME */
|
||||
pci_enable_wake(pdev, 0, 0);
|
||||
|
||||
netif_device_attach(netdev);
|
||||
if (netif_running(netdev)) {
|
||||
e100_open(netdev);
|
||||
mod_timer(&nic->watchdog, jiffies);
|
||||
}
|
||||
}
|
||||
|
||||
static struct pci_error_handlers e100_err_handler = {
|
||||
.error_detected = e100_io_error_detected,
|
||||
.slot_reset = e100_io_slot_reset,
|
||||
.resume = e100_io_resume,
|
||||
};
|
||||
|
||||
static struct pci_driver e100_driver = {
|
||||
.name = DRV_NAME,
|
||||
@ -2791,6 +2865,7 @@ static struct pci_driver e100_driver = {
|
||||
.resume = e100_resume,
|
||||
#endif
|
||||
.shutdown = e100_shutdown,
|
||||
.err_handler = &e100_err_handler,
|
||||
};
|
||||
|
||||
static int __init e100_init_module(void)
|
||||
|
@ -1,7 +1,7 @@
|
||||
################################################################################
|
||||
#
|
||||
#
|
||||
# Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
|
||||
# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,6 +22,7 @@
|
||||
#
|
||||
# Contact Information:
|
||||
# Linux NICS <linux.nics@intel.com>
|
||||
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
#
|
||||
################################################################################
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,6 +22,7 @@
|
||||
|
||||
Contact Information:
|
||||
Linux NICS <linux.nics@intel.com>
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
@ -114,6 +115,8 @@ struct e1000_adapter;
|
||||
/* Supported Rx Buffer Sizes */
|
||||
#define E1000_RXBUFFER_128 128 /* Used for packet split */
|
||||
#define E1000_RXBUFFER_256 256 /* Used for packet split */
|
||||
#define E1000_RXBUFFER_512 512
|
||||
#define E1000_RXBUFFER_1024 1024
|
||||
#define E1000_RXBUFFER_2048 2048
|
||||
#define E1000_RXBUFFER_4096 4096
|
||||
#define E1000_RXBUFFER_8192 8192
|
||||
@ -334,7 +337,6 @@ struct e1000_adapter {
|
||||
boolean_t have_msi;
|
||||
#endif
|
||||
/* to not mess up cache alignment, always add to the bottom */
|
||||
boolean_t txb2b;
|
||||
#ifdef NETIF_F_TSO
|
||||
boolean_t tso_force;
|
||||
#endif
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,6 +22,7 @@
|
||||
|
||||
Contact Information:
|
||||
Linux NICS <linux.nics@intel.com>
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
@ -864,8 +865,8 @@ static int
|
||||
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
|
||||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
uint32_t mask, i=0, shared_int = TRUE;
|
||||
uint32_t irq = adapter->pdev->irq;
|
||||
uint32_t mask, i=0, shared_int = TRUE;
|
||||
uint32_t irq = adapter->pdev->irq;
|
||||
|
||||
*data = 0;
|
||||
|
||||
@ -891,22 +892,22 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
|
||||
/* Interrupt to test */
|
||||
mask = 1 << i;
|
||||
|
||||
if (!shared_int) {
|
||||
/* Disable the interrupt to be reported in
|
||||
* the cause register and then force the same
|
||||
* interrupt and see if one gets posted. If
|
||||
* an interrupt was posted to the bus, the
|
||||
* test failed.
|
||||
*/
|
||||
adapter->test_icr = 0;
|
||||
E1000_WRITE_REG(&adapter->hw, IMC, mask);
|
||||
E1000_WRITE_REG(&adapter->hw, ICS, mask);
|
||||
msec_delay(10);
|
||||
if (!shared_int) {
|
||||
/* Disable the interrupt to be reported in
|
||||
* the cause register and then force the same
|
||||
* interrupt and see if one gets posted. If
|
||||
* an interrupt was posted to the bus, the
|
||||
* test failed.
|
||||
*/
|
||||
adapter->test_icr = 0;
|
||||
E1000_WRITE_REG(&adapter->hw, IMC, mask);
|
||||
E1000_WRITE_REG(&adapter->hw, ICS, mask);
|
||||
msec_delay(10);
|
||||
|
||||
if (adapter->test_icr & mask) {
|
||||
*data = 3;
|
||||
break;
|
||||
}
|
||||
if (adapter->test_icr & mask) {
|
||||
*data = 3;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Enable the interrupt to be reported in
|
||||
@ -925,7 +926,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!shared_int) {
|
||||
if (!shared_int) {
|
||||
/* Disable the other interrupts to be reported in
|
||||
* the cause register and then force the other
|
||||
* interrupts and see if any get posted. If
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,6 +22,7 @@
|
||||
|
||||
Contact Information:
|
||||
Linux NICS <linux.nics@intel.com>
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
@ -764,7 +765,7 @@ e1000_init_hw(struct e1000_hw *hw)
|
||||
}
|
||||
|
||||
if (hw->mac_type == e1000_82573) {
|
||||
e1000_enable_tx_pkt_filtering(hw);
|
||||
e1000_enable_tx_pkt_filtering(hw);
|
||||
}
|
||||
|
||||
switch (hw->mac_type) {
|
||||
@ -860,7 +861,7 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
|
||||
|
||||
if(eeprom_data != EEPROM_RESERVED_WORD) {
|
||||
/* Adjust SERDES output amplitude only. */
|
||||
eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
|
||||
eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
|
||||
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
|
||||
if(ret_val)
|
||||
return ret_val;
|
||||
@ -1227,7 +1228,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
|
||||
|
||||
if (hw->phy_reset_disable)
|
||||
return E1000_SUCCESS;
|
||||
|
||||
|
||||
ret_val = e1000_phy_reset(hw);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("Error Resetting the PHY\n");
|
||||
@ -1369,7 +1370,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
|
||||
DEBUGFUNC("e1000_copper_link_ggp_setup");
|
||||
|
||||
if(!hw->phy_reset_disable) {
|
||||
|
||||
|
||||
/* Enable CRS on TX for half-duplex operation. */
|
||||
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
|
||||
&phy_data);
|
||||
@ -1518,7 +1519,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
|
||||
|
||||
if(hw->phy_reset_disable)
|
||||
return E1000_SUCCESS;
|
||||
|
||||
|
||||
/* Enable CRS on TX. This must be set for half-duplex operation. */
|
||||
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
|
||||
if(ret_val)
|
||||
@ -1664,7 +1665,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
|
||||
* collision distance in the Transmit Control Register.
|
||||
* 2) Set up flow control on the MAC to that established with
|
||||
* the link partner.
|
||||
* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
|
||||
* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
|
||||
*
|
||||
* hw - Struct containing variables accessed by shared code
|
||||
******************************************************************************/
|
||||
@ -1673,7 +1674,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
|
||||
{
|
||||
int32_t ret_val;
|
||||
DEBUGFUNC("e1000_copper_link_postconfig");
|
||||
|
||||
|
||||
if(hw->mac_type >= e1000_82544) {
|
||||
e1000_config_collision_dist(hw);
|
||||
} else {
|
||||
@ -1697,7 +1698,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
|
||||
return ret_val;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
@ -1753,11 +1754,11 @@ e1000_setup_copper_link(struct e1000_hw *hw)
|
||||
}
|
||||
|
||||
if(hw->autoneg) {
|
||||
/* Setup autoneg and flow control advertisement
|
||||
* and perform autonegotiation */
|
||||
/* Setup autoneg and flow control advertisement
|
||||
* and perform autonegotiation */
|
||||
ret_val = e1000_copper_link_autoneg(hw);
|
||||
if(ret_val)
|
||||
return ret_val;
|
||||
return ret_val;
|
||||
} else {
|
||||
/* PHY will be set to 10H, 10F, 100H,or 100F
|
||||
* depending on value from forced_speed_duplex. */
|
||||
@ -1785,7 +1786,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
|
||||
ret_val = e1000_copper_link_postconfig(hw);
|
||||
if(ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
||||
DEBUGOUT("Valid link established!!!\n");
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
@ -1983,7 +1984,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
|
||||
|
||||
DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
|
||||
|
||||
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
|
||||
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
|
||||
if(ret_val)
|
||||
return ret_val;
|
||||
|
||||
@ -2272,7 +2273,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
|
||||
|
||||
DEBUGFUNC("e1000_config_mac_to_phy");
|
||||
|
||||
/* 82544 or newer MAC, Auto Speed Detection takes care of
|
||||
/* 82544 or newer MAC, Auto Speed Detection takes care of
|
||||
* MAC speed/duplex configuration.*/
|
||||
if (hw->mac_type >= e1000_82544)
|
||||
return E1000_SUCCESS;
|
||||
@ -2291,9 +2292,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
|
||||
if(ret_val)
|
||||
return ret_val;
|
||||
|
||||
if(phy_data & M88E1000_PSSR_DPLX)
|
||||
if(phy_data & M88E1000_PSSR_DPLX)
|
||||
ctrl |= E1000_CTRL_FD;
|
||||
else
|
||||
else
|
||||
ctrl &= ~E1000_CTRL_FD;
|
||||
|
||||
e1000_config_collision_dist(hw);
|
||||
@ -2492,10 +2493,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
|
||||
*/
|
||||
if(hw->original_fc == e1000_fc_full) {
|
||||
hw->fc = e1000_fc_full;
|
||||
DEBUGOUT("Flow Control = FULL.\r\n");
|
||||
DEBUGOUT("Flow Control = FULL.\n");
|
||||
} else {
|
||||
hw->fc = e1000_fc_rx_pause;
|
||||
DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
|
||||
DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
|
||||
}
|
||||
}
|
||||
/* For receiving PAUSE frames ONLY.
|
||||
@ -2511,7 +2512,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
|
||||
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
|
||||
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
|
||||
hw->fc = e1000_fc_tx_pause;
|
||||
DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
|
||||
DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
|
||||
}
|
||||
/* For transmitting PAUSE frames ONLY.
|
||||
*
|
||||
@ -2526,7 +2527,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
|
||||
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
|
||||
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
|
||||
hw->fc = e1000_fc_rx_pause;
|
||||
DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
|
||||
DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
|
||||
}
|
||||
/* Per the IEEE spec, at this point flow control should be
|
||||
* disabled. However, we want to consider that we could
|
||||
@ -2552,10 +2553,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
|
||||
hw->original_fc == e1000_fc_tx_pause) ||
|
||||
hw->fc_strict_ieee) {
|
||||
hw->fc = e1000_fc_none;
|
||||
DEBUGOUT("Flow Control = NONE.\r\n");
|
||||
DEBUGOUT("Flow Control = NONE.\n");
|
||||
} else {
|
||||
hw->fc = e1000_fc_rx_pause;
|
||||
DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
|
||||
DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
|
||||
}
|
||||
|
||||
/* Now we need to do one last check... If we auto-
|
||||
@ -2580,7 +2581,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
|
||||
return ret_val;
|
||||
}
|
||||
} else {
|
||||
DEBUGOUT("Copper PHY and Auto Neg has not completed.\r\n");
|
||||
DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
|
||||
}
|
||||
}
|
||||
return E1000_SUCCESS;
|
||||
@ -2763,7 +2764,7 @@ e1000_check_for_link(struct e1000_hw *hw)
|
||||
hw->autoneg_failed = 1;
|
||||
return 0;
|
||||
}
|
||||
DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
|
||||
DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
|
||||
|
||||
/* Disable auto-negotiation in the TXCW register */
|
||||
E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
|
||||
@ -2788,7 +2789,7 @@ e1000_check_for_link(struct e1000_hw *hw)
|
||||
else if(((hw->media_type == e1000_media_type_fiber) ||
|
||||
(hw->media_type == e1000_media_type_internal_serdes)) &&
|
||||
(ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
|
||||
DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\r\n");
|
||||
DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
|
||||
E1000_WRITE_REG(hw, TXCW, hw->txcw);
|
||||
E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
|
||||
|
||||
@ -2851,13 +2852,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
|
||||
|
||||
if(status & E1000_STATUS_FD) {
|
||||
*duplex = FULL_DUPLEX;
|
||||
DEBUGOUT("Full Duplex\r\n");
|
||||
DEBUGOUT("Full Duplex\n");
|
||||
} else {
|
||||
*duplex = HALF_DUPLEX;
|
||||
DEBUGOUT(" Half Duplex\r\n");
|
||||
DEBUGOUT(" Half Duplex\n");
|
||||
}
|
||||
} else {
|
||||
DEBUGOUT("1000 Mbs, Full Duplex\r\n");
|
||||
DEBUGOUT("1000 Mbs, Full Duplex\n");
|
||||
*speed = SPEED_1000;
|
||||
*duplex = FULL_DUPLEX;
|
||||
}
|
||||
@ -2883,7 +2884,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
|
||||
}
|
||||
}
|
||||
|
||||
if ((hw->mac_type == e1000_80003es2lan) &&
|
||||
if ((hw->mac_type == e1000_80003es2lan) &&
|
||||
(hw->media_type == e1000_media_type_copper)) {
|
||||
if (*speed == SPEED_1000)
|
||||
ret_val = e1000_configure_kmrn_for_1000(hw);
|
||||
@ -3159,7 +3160,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
|
||||
if (e1000_swfw_sync_acquire(hw, swfw))
|
||||
return -E1000_ERR_SWFW_SYNC;
|
||||
|
||||
if((hw->phy_type == e1000_phy_igp ||
|
||||
if((hw->phy_type == e1000_phy_igp ||
|
||||
hw->phy_type == e1000_phy_igp_2) &&
|
||||
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
|
||||
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
|
||||
@ -3298,7 +3299,7 @@ e1000_write_phy_reg(struct e1000_hw *hw,
|
||||
if (e1000_swfw_sync_acquire(hw, swfw))
|
||||
return -E1000_ERR_SWFW_SYNC;
|
||||
|
||||
if((hw->phy_type == e1000_phy_igp ||
|
||||
if((hw->phy_type == e1000_phy_igp ||
|
||||
hw->phy_type == e1000_phy_igp_2) &&
|
||||
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
|
||||
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
|
||||
@ -3496,22 +3497,22 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
|
||||
}
|
||||
/* Read the device control register and assert the E1000_CTRL_PHY_RST
|
||||
* bit. Then, take it out of reset.
|
||||
* For pre-e1000_82571 hardware, we delay for 10ms between the assert
|
||||
* For pre-e1000_82571 hardware, we delay for 10ms between the assert
|
||||
* and deassert. For e1000_82571 hardware and later, we instead delay
|
||||
* for 50us between and 10ms after the deassertion.
|
||||
*/
|
||||
ctrl = E1000_READ_REG(hw, CTRL);
|
||||
E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
|
||||
if (hw->mac_type < e1000_82571)
|
||||
|
||||
if (hw->mac_type < e1000_82571)
|
||||
msec_delay(10);
|
||||
else
|
||||
udelay(100);
|
||||
|
||||
|
||||
E1000_WRITE_REG(hw, CTRL, ctrl);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
|
||||
|
||||
if (hw->mac_type >= e1000_82571)
|
||||
msec_delay(10);
|
||||
e1000_swfw_sync_release(hw, swfw);
|
||||
@ -3815,7 +3816,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
|
||||
/* Check polarity status */
|
||||
ret_val = e1000_check_polarity(hw, &polarity);
|
||||
if(ret_val)
|
||||
return ret_val;
|
||||
return ret_val;
|
||||
phy_info->cable_polarity = polarity;
|
||||
|
||||
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
|
||||
@ -4540,14 +4541,14 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
|
||||
|
||||
E1000_WRITE_REG(hw, EERD, eerd);
|
||||
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
|
||||
|
||||
|
||||
if(error) {
|
||||
break;
|
||||
}
|
||||
data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
@ -4573,24 +4574,24 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
|
||||
return -E1000_ERR_SWFW_SYNC;
|
||||
|
||||
for (i = 0; i < words; i++) {
|
||||
register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
|
||||
((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
|
||||
register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
|
||||
((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
|
||||
E1000_EEPROM_RW_REG_START;
|
||||
|
||||
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
|
||||
if(error) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
E1000_WRITE_REG(hw, EEWR, register_value);
|
||||
|
||||
|
||||
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
|
||||
|
||||
|
||||
if(error) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
|
||||
return error;
|
||||
}
|
||||
@ -4610,7 +4611,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
|
||||
for(i = 0; i < attempts; i++) {
|
||||
if(eerd == E1000_EEPROM_POLL_READ)
|
||||
reg = E1000_READ_REG(hw, EERD);
|
||||
else
|
||||
else
|
||||
reg = E1000_READ_REG(hw, EEWR);
|
||||
|
||||
if(reg & E1000_EEPROM_RW_REG_DONE) {
|
||||
@ -5135,7 +5136,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
|
||||
uint32_t i;
|
||||
uint32_t num_rar_entry;
|
||||
uint32_t num_mta_entry;
|
||||
|
||||
|
||||
DEBUGFUNC("e1000_mc_addr_list_update");
|
||||
|
||||
/* Set the new number of MC addresses that we are being requested to use. */
|
||||
@ -6240,7 +6241,7 @@ e1000_check_polarity(struct e1000_hw *hw,
|
||||
 * 1 - Downshift occurred.
|
||||
*
|
||||
* returns: - E1000_ERR_XXX
|
||||
* E1000_SUCCESS
|
||||
* E1000_SUCCESS
|
||||
*
|
||||
* For phy's older then IGP, this function reads the Downshift bit in the Phy
|
||||
* Specific Status register. For IGP phy's, it reads the Downgrade bit in the
|
||||
@ -6255,7 +6256,7 @@ e1000_check_downshift(struct e1000_hw *hw)
|
||||
|
||||
DEBUGFUNC("e1000_check_downshift");
|
||||
|
||||
if(hw->phy_type == e1000_phy_igp ||
|
||||
if(hw->phy_type == e1000_phy_igp ||
|
||||
hw->phy_type == e1000_phy_igp_2) {
|
||||
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
|
||||
&phy_data);
|
||||
@ -6684,8 +6685,8 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
|
||||
|
||||
|
||||
} else {
|
||||
|
||||
phy_data |= IGP02E1000_PM_D0_LPLU;
|
||||
|
||||
phy_data |= IGP02E1000_PM_D0_LPLU;
|
||||
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
@ -6777,7 +6778,7 @@ int32_t
|
||||
e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
|
||||
{
|
||||
uint8_t i;
|
||||
uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
|
||||
uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
|
||||
uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
|
||||
|
||||
length = (length >> 2);
|
||||
@ -6796,7 +6797,7 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
|
||||
* and also checks whether the previous command is completed.
|
||||
 * It busy waits if the previous command has not completed.
|
||||
*
|
||||
* returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
|
||||
* returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
|
||||
* timeout
|
||||
* - E1000_SUCCESS for success.
|
||||
****************************************************************************/
|
||||
@ -6820,7 +6821,7 @@ e1000_mng_enable_host_if(struct e1000_hw * hw)
|
||||
msec_delay_irq(1);
|
||||
}
|
||||
|
||||
if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
|
||||
if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
|
||||
DEBUGOUT("Previous command timeout failed .\n");
|
||||
return -E1000_ERR_HOST_INTERFACE_COMMAND;
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,6 +22,7 @@
|
||||
|
||||
Contact Information:
|
||||
Linux NICS <linux.nics@intel.com>
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
@ -374,7 +375,7 @@ struct e1000_host_mng_dhcp_cookie{
|
||||
};
|
||||
#endif
|
||||
|
||||
int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
|
||||
int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
|
||||
uint16_t length);
|
||||
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
|
||||
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
|
||||
@ -1801,7 +1802,7 @@ struct e1000_hw {
|
||||
* value2 = [0..64512], default=4096
|
||||
* value3 = [0..64512], default=0
|
||||
*/
|
||||
|
||||
|
||||
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
|
||||
#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
|
||||
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,51 +22,13 @@
|
||||
|
||||
Contact Information:
|
||||
Linux NICS <linux.nics@intel.com>
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#include "e1000.h"
|
||||
|
||||
/* Change Log
|
||||
* 7.0.33 3-Feb-2006
|
||||
* o Added another fix for the pass false carrier bit
|
||||
* 7.0.32 24-Jan-2006
|
||||
 * o Need to rebuild with new version number for the pass false carrier
|
||||
* fix in e1000_hw.c
|
||||
* 7.0.30 18-Jan-2006
|
||||
* o fixup for tso workaround to disable it for pci-x
|
||||
* o fix mem leak on 82542
|
||||
* o fixes for 10 Mb/s connections and incorrect stats
|
||||
* 7.0.28 01/06/2006
|
||||
* o hardware workaround to only set "speed mode" bit for 1G link.
|
||||
* 7.0.26 12/23/2005
|
||||
* o wake on lan support modified for device ID 10B5
|
||||
* o fix dhcp + vlan issue not making it to the iAMT firmware
|
||||
* 7.0.24 12/9/2005
|
||||
* o New hardware support for the Gigabit NIC embedded in the south bridge
|
||||
* o Fixes to the recycling logic (skb->tail) from IBM LTC
|
||||
* 6.3.9 12/16/2005
|
||||
* o incorporate fix for recycled skbs from IBM LTC
|
||||
* 6.3.7 11/18/2005
|
||||
* o Honor eeprom setting for enabling/disabling Wake On Lan
|
||||
* 6.3.5 11/17/2005
|
||||
* o Fix memory leak in rx ring handling for PCI Express adapters
|
||||
* 6.3.4 11/8/05
|
||||
* o Patch from Jesper Juhl to remove redundant NULL checks for kfree
|
||||
* 6.3.2 9/20/05
|
||||
* o Render logic that sets/resets DRV_LOAD as inline functions to
|
||||
* avoid code replication. If f/w is AMT then set DRV_LOAD only when
|
||||
* network interface is open.
|
||||
* o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
|
||||
* o Adjust PBA partioning for Jumbo frames using MTU size and not
|
||||
* rx_buffer_len
|
||||
* 6.3.1 9/19/05
|
||||
* o Use adapter->tx_timeout_factor in Tx Hung Detect logic
|
||||
* (e1000_clean_tx_irq)
|
||||
* o Support for 8086:10B5 device (Quad Port)
|
||||
*/
|
||||
|
||||
char e1000_driver_name[] = "e1000";
|
||||
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
|
||||
#ifndef CONFIG_E1000_NAPI
|
||||
@ -74,9 +36,9 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
|
||||
#else
|
||||
#define DRIVERNAPI "-NAPI"
|
||||
#endif
|
||||
#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
|
||||
#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
|
||||
char e1000_driver_version[] = DRV_VERSION;
|
||||
static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
|
||||
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
|
||||
|
||||
/* e1000_pci_tbl - PCI Device ID Table
|
||||
*
|
||||
@ -208,8 +170,8 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
|
||||
static void e1000_tx_timeout(struct net_device *dev);
|
||||
static void e1000_reset_task(struct net_device *dev);
|
||||
static void e1000_smartspeed(struct e1000_adapter *adapter);
|
||||
static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
|
||||
struct sk_buff *skb);
|
||||
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
|
||||
struct sk_buff *skb);
|
||||
|
||||
static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
|
||||
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
|
||||
@ -227,6 +189,16 @@ static void e1000_shutdown(struct pci_dev *pdev);
|
||||
static void e1000_netpoll (struct net_device *netdev);
|
||||
#endif
|
||||
|
||||
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
|
||||
pci_channel_state_t state);
|
||||
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
|
||||
static void e1000_io_resume(struct pci_dev *pdev);
|
||||
|
||||
static struct pci_error_handlers e1000_err_handler = {
|
||||
.error_detected = e1000_io_error_detected,
|
||||
.slot_reset = e1000_io_slot_reset,
|
||||
.resume = e1000_io_resume,
|
||||
};
|
||||
|
||||
static struct pci_driver e1000_driver = {
|
||||
.name = e1000_driver_name,
|
||||
@ -238,7 +210,8 @@ static struct pci_driver e1000_driver = {
|
||||
.suspend = e1000_suspend,
|
||||
.resume = e1000_resume,
|
||||
#endif
|
||||
.shutdown = e1000_shutdown
|
||||
.shutdown = e1000_shutdown,
|
||||
.err_handler = &e1000_err_handler
|
||||
};
|
||||
|
||||
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
|
||||
@ -293,7 +266,7 @@ module_exit(e1000_exit_module);
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
e1000_irq_disable(struct e1000_adapter *adapter)
|
||||
{
|
||||
atomic_inc(&adapter->irq_sem);
|
||||
@ -307,7 +280,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
e1000_irq_enable(struct e1000_adapter *adapter)
|
||||
{
|
||||
if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
|
||||
@ -348,10 +321,10 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
|
||||
* For ASF and Pass Through versions of f/w this means that the
|
||||
 * driver is no longer loaded. For AMT version (only with 82573)
|
||||
 * of the f/w this means that the network i/f is closed.
|
||||
*
|
||||
*
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
e1000_release_hw_control(struct e1000_adapter *adapter)
|
||||
{
|
||||
uint32_t ctrl_ext;
|
||||
@ -361,6 +334,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
|
||||
switch (adapter->hw.mac_type) {
|
||||
case e1000_82571:
|
||||
case e1000_82572:
|
||||
case e1000_80003es2lan:
|
||||
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
|
||||
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
|
||||
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
|
||||
@ -379,13 +353,13 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
|
||||
* @adapter: address of board private structure
|
||||
*
|
||||
* e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
|
||||
* For ASF and Pass Through versions of f/w this means that
|
||||
* the driver is loaded. For AMT version (only with 82573)
|
||||
* For ASF and Pass Through versions of f/w this means that
|
||||
* the driver is loaded. For AMT version (only with 82573)
|
||||
 * of the f/w this means that the network i/f is open.
|
||||
*
|
||||
*
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
e1000_get_hw_control(struct e1000_adapter *adapter)
|
||||
{
|
||||
uint32_t ctrl_ext;
|
||||
@ -394,6 +368,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
|
||||
switch (adapter->hw.mac_type) {
|
||||
case e1000_82571:
|
||||
case e1000_82572:
|
||||
case e1000_80003es2lan:
|
||||
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
|
||||
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
|
||||
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
|
||||
@ -421,7 +396,7 @@ e1000_up(struct e1000_adapter *adapter)
|
||||
uint16_t mii_reg;
|
||||
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
|
||||
if (mii_reg & MII_CR_POWER_DOWN)
|
||||
e1000_phy_reset(&adapter->hw);
|
||||
e1000_phy_hw_reset(&adapter->hw);
|
||||
}
|
||||
|
||||
e1000_set_multi(netdev);
|
||||
@ -711,8 +686,8 @@ e1000_probe(struct pci_dev *pdev,
|
||||
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
|
||||
|
||||
/* if ksp3, indicate if it's port a being setup */
|
||||
if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
|
||||
e1000_ksp3_port_a == 0)
|
||||
if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
|
||||
e1000_ksp3_port_a == 0)
|
||||
adapter->ksp3_port_a = 1;
|
||||
e1000_ksp3_port_a++;
|
||||
/* Reset for multiple KP3 adapters */
|
||||
@ -740,9 +715,9 @@ e1000_probe(struct pci_dev *pdev,
|
||||
if (pci_using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
|
||||
/* hard_start_xmit is safe against parallel locking */
|
||||
netdev->features |= NETIF_F_LLTX;
|
||||
|
||||
/* hard_start_xmit is safe against parallel locking */
|
||||
netdev->features |= NETIF_F_LLTX;
|
||||
|
||||
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
|
||||
|
||||
/* before reading the EEPROM, reset the controller to
|
||||
@ -972,8 +947,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
|
||||
|
||||
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
|
||||
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
|
||||
adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
|
||||
adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
|
||||
adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
|
||||
hw->max_frame_size = netdev->mtu +
|
||||
ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
|
||||
hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
|
||||
@ -1181,7 +1156,7 @@ e1000_close(struct net_device *netdev)
|
||||
* @start: address of beginning of memory
|
||||
* @len: length of memory
|
||||
**/
|
||||
static inline boolean_t
|
||||
static boolean_t
|
||||
e1000_check_64k_bound(struct e1000_adapter *adapter,
|
||||
void *start, unsigned long len)
|
||||
{
|
||||
@ -1599,14 +1574,21 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
|
||||
rctl |= E1000_RCTL_LPE;
|
||||
|
||||
/* Setup buffer sizes */
|
||||
if (adapter->hw.mac_type >= e1000_82571) {
|
||||
/* We can now specify buffers in 1K increments.
|
||||
* BSIZE and BSEX are ignored in this case. */
|
||||
rctl |= adapter->rx_buffer_len << 0x11;
|
||||
} else {
|
||||
rctl &= ~E1000_RCTL_SZ_4096;
|
||||
rctl |= E1000_RCTL_BSEX;
|
||||
switch (adapter->rx_buffer_len) {
|
||||
rctl &= ~E1000_RCTL_SZ_4096;
|
||||
rctl |= E1000_RCTL_BSEX;
|
||||
switch (adapter->rx_buffer_len) {
|
||||
case E1000_RXBUFFER_256:
|
||||
rctl |= E1000_RCTL_SZ_256;
|
||||
rctl &= ~E1000_RCTL_BSEX;
|
||||
break;
|
||||
case E1000_RXBUFFER_512:
|
||||
rctl |= E1000_RCTL_SZ_512;
|
||||
rctl &= ~E1000_RCTL_BSEX;
|
||||
break;
|
||||
case E1000_RXBUFFER_1024:
|
||||
rctl |= E1000_RCTL_SZ_1024;
|
||||
rctl &= ~E1000_RCTL_BSEX;
|
||||
break;
|
||||
case E1000_RXBUFFER_2048:
|
||||
default:
|
||||
rctl |= E1000_RCTL_SZ_2048;
|
||||
@ -1621,7 +1603,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
|
||||
case E1000_RXBUFFER_16384:
|
||||
rctl |= E1000_RCTL_SZ_16384;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
|
||||
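
On pre-82571 MACs the receive-buffer setup above keeps the fixed RCTL size encoding: BSEX is set up front and then cleared again for the small 256/512/1024 sizes, so only the extended sizes later in the same switch keep it. A compact compilable sketch of that mapping, using placeholder bit values rather than the real E1000_RCTL_SZ_* defines:

    #include <stdio.h>

    /* Placeholder encodings -- not the real E1000_RCTL_SZ_* values. */
    struct rctl_size { unsigned int sz_bits; int bsex; };

    static struct rctl_size rctl_encode(unsigned int rx_buffer_len)
    {
            switch (rx_buffer_len) {
            case 256:   return (struct rctl_size){ 3, 0 };
            case 512:   return (struct rctl_size){ 2, 0 };
            case 1024:  return (struct rctl_size){ 1, 0 };
            case 4096:  return (struct rctl_size){ 3, 1 };   /* BSEX kept set */
            case 8192:  return (struct rctl_size){ 2, 1 };
            case 16384: return (struct rctl_size){ 1, 1 };
            case 2048:
            default:    return (struct rctl_size){ 0, 0 };
            }
    }

    int main(void)
    {
            struct rctl_size s = rctl_encode(4096);
            printf("sz=%u bsex=%d\n", s.sz_bits, s.bsex);
            return 0;
    }
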
@ -1715,7 +1696,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
|
||||
if (hw->mac_type >= e1000_82571) {
|
||||
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
|
||||
/* Reset delay timers after every interrupt */
|
||||
ctrl_ext |= E1000_CTRL_EXT_CANC;
|
||||
ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
|
||||
#ifdef CONFIG_E1000_NAPI
|
||||
/* Auto-Mask interrupts upon ICR read. */
|
||||
ctrl_ext |= E1000_CTRL_EXT_IAME;
|
||||
@ -1807,7 +1788,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
|
||||
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
|
||||
}
|
||||
|
||||
static inline void
|
||||
static void
|
||||
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
|
||||
struct e1000_buffer *buffer_info)
|
||||
{
|
||||
@ -2247,6 +2228,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
|
||||
|
||||
if (link) {
|
||||
if (!netif_carrier_ok(netdev)) {
|
||||
boolean_t txb2b = 1;
|
||||
e1000_get_speed_and_duplex(&adapter->hw,
|
||||
&adapter->link_speed,
|
||||
&adapter->link_duplex);
|
||||
@ -2260,23 +2242,22 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
|
||||
* and adjust the timeout factor */
|
||||
netdev->tx_queue_len = adapter->tx_queue_len;
|
||||
adapter->tx_timeout_factor = 1;
|
||||
adapter->txb2b = 1;
|
||||
switch (adapter->link_speed) {
|
||||
case SPEED_10:
|
||||
adapter->txb2b = 0;
|
||||
txb2b = 0;
|
||||
netdev->tx_queue_len = 10;
|
||||
adapter->tx_timeout_factor = 8;
|
||||
break;
|
||||
case SPEED_100:
|
||||
adapter->txb2b = 0;
|
||||
txb2b = 0;
|
||||
netdev->tx_queue_len = 100;
|
||||
/* maybe add some timeout factor ? */
|
||||
break;
|
||||
}
|
||||
|
||||
if ((adapter->hw.mac_type == e1000_82571 ||
|
||||
if ((adapter->hw.mac_type == e1000_82571 ||
|
||||
adapter->hw.mac_type == e1000_82572) &&
|
||||
adapter->txb2b == 0) {
|
||||
txb2b == 0) {
|
||||
#define SPEED_MODE_BIT (1 << 21)
|
||||
uint32_t tarc0;
|
||||
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
|
||||
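
The watchdog hunk above moves txb2b onto the stack but keeps the per-speed tuning: 10 Mb/s links get a 10-packet queue and an 8x transmit-timeout factor, 100 Mb/s links a 100-packet queue, and gigabit restores the saved tx_queue_len. A tiny illustrative helper (not driver code; 1000 stands in for the saved queue length) capturing those values:

    /* Illustrative only; 1000 stands in for the saved adapter->tx_queue_len. */
    struct link_tuning {
            int txb2b;                   /* back-to-back transmits allowed */
            unsigned int tx_queue_len;
            int tx_timeout_factor;
    };

    static struct link_tuning tune_for_speed(int speed_mbps)
    {
            switch (speed_mbps) {
            case 10:  return (struct link_tuning){ 0, 10,   8 };
            case 100: return (struct link_tuning){ 0, 100,  1 };
            default:  return (struct link_tuning){ 1, 1000, 1 };
            }
    }
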
@ -2400,7 +2381,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
|
||||
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
|
||||
#define E1000_TX_FLAGS_VLAN_SHIFT 16
|
||||
|
||||
static inline int
|
||||
static int
|
||||
e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
@ -2422,7 +2403,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
|
||||
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
|
||||
mss = skb_shinfo(skb)->tso_size;
|
||||
if (skb->protocol == ntohs(ETH_P_IP)) {
|
||||
if (skb->protocol == htons(ETH_P_IP)) {
|
||||
skb->nh.iph->tot_len = 0;
|
||||
skb->nh.iph->check = 0;
|
||||
skb->h.th->check =
|
||||
@ -2480,7 +2461,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
static inline boolean_t
|
||||
static boolean_t
|
||||
e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
@ -2516,7 +2497,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
#define E1000_MAX_TXD_PWR 12
|
||||
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
|
||||
|
||||
static inline int
|
||||
static int
|
||||
e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
|
||||
unsigned int nr_frags, unsigned int mss)
|
||||
@ -2625,7 +2606,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
return count;
|
||||
}
|
||||
|
||||
static inline void
|
||||
static void
|
||||
e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
int tx_flags, int count)
|
||||
{
|
||||
@ -2689,7 +2670,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
|
||||
#define E1000_FIFO_HDR 0x10
|
||||
#define E1000_82547_PAD_LEN 0x3E0
|
||||
|
||||
static inline int
|
||||
static int
|
||||
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
|
||||
{
|
||||
uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
|
||||
@ -2716,7 +2697,7 @@ no_fifo_stall_required:
|
||||
}
|
||||
|
||||
#define MINIMUM_DHCP_PACKET_SIZE 282
|
||||
static inline int
|
||||
static int
|
||||
e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
|
||||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
@ -2764,7 +2745,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
unsigned int nr_frags = 0;
|
||||
unsigned int mss = 0;
|
||||
int count = 0;
|
||||
int tso;
|
||||
int tso;
|
||||
unsigned int f;
|
||||
len -= skb->data_len;
|
||||
|
||||
@ -2777,7 +2758,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
|
||||
#ifdef NETIF_F_TSO
|
||||
mss = skb_shinfo(skb)->tso_size;
|
||||
/* The controller does a simple calculation to
|
||||
/* The controller does a simple calculation to
|
||||
* make sure there is enough room in the FIFO before
|
||||
* initiating the DMA for each buffer. The calc is:
|
||||
* 4 = ceil(buffer len/mss). To make sure we don't
|
||||
@ -2800,7 +2781,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
case e1000_82573:
|
||||
pull_size = min((unsigned int)4, skb->data_len);
|
||||
if (!__pskb_pull_tail(skb, pull_size)) {
|
||||
printk(KERN_ERR
|
||||
printk(KERN_ERR
|
||||
"__pskb_pull_tail failed.\n");
|
||||
dev_kfree_skb_any(skb);
|
||||
return NETDEV_TX_OK;
|
||||
@ -2901,7 +2882,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
/* Old method was to assume IPv4 packet by default if TSO was enabled.
|
||||
* 82571 hardware supports TSO capabilities for IPv6 as well...
|
||||
* no longer assume, we must. */
|
||||
if (likely(skb->protocol == ntohs(ETH_P_IP)))
|
||||
if (likely(skb->protocol == htons(ETH_P_IP)))
|
||||
tx_flags |= E1000_TX_FLAGS_IPV4;
|
||||
|
||||
e1000_tx_queue(adapter, tx_ring, tx_flags,
|
||||
@ -2982,8 +2963,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
|
||||
/* Adapter-specific max frame size limits. */
|
||||
switch (adapter->hw.mac_type) {
|
||||
case e1000_82542_rev2_0:
|
||||
case e1000_82542_rev2_1:
|
||||
case e1000_undefined ... e1000_82542_rev2_1:
|
||||
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
|
||||
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
|
||||
return -EINVAL;
|
||||
@ -3017,27 +2997,32 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
break;
|
||||
}
|
||||
|
||||
/* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
|
||||
* means we reserve 2 more, this pushes us to allocate from the next
|
||||
* larger slab size
|
||||
* i.e. RXBUFFER_2048 --> size-4096 slab */
|
||||
|
||||
if (adapter->hw.mac_type > e1000_82547_rev_2) {
|
||||
adapter->rx_buffer_len = max_frame;
|
||||
E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
|
||||
} else {
|
||||
if(unlikely((adapter->hw.mac_type < e1000_82543) &&
|
||||
(max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
|
||||
DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
|
||||
"on 82542\n");
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if(max_frame <= E1000_RXBUFFER_2048)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
|
||||
else if(max_frame <= E1000_RXBUFFER_4096)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_4096;
|
||||
else if(max_frame <= E1000_RXBUFFER_8192)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_8192;
|
||||
else if(max_frame <= E1000_RXBUFFER_16384)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
|
||||
}
|
||||
}
|
||||
if (max_frame <= E1000_RXBUFFER_256)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_256;
|
||||
else if (max_frame <= E1000_RXBUFFER_512)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_512;
|
||||
else if (max_frame <= E1000_RXBUFFER_1024)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_1024;
|
||||
else if (max_frame <= E1000_RXBUFFER_2048)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
|
||||
else if (max_frame <= E1000_RXBUFFER_4096)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_4096;
|
||||
else if (max_frame <= E1000_RXBUFFER_8192)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_8192;
|
||||
else if (max_frame <= E1000_RXBUFFER_16384)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
|
||||
|
||||
/* adjust allocation if LPE protects us, and we aren't using SBP */
|
||||
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
|
||||
if (!adapter->hw.tbi_compatibility_on &&
|
||||
((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
|
||||
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
|
||||
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
|
||||
|
||||
netdev->mtu = new_mtu;
|
||||
|
||||
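
The reworked e1000_change_mtu() above first snaps max_frame up to the nearest receive-buffer bucket and then, when TBI compatibility is off, trims standard and VLAN-tagged frames back down to the 1522-byte buffer so they fit a smaller slab. A rough standalone sketch of that selection (the helper name and the main() driver are made up for illustration; 1518 is the standard MAXIMUM_ETHERNET_FRAME_SIZE):

    #include <stdio.h>

    /* Hypothetical helper mirroring the bucket walk in e1000_change_mtu()
     * above; frames larger than 16384 bytes are rejected earlier by the
     * per-MAC MTU checks. */
    static unsigned int pick_rx_buffer_len(unsigned int max_frame, int tbi_on)
    {
            static const unsigned int buckets[] = { 256, 512, 1024, 2048,
                                                    4096, 8192, 16384 };
            unsigned int len = 16384;
            unsigned int i;

            for (i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++) {
                    if (max_frame <= buckets[i]) {
                            len = buckets[i];
                            break;
                    }
            }

            /* Standard 1518-byte frames (and 1522-byte VLAN frames) can drop
             * back to the tighter 1522-byte buffer when the TBI workaround
             * does not need the extra room. */
            if (!tbi_on && (max_frame == 1518 || max_frame == 1522))
                    len = 1522;

            return len;
    }

    int main(void)
    {
            printf("%u\n", pick_rx_buffer_len(1518, 0));   /* prints 1522  */
            printf("%u\n", pick_rx_buffer_len(9018, 0));   /* prints 16384 */
            return 0;
    }
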
@ -3060,11 +3045,21 @@ void
|
||||
e1000_update_stats(struct e1000_adapter *adapter)
|
||||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
unsigned long flags;
|
||||
uint16_t phy_tmp;
|
||||
|
||||
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
|
||||
|
||||
/*
|
||||
* Prevent stats update while adapter is being reset, or if the pci
|
||||
* connection is down.
|
||||
*/
|
||||
if (adapter->link_speed == 0)
|
||||
return;
|
||||
if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&adapter->stats_lock, flags);
|
||||
|
||||
/* these counters are modified from e1000_adjust_tbi_stats,
|
||||
@ -3165,7 +3160,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
|
||||
adapter->stats.crcerrs + adapter->stats.algnerrc +
|
||||
adapter->stats.ruc + adapter->stats.roc +
|
||||
adapter->stats.cexterr;
|
||||
adapter->net_stats.rx_dropped = 0;
|
||||
adapter->net_stats.rx_length_errors = adapter->stats.ruc +
|
||||
adapter->stats.roc;
|
||||
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
|
||||
@ -3391,13 +3385,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
|
||||
|
||||
tx_ring->next_to_clean = i;
|
||||
|
||||
spin_lock(&tx_ring->tx_lock);
|
||||
|
||||
#define TX_WAKE_THRESHOLD 32
|
||||
if (unlikely(cleaned && netif_queue_stopped(netdev) &&
|
||||
netif_carrier_ok(netdev)))
|
||||
netif_wake_queue(netdev);
|
||||
|
||||
spin_unlock(&tx_ring->tx_lock);
|
||||
netif_carrier_ok(netdev))) {
|
||||
spin_lock(&tx_ring->tx_lock);
|
||||
if (netif_queue_stopped(netdev) &&
|
||||
(E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
|
||||
netif_wake_queue(netdev);
|
||||
spin_unlock(&tx_ring->tx_lock);
|
||||
}
|
||||
|
||||
if (adapter->detect_tx_hung) {
|
||||
/* Detect a transmit hang in hardware, this serializes the
|
||||
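
The transmit-cleanup change above makes the queue wake conditional: the tx_lock is only taken when something was actually cleaned, and the queue is only restarted once at least TX_WAKE_THRESHOLD descriptors are free, so the stack is not woken just to stop again on the next packet. A stripped-down sketch of that pattern, with stub helpers standing in for the real netif_* and spinlock calls:

    #define TX_WAKE_THRESHOLD 32

    /* Stand-ins for the real kernel primitives. */
    static int  queue_stopped(void)   { return 1; }
    static int  carrier_ok(void)      { return 1; }
    static int  descs_unused(void)    { return 64; }
    static void wake_queue(void)      { }
    static void lock_tx(void)         { }
    static void unlock_tx(void)       { }

    static void maybe_wake_queue(int cleaned)
    {
            if (cleaned && queue_stopped() && carrier_ok()) {
                    lock_tx();
                    /* Re-check under the lock; only wake with real headroom. */
                    if (queue_stopped() && descs_unused() >= TX_WAKE_THRESHOLD)
                            wake_queue();
                    unlock_tx();
            }
    }
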
@ -3445,7 +3441,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
|
||||
* @sk_buff: socket buffer with received data
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
e1000_rx_checksum(struct e1000_adapter *adapter,
|
||||
uint32_t status_err, uint32_t csum,
|
||||
struct sk_buff *skb)
|
||||
@ -3567,7 +3563,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
|
||||
flags);
|
||||
length--;
|
||||
} else {
|
||||
dev_kfree_skb_irq(skb);
|
||||
/* recycle */
|
||||
buffer_info->skb = skb;
|
||||
goto next_desc;
|
||||
}
|
||||
}
|
||||
@ -3675,6 +3672,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
|
||||
i = rx_ring->next_to_clean;
|
||||
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
|
||||
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
|
||||
buffer_info = &rx_ring->buffer_info[i];
|
||||
|
||||
while (staterr & E1000_RXD_STAT_DD) {
|
||||
buffer_info = &rx_ring->buffer_info[i];
|
||||
@ -3733,9 +3731,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
|
||||
|
||||
/* page alloc/put takes too long and affects small packet
|
||||
* throughput, so unsplit small packets and save the alloc/put*/
|
||||
if (l1 && ((length + l1) < E1000_CB_LENGTH)) {
|
||||
if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
|
||||
u8 *vaddr;
|
||||
/* there is no documentation about how to call
|
||||
/* there is no documentation about how to call
|
||||
* kmap_atomic, so we can't hold the mapping
|
||||
* very long */
|
||||
pci_dma_sync_single_for_cpu(pdev,
|
||||
@ -4155,7 +4153,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
||||
spin_unlock_irqrestore(&adapter->stats_lock, flags);
|
||||
return -EIO;
|
||||
}
|
||||
if (adapter->hw.phy_type == e1000_media_type_copper) {
|
||||
if (adapter->hw.media_type == e1000_media_type_copper) {
|
||||
switch (data->reg_num) {
|
||||
case PHY_CTRL:
|
||||
if (mii_reg & MII_CR_POWER_DOWN)
|
||||
@ -4514,21 +4512,13 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
|
||||
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
|
||||
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
|
||||
retval = pci_enable_wake(pdev, PCI_D3hot, 1);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
|
||||
retval = pci_enable_wake(pdev, PCI_D3cold, 1);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
|
||||
pci_enable_wake(pdev, PCI_D3hot, 1);
|
||||
pci_enable_wake(pdev, PCI_D3cold, 1);
|
||||
} else {
|
||||
E1000_WRITE_REG(&adapter->hw, WUC, 0);
|
||||
E1000_WRITE_REG(&adapter->hw, WUFC, 0);
|
||||
retval = pci_enable_wake(pdev, PCI_D3hot, 0);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
|
||||
retval = pci_enable_wake(pdev, PCI_D3cold, 0);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
|
||||
pci_enable_wake(pdev, PCI_D3hot, 0);
|
||||
pci_enable_wake(pdev, PCI_D3cold, 0);
|
||||
}
|
||||
|
||||
if (adapter->hw.mac_type >= e1000_82540 &&
|
||||
@ -4537,13 +4527,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
if (manc & E1000_MANC_SMBUS_EN) {
|
||||
manc |= E1000_MANC_ARP_EN;
|
||||
E1000_WRITE_REG(&adapter->hw, MANC, manc);
|
||||
retval = pci_enable_wake(pdev, PCI_D3hot, 1);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
|
||||
retval = pci_enable_wake(pdev, PCI_D3cold, 1);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR,
|
||||
"Error enabling D3 cold wake\n");
|
||||
pci_enable_wake(pdev, PCI_D3hot, 1);
|
||||
pci_enable_wake(pdev, PCI_D3cold, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4553,9 +4538,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
|
||||
pci_disable_device(pdev);
|
||||
|
||||
retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error in setting power state\n");
|
||||
pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -4566,22 +4549,15 @@ e1000_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
int retval;
|
||||
uint32_t manc, ret_val;
|
||||
|
||||
retval = pci_set_power_state(pdev, PCI_D0);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error in setting power state\n");
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
e1000_pci_restore_state(adapter);
|
||||
ret_val = pci_enable_device(pdev);
|
||||
pci_set_master(pdev);
|
||||
|
||||
retval = pci_enable_wake(pdev, PCI_D3hot, 0);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
|
||||
retval = pci_enable_wake(pdev, PCI_D3cold, 0);
|
||||
if (retval)
|
||||
DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
|
||||
pci_enable_wake(pdev, PCI_D3hot, 0);
|
||||
pci_enable_wake(pdev, PCI_D3cold, 0);
|
||||
|
||||
e1000_reset(adapter);
|
||||
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
|
||||
@ -4635,4 +4611,101 @@ e1000_netpoll(struct net_device *netdev)
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* e1000_io_error_detected - called when PCI error is detected
|
||||
* @pdev: Pointer to PCI device
|
||||
 * @state: The current pci connection state
|
||||
*
|
||||
* This function is called after a PCI bus error affecting
|
||||
* this device has been detected.
|
||||
*/
|
||||
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct e1000_adapter *adapter = netdev->priv;
|
||||
|
||||
netif_device_detach(netdev);
|
||||
|
||||
if (netif_running(netdev))
|
||||
e1000_down(adapter);
|
||||
|
||||
/* Request a slot reset. */
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_io_slot_reset - called after the pci bus has been reset.
|
||||
* @pdev: Pointer to PCI device
|
||||
*
|
||||
* Restart the card from scratch, as if from a cold-boot. Implementation
|
||||
* resembles the first-half of the e1000_resume routine.
|
||||
*/
|
||||
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct e1000_adapter *adapter = netdev->priv;
|
||||
|
||||
if (pci_enable_device(pdev)) {
|
||||
printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
}
|
||||
pci_set_master(pdev);
|
||||
|
||||
pci_enable_wake(pdev, 3, 0);
|
||||
pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
|
||||
|
||||
/* Perform card reset only on one instance of the card */
|
||||
if (PCI_FUNC (pdev->devfn) != 0)
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
|
||||
e1000_reset(adapter);
|
||||
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
|
||||
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_io_resume - called when traffic can start flowing again.
|
||||
* @pdev: Pointer to PCI device
|
||||
*
|
||||
* This callback is called when the error recovery driver tells us that
|
||||
 * it's OK to resume normal operation. Implementation resembles the
|
||||
* second-half of the e1000_resume routine.
|
||||
*/
|
||||
static void e1000_io_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct e1000_adapter *adapter = netdev->priv;
|
||||
uint32_t manc, swsm;
|
||||
|
||||
if (netif_running(netdev)) {
|
||||
if (e1000_up(adapter)) {
|
||||
printk("e1000: can't bring device back up after reset\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
netif_device_attach(netdev);
|
||||
|
||||
if (adapter->hw.mac_type >= e1000_82540 &&
|
||||
adapter->hw.media_type == e1000_media_type_copper) {
|
||||
manc = E1000_READ_REG(&adapter->hw, MANC);
|
||||
manc &= ~(E1000_MANC_ARP_EN);
|
||||
E1000_WRITE_REG(&adapter->hw, MANC, manc);
|
||||
}
|
||||
|
||||
switch (adapter->hw.mac_type) {
|
||||
case e1000_82573:
|
||||
swsm = E1000_READ_REG(&adapter->hw, SWSM);
|
||||
E1000_WRITE_REG(&adapter->hw, SWSM,
|
||||
swsm | E1000_SWSM_DRV_LOAD);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (netif_running(netdev))
|
||||
mod_timer(&adapter->watchdog_timer, jiffies);
|
||||
}
|
||||
|
||||
/* e1000_main.c */
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,6 +22,7 @@
|
||||
|
||||
Contact Information:
|
||||
Linux NICS <linux.nics@intel.com>
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -22,6 +22,7 @@
|
||||
|
||||
Contact Information:
|
||||
Linux NICS <linux.nics@intel.com>
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
@ -21,15 +21,15 @@
|
||||
http://www.scyld.com/network/epic100.html
|
||||
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
Linux kernel-specific changes:
|
||||
|
||||
|
||||
LK1.1.2 (jgarzik):
|
||||
* Merge becker version 1.09 (4/08/2000)
|
||||
|
||||
LK1.1.3:
|
||||
* Major bugfix to 1.09 driver (Francis Romieu)
|
||||
|
||||
|
||||
LK1.1.4 (jgarzik):
|
||||
* Merge becker test version 1.09 (5/29/2000)
|
||||
|
||||
@ -66,7 +66,7 @@
|
||||
LK1.1.14 (Krzysztof Halasa):
|
||||
* fix spurious bad initializations
|
||||
* pound phy a la SMSC's app note on the subject
|
||||
|
||||
|
||||
AC1.1.14ac
|
||||
* fix power up/down for ethtool that broke in 1.11
|
||||
|
||||
@ -244,7 +244,7 @@ static struct pci_device_id epic_pci_tbl[] = {
|
||||
};
|
||||
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
|
||||
|
||||
|
||||
|
||||
#ifndef USE_IO_OPS
|
||||
#undef inb
|
||||
#undef inw
|
||||
@ -370,7 +370,7 @@ static int epic_close(struct net_device *dev);
|
||||
static struct net_device_stats *epic_get_stats(struct net_device *dev);
|
||||
static void set_rx_mode(struct net_device *dev);
|
||||
|
||||
|
||||
|
||||
|
||||
static int __devinit epic_init_one (struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent)
|
||||
@ -392,9 +392,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
|
||||
printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
|
||||
version, version2, version3);
|
||||
#endif
|
||||
|
||||
|
||||
card_idx++;
|
||||
|
||||
|
||||
ret = pci_enable_device(pdev);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -405,7 +405,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
|
||||
ret = -ENODEV;
|
||||
goto err_out_disable;
|
||||
}
|
||||
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
ret = pci_request_regions(pdev, DRV_NAME);
|
||||
@ -498,7 +498,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
|
||||
ep->pci_dev = pdev;
|
||||
ep->chip_id = chip_idx;
|
||||
ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
|
||||
ep->irq_mask =
|
||||
ep->irq_mask =
|
||||
(ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
|
||||
| CntFull | TxUnderrun | EpicNapiEvent;
|
||||
|
||||
@ -587,7 +587,7 @@ err_out_disable:
|
||||
pci_disable_device(pdev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
/* Serial EEPROM section. */
|
||||
|
||||
/* EEPROM_Ctrl bits. */
|
||||
@ -709,7 +709,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
|
||||
|
||||
outw(value, ioaddr + MIIData);
|
||||
outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
|
||||
for (i = 10000; i > 0; i--) {
|
||||
for (i = 10000; i > 0; i--) {
|
||||
barrier();
|
||||
if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
|
||||
break;
|
||||
@ -717,7 +717,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static int epic_open(struct net_device *dev)
|
||||
{
|
||||
struct epic_private *ep = dev->priv;
|
||||
@ -760,7 +760,7 @@ static int epic_open(struct net_device *dev)
|
||||
#endif
|
||||
|
||||
udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
|
||||
|
||||
|
||||
for (i = 0; i < 3; i++)
|
||||
outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
|
||||
|
||||
@ -803,7 +803,7 @@ static int epic_open(struct net_device *dev)
|
||||
|
||||
/* Enable interrupts by setting the interrupt mask. */
|
||||
outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
|
||||
| CntFull | TxUnderrun
|
||||
| CntFull | TxUnderrun
|
||||
| RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
|
||||
|
||||
if (debug > 1)
|
||||
@ -831,7 +831,7 @@ static void epic_pause(struct net_device *dev)
|
||||
struct epic_private *ep = dev->priv;
|
||||
|
||||
netif_stop_queue (dev);
|
||||
|
||||
|
||||
/* Disable interrupts by clearing the interrupt mask. */
|
||||
outl(0x00000000, ioaddr + INTMASK);
|
||||
/* Stop the chip's Tx and Rx DMA processes. */
|
||||
@ -987,7 +987,7 @@ static void epic_init_ring(struct net_device *dev)
|
||||
for (i = 0; i < RX_RING_SIZE; i++) {
|
||||
ep->rx_ring[i].rxstatus = 0;
|
||||
ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
|
||||
ep->rx_ring[i].next = ep->rx_ring_dma +
|
||||
ep->rx_ring[i].next = ep->rx_ring_dma +
|
||||
(i+1)*sizeof(struct epic_rx_desc);
|
||||
ep->rx_skbuff[i] = NULL;
|
||||
}
|
||||
@ -1002,7 +1002,7 @@ static void epic_init_ring(struct net_device *dev)
|
||||
break;
|
||||
skb->dev = dev; /* Mark as being used by this device. */
|
||||
skb_reserve(skb, 2); /* 16 byte align the IP header. */
|
||||
ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
|
||||
ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
|
||||
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
|
||||
}
|
||||
@ -1013,7 +1013,7 @@ static void epic_init_ring(struct net_device *dev)
|
||||
for (i = 0; i < TX_RING_SIZE; i++) {
|
||||
ep->tx_skbuff[i] = NULL;
|
||||
ep->tx_ring[i].txstatus = 0x0000;
|
||||
ep->tx_ring[i].next = ep->tx_ring_dma +
|
||||
ep->tx_ring[i].next = ep->tx_ring_dma +
|
||||
(i+1)*sizeof(struct epic_tx_desc);
|
||||
}
|
||||
ep->tx_ring[i-1].next = ep->tx_ring_dma;
|
||||
@ -1026,7 +1026,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
int entry, free_count;
|
||||
u32 ctrl_word;
|
||||
unsigned long flags;
|
||||
|
||||
|
||||
if (skb->len < ETH_ZLEN) {
|
||||
skb = skb_padto(skb, ETH_ZLEN);
|
||||
if (skb == NULL)
|
||||
@ -1042,7 +1042,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
entry = ep->cur_tx % TX_RING_SIZE;
|
||||
|
||||
ep->tx_skbuff[entry] = skb;
|
||||
ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
|
||||
ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
|
||||
skb->len, PCI_DMA_TODEVICE);
|
||||
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
|
||||
ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
|
||||
@ -1126,7 +1126,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
|
||||
|
||||
/* Free the original skb. */
|
||||
skb = ep->tx_skbuff[entry];
|
||||
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
|
||||
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
|
||||
skb->len, PCI_DMA_TODEVICE);
|
||||
dev_kfree_skb_irq(skb);
|
||||
ep->tx_skbuff[entry] = NULL;
|
||||
@ -1281,8 +1281,8 @@ static int epic_rx(struct net_device *dev, int budget)
|
||||
ep->rx_buf_sz,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
} else {
|
||||
pci_unmap_single(ep->pci_dev,
|
||||
ep->rx_ring[entry].bufaddr,
|
||||
pci_unmap_single(ep->pci_dev,
|
||||
ep->rx_ring[entry].bufaddr,
|
||||
ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
skb_put(skb = ep->rx_skbuff[entry], pkt_len);
|
||||
ep->rx_skbuff[entry] = NULL;
|
||||
@ -1307,7 +1307,7 @@ static int epic_rx(struct net_device *dev, int budget)
|
||||
break;
|
||||
skb->dev = dev; /* Mark as being used by this device. */
|
||||
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
|
||||
ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
|
||||
ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
|
||||
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
work_done++;
|
||||
}
|
||||
@ -1403,7 +1403,7 @@ static int epic_close(struct net_device *dev)
|
||||
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
|
||||
ep->rx_ring[i].buflength = 0;
|
||||
if (skb) {
|
||||
pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
|
||||
pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
|
||||
ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb(skb);
|
||||
}
|
||||
@ -1414,7 +1414,7 @@ static int epic_close(struct net_device *dev)
|
||||
ep->tx_skbuff[i] = NULL;
|
||||
if (!skb)
|
||||
continue;
|
||||
pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
|
||||
pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
|
||||
skb->len, PCI_DMA_TODEVICE);
|
||||
dev_kfree_skb(skb);
|
||||
}
|
||||
@ -1607,7 +1607,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *dev = pci_get_drvdata(pdev);
|
||||
struct epic_private *ep = dev->priv;
|
||||
|
||||
|
||||
pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
|
||||
pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
|
||||
unregister_netdev(dev);
|
||||
|
File diff suppressed because it is too large
@ -446,7 +446,7 @@ MODULE_LICENSE("GPL");
|
||||
|
||||
/* This is set up so that only a single autoprobe takes place per call.
|
||||
ISA device autoprobes on a running machine are not recommended. */
|
||||
int
|
||||
int __init
|
||||
init_module(void)
|
||||
{
|
||||
struct net_device *dev;
|
||||
|
@ -384,7 +384,7 @@ hp_block_output(struct net_device *dev, int count,
|
||||
}
|
||||
|
||||
/* This function resets the ethercard if something screws up. */
|
||||
static void
|
||||
static void __init
|
||||
hp_init_card(struct net_device *dev)
|
||||
{
|
||||
int irq = dev->irq;
|
||||
@ -409,7 +409,7 @@ MODULE_LICENSE("GPL");
|
||||
|
||||
/* This is set up so that only a single autoprobe takes place per call.
|
||||
ISA device autoprobes on a running machine are not recommended. */
|
||||
int
|
||||
int __init
|
||||
init_module(void)
|
||||
{
|
||||
struct net_device *dev;
|
||||
|
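The two hp/ne hunks above only add __init annotations; as a hedged aside, this is the usual cure for modpost "section mismatch" warnings: code that runs only at module load or probe time is moved into the discardable init section, and may then only be referenced from other init-time code. A small illustrative sketch (the helper name is made up):

/* __init places the body in .init.text, which the kernel frees after boot. */
static void __init example_probe_only_setup(struct net_device *dev)
{
	/* one-time hardware probing and initialisation */
}

int __init init_module(void)
{
	/* safe: init_module is itself __init, so it may call the helper */
	return 0;
}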
@ -1,4 +1,4 @@
|
||||
/*
|
||||
/*
|
||||
net-3-driver for the IBM LAN Adapter/A
|
||||
|
||||
This is an extension to the Linux operating system, and is covered by the
|
||||
@ -11,9 +11,9 @@ This driver is based both on the SK_MCA driver, which is itself based on the
|
||||
SK_G16 and 3C523 driver.
|
||||
|
||||
paper sources:
|
||||
'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
|
||||
'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
|
||||
Hans-Peter Messmer for the basic Microchannel stuff
|
||||
|
||||
|
||||
'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer
|
||||
for help on Ethernet driver programming
|
||||
|
||||
@ -27,14 +27,14 @@ paper sources:
|
||||
|
||||
special acknowledgements to:
|
||||
- Bob Eager for helping me out with documentation from IBM
|
||||
- Jim Shorney for his endless patience with me while I was using
|
||||
- Jim Shorney for his endless patience with me while I was using
|
||||
him as a beta tester to trace down the address filter bug ;-)
|
||||
|
||||
Missing things:
|
||||
|
||||
-> set debug level via ioctl instead of compile-time switches
|
||||
-> I didn't follow the development of the 2.1.x kernels, so my
|
||||
assumptions about which things changed with which kernel version
|
||||
assumptions about which things changed with which kernel version
|
||||
are probably nonsense
|
||||
|
||||
History:
|
||||
@ -275,7 +275,7 @@ static void InitDscrs(struct net_device *dev)
|
||||
priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
|
||||
priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
|
||||
priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
|
||||
|
||||
|
||||
for (z = 0; z < priv->rxbufcnt; z++) {
|
||||
rra.startlo = baddr;
|
||||
rra.starthi = 0;
|
||||
@ -570,7 +570,7 @@ static void irqrx_handler(struct net_device *dev)
|
||||
lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
|
||||
memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
|
||||
|
||||
/* iron out upper word halves of fields we use - SONIC will duplicate
|
||||
/* iron out upper word halves of fields we use - SONIC will duplicate
|
||||
bits 0..15 to 16..31 */
|
||||
|
||||
rda.status &= 0xffff;
|
||||
@ -836,9 +836,9 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
|
||||
memcpy_toio(priv->base + baddr, skb->data, skb->len);
|
||||
|
||||
/* copy filler into RAM - in case we're filling up...
|
||||
/* copy filler into RAM - in case we're filling up...
|
||||
we're filling a bit more than necessary, but that doesn't harm
|
||||
since the buffer is far larger...
|
||||
since the buffer is far larger...
|
||||
Sorry Linus for the filler string but I couldn't resist ;-) */
|
||||
|
||||
if (tmplen > skb->len) {
|
||||
@ -952,7 +952,7 @@ static int ibmlana_probe(struct net_device *dev)
|
||||
priv->realirq = irq;
|
||||
priv->medium = medium;
|
||||
spin_lock_init(&priv->lock);
|
||||
|
||||
|
||||
|
||||
/* set base + irq for this device (irq not allocated so far) */
|
||||
|
||||
|
@ -17,7 +17,7 @@
|
||||
/* media enumeration - defined in a way that it fits onto the LAN/A's
|
||||
POS registers... */
|
||||
|
||||
typedef enum {
|
||||
typedef enum {
|
||||
Media_10BaseT, Media_10Base5,
|
||||
Media_Unknown, Media_10Base2, Media_Count
|
||||
} ibmlana_medium;
|
||||
@ -27,7 +27,7 @@ typedef enum {
|
||||
typedef struct {
|
||||
unsigned int slot; /* MCA-Slot-# */
|
||||
struct net_device_stats stat; /* packet statistics */
|
||||
int realirq; /* memorizes actual IRQ, even when
|
||||
int realirq; /* memorizes actual IRQ, even when
|
||||
currently not allocated */
|
||||
ibmlana_medium medium; /* physical cannector */
|
||||
u32 tdastart, txbufstart, /* addresses */
|
||||
@ -41,7 +41,7 @@ typedef struct {
|
||||
spinlock_t lock;
|
||||
} ibmlana_priv;
|
||||
|
||||
/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
|
||||
/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
|
||||
a full 64K I/O range... */
|
||||
|
||||
#define IBM_LANA_IORANGE 0xa0
|
||||
|
@ -24,7 +24,7 @@
|
||||
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
|
||||
/* option of the RS/6000 Platform Architechture to interface with virtual */
|
||||
/* ethernet NICs that are presented to the partition by the hypervisor. */
|
||||
/* */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
/*
|
||||
TODO:
|
||||
@ -79,7 +79,7 @@
|
||||
#else
|
||||
#define ibmveth_debug_printk_no_adapter(fmt, args...)
|
||||
#define ibmveth_debug_printk(fmt, args...)
|
||||
#define ibmveth_assert(expr)
|
||||
#define ibmveth_assert(expr)
|
||||
#endif
|
||||
|
||||
static int ibmveth_open(struct net_device *dev);
|
||||
@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
|
||||
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
|
||||
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
|
||||
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
|
||||
static struct kobj_type ktype_veth_pool;
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
#define IBMVETH_PROC_DIR "net/ibmveth"
|
||||
@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
|
||||
}
|
||||
|
||||
/* setup the initial settings for a buffer pool */
|
||||
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
|
||||
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
|
||||
{
|
||||
pool->size = pool_size;
|
||||
pool->index = pool_index;
|
||||
pool->buff_size = buff_size;
|
||||
pool->threshold = pool_size / 2;
|
||||
pool->active = pool_active;
|
||||
}
|
||||
|
||||
/* allocate and setup an buffer pool - called during open */
|
||||
@ -146,13 +148,13 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
|
||||
{
|
||||
int i;
|
||||
|
||||
pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
|
||||
pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
|
||||
|
||||
if(!pool->free_map) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
|
||||
pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
|
||||
if(!pool->dma_addr) {
|
||||
kfree(pool->free_map);
|
||||
pool->free_map = NULL;
|
||||
@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
|
||||
atomic_set(&pool->available, 0);
|
||||
pool->producer_index = 0;
|
||||
pool->consumer_index = 0;
|
||||
pool->active = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -214,7 +215,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
|
||||
|
||||
free_index = pool->consumer_index++ % pool->size;
|
||||
index = pool->free_map[free_index];
|
||||
|
||||
|
||||
ibmveth_assert(index != IBM_VETH_INVALID_MAP);
|
||||
ibmveth_assert(pool->skbuff[index] == NULL);
|
||||
|
||||
@ -231,10 +232,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
|
||||
desc.desc = 0;
|
||||
desc.fields.valid = 1;
|
||||
desc.fields.length = pool->buff_size;
|
||||
desc.fields.address = dma_addr;
|
||||
desc.fields.address = dma_addr;
|
||||
|
||||
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
|
||||
|
||||
|
||||
if(lpar_rc != H_SUCCESS) {
|
||||
pool->free_map[free_index] = index;
|
||||
pool->skbuff[index] = NULL;
|
||||
@ -250,13 +251,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
|
||||
adapter->replenish_add_buff_success++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
mb();
|
||||
atomic_add(buffers_added, &(pool->available));
|
||||
}
|
||||
|
||||
/* replenish routine */
|
||||
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
|
||||
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -264,7 +265,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
|
||||
|
||||
for(i = 0; i < IbmVethNumBufferPools; i++)
|
||||
if(adapter->rx_buff_pool[i].active)
|
||||
ibmveth_replenish_buffer_pool(adapter,
|
||||
ibmveth_replenish_buffer_pool(adapter,
|
||||
&adapter->rx_buff_pool[i]);
|
||||
|
||||
adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
|
||||
@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
|
||||
kfree(pool->skbuff);
|
||||
pool->skbuff = NULL;
|
||||
}
|
||||
pool->active = 0;
|
||||
}
|
||||
|
||||
/* remove a buffer from a pool */
|
||||
@ -372,7 +372,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
|
||||
desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
|
||||
|
||||
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
|
||||
|
||||
|
||||
if(lpar_rc != H_SUCCESS) {
|
||||
ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
|
||||
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
|
||||
@ -407,7 +407,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
|
||||
}
|
||||
free_page((unsigned long)adapter->buffer_list_addr);
|
||||
adapter->buffer_list_addr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if(adapter->filter_list_addr != NULL) {
|
||||
if(!dma_mapping_error(adapter->filter_list_dma)) {
|
||||
@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
|
||||
}
|
||||
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++)
|
||||
ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
|
||||
if (adapter->rx_buff_pool[i].active)
|
||||
ibmveth_free_buffer_pool(adapter,
|
||||
&adapter->rx_buff_pool[i]);
|
||||
}
|
||||
|
||||
static int ibmveth_open(struct net_device *netdev)
|
||||
@ -450,10 +452,10 @@ static int ibmveth_open(struct net_device *netdev)
|
||||
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++)
|
||||
rxq_entries += adapter->rx_buff_pool[i].size;
|
||||
|
||||
|
||||
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
|
||||
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
|
||||
|
||||
|
||||
if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
|
||||
ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
|
||||
ibmveth_cleanup(adapter);
|
||||
@ -489,9 +491,6 @@ static int ibmveth_open(struct net_device *netdev)
|
||||
adapter->rx_queue.num_slots = rxq_entries;
|
||||
adapter->rx_queue.toggle = 1;
|
||||
|
||||
/* call change_mtu to init the buffer pools based in initial mtu */
|
||||
ibmveth_change_mtu(netdev, netdev->mtu);
|
||||
|
||||
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
|
||||
mac_address = mac_address >> 16;
|
||||
|
||||
@ -504,7 +503,7 @@ static int ibmveth_open(struct net_device *netdev)
|
||||
ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
|
||||
ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
|
||||
|
||||
|
||||
|
||||
lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
|
||||
adapter->buffer_list_dma,
|
||||
rxq_desc.desc,
|
||||
@ -519,7 +518,18 @@ static int ibmveth_open(struct net_device *netdev)
|
||||
rxq_desc.desc,
|
||||
mac_address);
|
||||
ibmveth_cleanup(adapter);
|
||||
return -ENONET;
|
||||
return -ENONET;
|
||||
}
|
||||
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++) {
|
||||
if(!adapter->rx_buff_pool[i].active)
|
||||
continue;
|
||||
if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
|
||||
ibmveth_error_printk("unable to alloc pool\n");
|
||||
adapter->rx_buff_pool[i].active = 0;
|
||||
ibmveth_cleanup(adapter);
|
||||
return -ENOMEM ;
|
||||
}
|
||||
}
|
||||
|
||||
ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
|
||||
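Pulling the fragments of the hunk above together: ibmveth_open() now backs only the pools currently flagged active with memory, and aborts the open if one of them cannot be allocated. A consolidated sketch, using the same identifiers as the patch:

	for (i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			return -ENOMEM;
		}
	}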
@ -547,10 +557,11 @@ static int ibmveth_close(struct net_device *netdev)
|
||||
{
|
||||
struct ibmveth_adapter *adapter = netdev->priv;
|
||||
long lpar_rc;
|
||||
|
||||
|
||||
ibmveth_debug_printk("close starting\n");
|
||||
|
||||
netif_stop_queue(netdev);
|
||||
if (!adapter->pool_config)
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
free_irq(netdev->irq, netdev);
|
||||
|
||||
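The pool_config test added above exists for the sysfs store path introduced later in this patch: while a pool attribute is being rewritten the device is closed and immediately reopened, and that transient close should not stop the transmit queue. Roughly, the store handler drives this sequence (sketch assembled from veth_pool_store() further down):

	adapter->pool_config = 1;	/* suppress netif_stop_queue() in close */
	ibmveth_close(netdev);
	adapter->pool_config = 0;
	pool->size = value;		/* or ->buff_size, or ->active */
	if ((rc = ibmveth_open(netdev)))
		return rc;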
@ -694,7 +705,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
desc[5].desc,
|
||||
correlator);
|
||||
} while ((lpar_rc == H_BUSY) && (retry_count--));
|
||||
|
||||
|
||||
if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
|
||||
int i;
|
||||
ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
|
||||
@ -780,7 +791,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
|
||||
/* more work to do - return that we are not done yet */
|
||||
netdev->quota -= frames_processed;
|
||||
*budget -= frames_processed;
|
||||
return 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* we think we are done - reenable interrupts, then check once more to make sure we are done */
|
||||
@ -806,7 +817,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
|
||||
}
|
||||
|
||||
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
|
||||
{
|
||||
{
|
||||
struct net_device *netdev = dev_instance;
|
||||
struct ibmveth_adapter *adapter = netdev->priv;
|
||||
unsigned long lpar_rc;
|
||||
@ -862,7 +873,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
|
||||
ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* re-enable filtering */
|
||||
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
|
||||
IbmVethMcastEnableFiltering,
|
||||
@ -876,46 +887,22 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
|
||||
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
|
||||
{
|
||||
struct ibmveth_adapter *adapter = dev->priv;
|
||||
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
|
||||
int i;
|
||||
int prev_smaller = 1;
|
||||
|
||||
if ((new_mtu < 68) ||
|
||||
(new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
|
||||
if (new_mtu < IBMVETH_MAX_MTU)
|
||||
return -EINVAL;
|
||||
|
||||
/* Look for an active buffer pool that can hold the new MTU */
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++) {
|
||||
int activate = 0;
|
||||
if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
|
||||
activate = 1;
|
||||
prev_smaller= 1;
|
||||
} else {
|
||||
if (prev_smaller)
|
||||
activate = 1;
|
||||
prev_smaller= 0;
|
||||
if (!adapter->rx_buff_pool[i].active)
|
||||
continue;
|
||||
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
|
||||
dev->mtu = new_mtu;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (activate && !adapter->rx_buff_pool[i].active) {
|
||||
struct ibmveth_buff_pool *pool =
|
||||
&adapter->rx_buff_pool[i];
|
||||
if(ibmveth_alloc_buffer_pool(pool)) {
|
||||
ibmveth_error_printk("unable to alloc pool\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
adapter->rx_buff_pool[i].active = 1;
|
||||
} else if (!activate && adapter->rx_buff_pool[i].active) {
|
||||
adapter->rx_buff_pool[i].active = 0;
|
||||
h_free_logical_lan_buffer(adapter->vdev->unit_address,
|
||||
(u64)pool_size[i]);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* kick the interrupt handler so that the new buffer pools get
|
||||
replenished or deallocated */
|
||||
ibmveth_interrupt(dev->irq, dev, NULL);
|
||||
|
||||
dev->mtu = new_mtu;
|
||||
return 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
|
||||
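Read as a whole, the rewritten ibmveth_change_mtu() above no longer grows or shrinks pools itself; it simply accepts an MTU if some already-active buffer pool can hold the frame plus the 22-byte overhead. A consolidated sketch of the new logic, identifiers as in the patch:

	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;

	if (new_mtu < IBMVETH_MAX_MTU)		/* 68: minimum legal Ethernet MTU */
		return -EINVAL;

	for (i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			return 0;
		}
	}
	return -EINVAL;				/* no active pool is large enough */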
@ -928,7 +915,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
|
||||
unsigned int *mcastFilterSize_p;
|
||||
|
||||
|
||||
ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
|
||||
ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
|
||||
dev->unit_address);
|
||||
|
||||
mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
|
||||
@ -937,7 +924,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
|
||||
"attribute\n", __FILE__, __LINE__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
|
||||
if(!mcastFilterSize_p) {
|
||||
printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
|
||||
@ -945,7 +932,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
|
||||
__FILE__, __LINE__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
|
||||
|
||||
if(!netdev)
|
||||
@ -960,13 +947,14 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
|
||||
adapter->vdev = dev;
|
||||
adapter->netdev = netdev;
|
||||
adapter->mcastFilterSize= *mcastFilterSize_p;
|
||||
|
||||
adapter->pool_config = 0;
|
||||
|
||||
/* Some older boxes running PHYP non-natively have an OF that
|
||||
returns a 8-byte local-mac-address field (and the first
|
||||
returns a 8-byte local-mac-address field (and the first
|
||||
2 bytes have to be ignored) while newer boxes' OF return
|
||||
a 6-byte field. Note that IEEE 1275 specifies that
|
||||
a 6-byte field. Note that IEEE 1275 specifies that
|
||||
local-mac-address must be a 6-byte field.
|
||||
The RPA doc specifies that the first byte must be 10b, so
|
||||
The RPA doc specifies that the first byte must be 10b, so
|
||||
we'll just look for it to solve this 8 vs. 6 byte field issue */
|
||||
|
||||
if ((*mac_addr_p & 0x3) != 0x02)
|
||||
@ -976,7 +964,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
|
||||
memcpy(&adapter->mac_addr, mac_addr_p, 6);
|
||||
|
||||
adapter->liobn = dev->iommu_table->it_index;
|
||||
|
||||
|
||||
netdev->irq = dev->irq;
|
||||
netdev->open = ibmveth_open;
|
||||
netdev->poll = ibmveth_poll;
|
||||
@ -989,14 +977,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
|
||||
netdev->ethtool_ops = &netdev_ethtool_ops;
|
||||
netdev->change_mtu = ibmveth_change_mtu;
|
||||
SET_NETDEV_DEV(netdev, &dev->dev);
|
||||
netdev->features |= NETIF_F_LLTX;
|
||||
netdev->features |= NETIF_F_LLTX;
|
||||
spin_lock_init(&adapter->stats_lock);
|
||||
|
||||
memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
|
||||
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++)
|
||||
ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
|
||||
pool_count[i], pool_size[i]);
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++) {
|
||||
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
|
||||
ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
|
||||
pool_count[i], pool_size[i],
|
||||
pool_active[i]);
|
||||
kobj->parent = &dev->dev.kobj;
|
||||
sprintf(kobj->name, "pool%d", i);
|
||||
kobj->ktype = &ktype_veth_pool;
|
||||
kobject_register(kobj);
|
||||
}
|
||||
|
||||
ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
|
||||
|
||||
@ -1025,6 +1020,10 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
|
||||
{
|
||||
struct net_device *netdev = dev->dev.driver_data;
|
||||
struct ibmveth_adapter *adapter = netdev->priv;
|
||||
int i;
|
||||
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++)
|
||||
kobject_unregister(&adapter->rx_buff_pool[i].kobj);
|
||||
|
||||
unregister_netdev(netdev);
|
||||
|
||||
@ -1048,7 +1047,7 @@ static void ibmveth_proc_unregister_driver(void)
|
||||
remove_proc_entry(IBMVETH_PROC_DIR, NULL);
|
||||
}
|
||||
|
||||
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
{
|
||||
if (*pos == 0) {
|
||||
return (void *)1;
|
||||
@ -1063,18 +1062,18 @@ static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void ibmveth_seq_stop(struct seq_file *seq, void *v)
|
||||
static void ibmveth_seq_stop(struct seq_file *seq, void *v)
|
||||
{
|
||||
}
|
||||
|
||||
static int ibmveth_seq_show(struct seq_file *seq, void *v)
|
||||
static int ibmveth_seq_show(struct seq_file *seq, void *v)
|
||||
{
|
||||
struct ibmveth_adapter *adapter = seq->private;
|
||||
char *current_mac = ((char*) &adapter->netdev->dev_addr);
|
||||
char *firmware_mac = ((char*) &adapter->mac_addr) ;
|
||||
|
||||
seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
|
||||
|
||||
|
||||
seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
|
||||
seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
|
||||
seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
|
||||
@ -1083,7 +1082,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
|
||||
seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
|
||||
firmware_mac[0], firmware_mac[1], firmware_mac[2],
|
||||
firmware_mac[3], firmware_mac[4], firmware_mac[5]);
|
||||
|
||||
|
||||
seq_printf(seq, "\nAdapter Statistics:\n");
|
||||
seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
|
||||
seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
|
||||
@ -1095,7 +1094,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
|
||||
seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
|
||||
seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer);
|
||||
seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
static struct seq_operations ibmveth_seq_ops = {
|
||||
@ -1153,11 +1152,11 @@ static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
|
||||
}
|
||||
|
||||
#else /* CONFIG_PROC_FS */
|
||||
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
|
||||
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
|
||||
{
|
||||
}
|
||||
|
||||
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
|
||||
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
|
||||
{
|
||||
}
|
||||
static void ibmveth_proc_register_driver(void)
|
||||
@ -1169,6 +1168,132 @@ static void ibmveth_proc_unregister_driver(void)
|
||||
}
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
|
||||
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject * kobj,
                              struct attribute * attr, char * buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}
||||
|
||||
static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
|
||||
const char * buf, size_t count)
|
||||
{
|
||||
struct ibmveth_buff_pool *pool = container_of(kobj,
|
||||
struct ibmveth_buff_pool,
|
||||
kobj);
|
||||
struct net_device *netdev =
|
||||
container_of(kobj->parent, struct device, kobj)->driver_data;
|
||||
struct ibmveth_adapter *adapter = netdev->priv;
|
||||
long value = simple_strtol(buf, NULL, 10);
|
||||
long rc;
|
||||
|
||||
if (attr == &veth_active_attr) {
|
||||
if (value && !pool->active) {
|
||||
if(ibmveth_alloc_buffer_pool(pool)) {
|
||||
ibmveth_error_printk("unable to alloc pool\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
pool->active = 1;
|
||||
adapter->pool_config = 1;
|
||||
ibmveth_close(netdev);
|
||||
adapter->pool_config = 0;
|
||||
if ((rc = ibmveth_open(netdev)))
|
||||
return rc;
|
||||
} else if (!value && pool->active) {
|
||||
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
|
||||
int i;
|
||||
/* Make sure there is a buffer pool with buffers that
|
||||
can hold a packet of the size of the MTU */
|
||||
for(i = 0; i<IbmVethNumBufferPools; i++) {
|
||||
if (pool == &adapter->rx_buff_pool[i])
|
||||
continue;
|
||||
if (!adapter->rx_buff_pool[i].active)
|
||||
continue;
|
||||
if (mtu < adapter->rx_buff_pool[i].buff_size) {
|
||||
pool->active = 0;
|
||||
h_free_logical_lan_buffer(adapter->
|
||||
vdev->
|
||||
unit_address,
|
||||
pool->
|
||||
buff_size);
|
||||
}
|
||||
}
|
||||
if (pool->active) {
|
||||
ibmveth_error_printk("no active pool >= MTU\n");
|
||||
return -EPERM;
|
||||
}
|
||||
}
|
||||
} else if (attr == &veth_num_attr) {
|
||||
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
|
||||
return -EINVAL;
|
||||
else {
|
||||
adapter->pool_config = 1;
|
||||
ibmveth_close(netdev);
|
||||
adapter->pool_config = 0;
|
||||
pool->size = value;
|
||||
if ((rc = ibmveth_open(netdev)))
|
||||
return rc;
|
||||
}
|
||||
} else if (attr == &veth_size_attr) {
|
||||
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
|
||||
return -EINVAL;
|
||||
else {
|
||||
adapter->pool_config = 1;
|
||||
ibmveth_close(netdev);
|
||||
adapter->pool_config = 0;
|
||||
pool->buff_size = value;
|
||||
if ((rc = ibmveth_open(netdev)))
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
/* kick the interrupt handler to allocate/deallocate pools */
|
||||
ibmveth_interrupt(netdev->irq, netdev, NULL);
|
||||
return count;
|
||||
}
|
||||
|
||||
|
||||
#define ATTR(_name, _mode) \
	struct attribute veth_##_name##_attr = { \
	.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute * veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static struct sysfs_ops veth_pool_ops = {
	.show = veth_pool_show,
	.store = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release = NULL,
	.sysfs_ops = &veth_pool_ops,
	.default_attrs = veth_pool_attrs,
};
|
||||
|
||||
|
||||
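With the kobj_type above and the per-pool kobject_register() call added to ibmveth_probe() earlier in this patch, each receive buffer pool appears as a pool%d directory containing active, num and size files, parented under the vio device's sysfs directory. The exact path depends on where that device sits in the tree, so treat any concrete path as an assumption; the intended usage is that reading size on the third pool returns 16384, writing 1 to its active file allocates and enables the 16 KB pool, and writing a new value to num or size closes and reopens the interface with the resized pool.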
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
|
||||
{ "network", "IBM,l-lan"},
|
||||
{ "", "" }
|
||||
@ -1198,7 +1323,7 @@ static void __exit ibmveth_module_exit(void)
|
||||
{
|
||||
vio_unregister_driver(&ibmveth_driver);
|
||||
ibmveth_proc_unregister_driver();
|
||||
}
|
||||
}
|
||||
|
||||
module_init(ibmveth_module_init);
|
||||
module_exit(ibmveth_module_exit);
|
||||
|
@ -75,10 +75,13 @@

#define IbmVethNumBufferPools 5
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MAX_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)

/* pool_size should be sorted */
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 768, 256, 256, 256 };
static int pool_active[] = { 1, 1, 0, 0, 0};

#define IBM_VETH_INVALID_MAP ((u16)0xffff)
|
||||
|
||||
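A hedged worked example of what these tables mean in practice: by default only the 512-byte and 2 KB pools are active, so with the 22-byte IBMVETH_BUFF_OH and the strict '<' test in the rewritten ibmveth_change_mtu(), the largest MTU the driver accepts out of the box is 2048 - 22 - 1 = 2025 bytes; activating the 64 KB pool through the new sysfs files raises that limit to 65536 - 23 = 65513 bytes. The exact off-by-one follows from my reading of the comparison and is not spelled out elsewhere in the patch.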
@ -94,6 +97,7 @@ struct ibmveth_buff_pool {
|
||||
dma_addr_t *dma_addr;
|
||||
struct sk_buff **skbuff;
|
||||
int active;
|
||||
struct kobject kobj;
|
||||
};
|
||||
|
||||
struct ibmveth_rx_q {
|
||||
@ -118,6 +122,7 @@ struct ibmveth_adapter {
|
||||
dma_addr_t filter_list_dma;
|
||||
struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
|
||||
struct ibmveth_rx_q rx_queue;
|
||||
int pool_config;
|
||||
|
||||
/* adapter specific stats */
|
||||
u64 replenish_task_cycles;
|
||||
@ -134,7 +139,7 @@ struct ibmveth_adapter {
|
||||
spinlock_t stats_lock;
|
||||
};
|
||||
|
||||
struct ibmveth_buf_desc_fields {
|
||||
struct ibmveth_buf_desc_fields {
|
||||
u32 valid : 1;
|
||||
u32 toggle : 1;
|
||||
u32 reserved : 6;
|
||||
@ -143,7 +148,7 @@ struct ibmveth_buf_desc_fields {
|
||||
};
|
||||
|
||||
union ibmveth_buf_desc {
|
||||
u64 desc;
|
||||
u64 desc;
|
||||
struct ibmveth_buf_desc_fields fields;
|
||||
};
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
################################################################################
|
||||
#
|
||||
#
|
||||
# Copyright(c) 1999 - 2002 Intel Corporation. All rights reserved.
|
||||
# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License as published by the Free
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -84,7 +84,12 @@ struct ixgb_adapter;

#define IXGB_DBG(args...)
#endif

#define IXGB_ERR(args...) printk(KERN_ERR "ixgb: " args)
#define PFX "ixgb: "
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
		__FUNCTION__ , ## args))


/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
|
||||
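A short, hedged illustration of how the new DPRINTK() macro is used in the ixgb_main.c changes further down: the message class is checked against adapter->msg_enable before anything is printed, so for example

	DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");

	/* roughly expands to:
	 *   if (NETIF_MSG_PROBE & adapter->msg_enable)
	 *           printk(KERN_ERR "ixgb: %s: %s: The EEPROM Checksum Is Not Valid\n",
	 *                  adapter->netdev->name, __FUNCTION__);
	 */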
@ -175,6 +180,7 @@ struct ixgb_adapter {
|
||||
uint64_t hw_csum_tx_good;
|
||||
uint64_t hw_csum_tx_error;
|
||||
uint32_t tx_int_delay;
|
||||
uint32_t tx_timeout_count;
|
||||
boolean_t tx_int_delay_enable;
|
||||
boolean_t detect_tx_hung;
|
||||
|
||||
@ -192,7 +198,9 @@ struct ixgb_adapter {
|
||||
|
||||
/* structs defined in ixgb_hw.h */
|
||||
struct ixgb_hw hw;
|
||||
u16 msg_enable;
|
||||
struct ixgb_hw_stats stats;
|
||||
uint32_t alloc_rx_buff_failed;
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
boolean_t have_msi;
|
||||
#endif
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -44,6 +44,8 @@ extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
|
||||
extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
|
||||
extern void ixgb_update_stats(struct ixgb_adapter *adapter);
|
||||
|
||||
#define IXGB_ALL_RAR_ENTRIES 16
|
||||
|
||||
struct ixgb_stats {
|
||||
char stat_string[ETH_GSTRING_LEN];
|
||||
int sizeof_stat;
|
||||
@ -76,6 +78,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
|
||||
{"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)},
|
||||
{"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
|
||||
{"tx_deferred_ok", IXGB_STAT(stats.dc)},
|
||||
{"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
|
||||
{"rx_long_length_errors", IXGB_STAT(stats.roc)},
|
||||
{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
|
||||
#ifdef NETIF_F_TSO
|
||||
@ -117,6 +120,16 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ixgb_set_speed_duplex(struct net_device *netdev)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
/* be optimistic about our link, since we were up before */
|
||||
adapter->link_speed = 10000;
|
||||
adapter->link_duplex = FULL_DUPLEX;
|
||||
netif_carrier_on(netdev);
|
||||
netif_wake_queue(netdev);
|
||||
}
|
||||
|
||||
static int
|
||||
ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
|
||||
{
|
||||
@ -130,12 +143,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
|
||||
ixgb_down(adapter, TRUE);
|
||||
ixgb_reset(adapter);
|
||||
ixgb_up(adapter);
|
||||
/* be optimistic about our link, since we were up before */
|
||||
adapter->link_speed = 10000;
|
||||
adapter->link_duplex = FULL_DUPLEX;
|
||||
netif_carrier_on(netdev);
|
||||
netif_wake_queue(netdev);
|
||||
|
||||
ixgb_set_speed_duplex(netdev);
|
||||
} else
|
||||
ixgb_reset(adapter);
|
||||
|
||||
@ -183,11 +191,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
|
||||
if(netif_running(adapter->netdev)) {
|
||||
ixgb_down(adapter, TRUE);
|
||||
ixgb_up(adapter);
|
||||
/* be optimistic about our link, since we were up before */
|
||||
adapter->link_speed = 10000;
|
||||
adapter->link_duplex = FULL_DUPLEX;
|
||||
netif_carrier_on(netdev);
|
||||
netif_wake_queue(netdev);
|
||||
ixgb_set_speed_duplex(netdev);
|
||||
} else
|
||||
ixgb_reset(adapter);
|
||||
|
||||
@ -212,11 +216,7 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
|
||||
if(netif_running(netdev)) {
|
||||
ixgb_down(adapter,TRUE);
|
||||
ixgb_up(adapter);
|
||||
/* be optimistic about our link, since we were up before */
|
||||
adapter->link_speed = 10000;
|
||||
adapter->link_duplex = FULL_DUPLEX;
|
||||
netif_carrier_on(netdev);
|
||||
netif_wake_queue(netdev);
|
||||
ixgb_set_speed_duplex(netdev);
|
||||
} else
|
||||
ixgb_reset(adapter);
|
||||
return 0;
|
||||
@ -251,6 +251,19 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
|
||||
}
|
||||
#endif /* NETIF_F_TSO */
|
||||
|
||||
static uint32_t
|
||||
ixgb_get_msglevel(struct net_device *netdev)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
return adapter->msg_enable;
|
||||
}
|
||||
|
||||
static void
|
||||
ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
adapter->msg_enable = data;
|
||||
}
|
||||
#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
|
||||
|
||||
static int
|
||||
@ -303,7 +316,7 @@ ixgb_get_regs(struct net_device *netdev,
|
||||
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
|
||||
|
||||
/* there are 16 RAR entries in hardware, we only use 3 */
|
||||
for(i = 0; i < 16; i++) {
|
||||
for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
|
||||
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
|
||||
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
|
||||
}
|
||||
@ -593,11 +606,7 @@ ixgb_set_ringparam(struct net_device *netdev,
|
||||
adapter->tx_ring = tx_new;
|
||||
if((err = ixgb_up(adapter)))
|
||||
return err;
|
||||
/* be optimistic about our link, since we were up before */
|
||||
adapter->link_speed = 10000;
|
||||
adapter->link_duplex = FULL_DUPLEX;
|
||||
netif_carrier_on(netdev);
|
||||
netif_wake_queue(netdev);
|
||||
ixgb_set_speed_duplex(netdev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -714,6 +723,8 @@ static struct ethtool_ops ixgb_ethtool_ops = {
|
||||
.set_tx_csum = ixgb_set_tx_csum,
|
||||
.get_sg = ethtool_op_get_sg,
|
||||
.set_sg = ethtool_op_set_sg,
|
||||
.get_msglevel = ixgb_get_msglevel,
|
||||
.set_msglevel = ixgb_set_msglevel,
|
||||
#ifdef NETIF_F_TSO
|
||||
.get_tso = ethtool_op_get_tso,
|
||||
.set_tso = ixgb_set_tso,
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -57,6 +57,7 @@ typedef enum {
|
||||
typedef enum {
|
||||
ixgb_media_type_unknown = 0,
|
||||
ixgb_media_type_fiber = 1,
|
||||
ixgb_media_type_copper = 2,
|
||||
ixgb_num_media_types
|
||||
} ixgb_media_type;
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -43,6 +43,8 @@
|
||||
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
|
||||
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
|
||||
|
||||
#endif /* #ifndef _IXGB_IDS_H_ */
|
||||
#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
|
||||
#define IXGB_SUBDEVICE_ID_A00C 0xA00C
|
||||
|
||||
#endif /* #ifndef _IXGB_IDS_H_ */
|
||||
/* End of File */
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -28,22 +28,6 @@
|
||||
|
||||
#include "ixgb.h"
|
||||
|
||||
/* Change Log
|
||||
* 1.0.96 04/19/05
|
||||
* - Make needlessly global code static -- bunk@stusta.de
|
||||
* - ethtool cleanup -- shemminger@osdl.org
|
||||
* - Support for MODULE_VERSION -- linville@tuxdriver.com
|
||||
* - add skb_header_cloned check to the tso path -- herbert@apana.org.au
|
||||
* 1.0.88 01/05/05
|
||||
* - include fix to the condition that determines when to quit NAPI - Robert Olsson
|
||||
* - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
|
||||
* 1.0.84 10/26/04
|
||||
* - reset buffer_info->dma in Tx resource cleanup logic
|
||||
* 1.0.83 10/12/04
|
||||
* - sparse cleanup - shemminger@osdl.org
|
||||
* - fix tx resource cleanup logic
|
||||
*/
|
||||
|
||||
char ixgb_driver_name[] = "ixgb";
|
||||
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
|
||||
|
||||
@ -52,9 +36,9 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
|
||||
#else
|
||||
#define DRIVERNAPI "-NAPI"
|
||||
#endif
|
||||
#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
|
||||
#define DRV_VERSION "1.0.109-k2"DRIVERNAPI
|
||||
char ixgb_driver_version[] = DRV_VERSION;
|
||||
static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
|
||||
static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
|
||||
|
||||
/* ixgb_pci_tbl - PCI Device ID Table
|
||||
*
|
||||
@ -67,6 +51,8 @@ static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
|
||||
static struct pci_device_id ixgb_pci_tbl[] = {
|
||||
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
|
||||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
|
||||
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
|
||||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
|
||||
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
|
||||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
|
||||
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
|
||||
@ -148,6 +134,11 @@ MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_VERSION);
|
||||
|
||||
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
|
||||
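A hedged note on the new debug parameter above: it is fed to netif_msg_init() in ixgb_probe() later in this diff, which for an in-range nonzero value N yields the mask (1 << N) - 1, so the default of 3 enables the drv, probe and link message classes (0x7). The mask can also be changed at runtime through the get/set_msglevel ethtool hooks added in ixgb_ethtool.c. Illustrative sketch:

	/* insmod ixgb.ko debug=5   ->  msg_enable becomes 0x1f (bits 0..4)      */
	/* debug left at default 3  ->  msg_enable becomes 0x07 (drv|probe|link) */
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);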
|
||||
/* some defines for controlling descriptor fetches in h/w */
|
||||
#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
|
||||
#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
|
||||
@ -196,7 +187,7 @@ module_exit(ixgb_exit_module);
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
ixgb_irq_disable(struct ixgb_adapter *adapter)
|
||||
{
|
||||
atomic_inc(&adapter->irq_sem);
|
||||
@ -210,7 +201,7 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
ixgb_irq_enable(struct ixgb_adapter *adapter)
|
||||
{
|
||||
if(atomic_dec_and_test(&adapter->irq_sem)) {
|
||||
@ -231,6 +222,7 @@ ixgb_up(struct ixgb_adapter *adapter)
|
||||
|
||||
/* hardware has been reset, we need to reload some things */
|
||||
|
||||
ixgb_rar_set(hw, netdev->dev_addr, 0);
|
||||
ixgb_set_multi(netdev);
|
||||
|
||||
ixgb_restore_vlan(adapter);
|
||||
@ -240,6 +232,9 @@ ixgb_up(struct ixgb_adapter *adapter)
|
||||
ixgb_configure_rx(adapter);
|
||||
ixgb_alloc_rx_buffers(adapter);
|
||||
|
||||
/* disable interrupts and get the hardware into a known state */
|
||||
IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
|
||||
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
{
|
||||
boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
|
||||
@ -249,7 +244,7 @@ ixgb_up(struct ixgb_adapter *adapter)
|
||||
if (!pcix)
|
||||
adapter->have_msi = FALSE;
|
||||
else if((err = pci_enable_msi(adapter->pdev))) {
|
||||
printk (KERN_ERR
|
||||
DPRINTK(PROBE, ERR,
|
||||
"Unable to allocate MSI interrupt Error: %d\n", err);
|
||||
adapter->have_msi = FALSE;
|
||||
/* proceed to try to request regular interrupt */
|
||||
@ -259,11 +254,11 @@ ixgb_up(struct ixgb_adapter *adapter)
|
||||
#endif
|
||||
if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
|
||||
SA_SHIRQ | SA_SAMPLE_RANDOM,
|
||||
netdev->name, netdev)))
|
||||
netdev->name, netdev))) {
|
||||
DPRINTK(PROBE, ERR,
|
||||
"Unable to allocate interrupt Error: %d\n", err);
|
||||
return err;
|
||||
|
||||
/* disable interrupts and get the hardware into a known state */
|
||||
IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
|
||||
}
|
||||
|
||||
if((hw->max_frame_size != max_frame) ||
|
||||
(hw->max_frame_size !=
|
||||
@ -285,11 +280,12 @@ ixgb_up(struct ixgb_adapter *adapter)
|
||||
}
|
||||
|
||||
mod_timer(&adapter->watchdog_timer, jiffies);
|
||||
ixgb_irq_enable(adapter);
|
||||
|
||||
#ifdef CONFIG_IXGB_NAPI
|
||||
netif_poll_enable(netdev);
|
||||
#endif
|
||||
ixgb_irq_enable(adapter);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -326,7 +322,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
|
||||
|
||||
ixgb_adapter_stop(&adapter->hw);
|
||||
if(!ixgb_init_hw(&adapter->hw))
|
||||
IXGB_DBG("ixgb_init_hw failed.\n");
|
||||
DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -363,7 +359,8 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
} else {
|
||||
if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
|
||||
(err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
|
||||
IXGB_ERR("No usable DMA configuration, aborting\n");
|
||||
printk(KERN_ERR
|
||||
"ixgb: No usable DMA configuration, aborting\n");
|
||||
goto err_dma_mask;
|
||||
}
|
||||
pci_using_dac = 0;
|
||||
@ -388,6 +385,7 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
adapter->netdev = netdev;
|
||||
adapter->pdev = pdev;
|
||||
adapter->hw.back = adapter;
|
||||
adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
|
||||
|
||||
mmio_start = pci_resource_start(pdev, BAR_0);
|
||||
mmio_len = pci_resource_len(pdev, BAR_0);
|
||||
@ -416,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
netdev->change_mtu = &ixgb_change_mtu;
|
||||
ixgb_set_ethtool_ops(netdev);
|
||||
netdev->tx_timeout = &ixgb_tx_timeout;
|
||||
netdev->watchdog_timeo = HZ;
|
||||
netdev->watchdog_timeo = 5 * HZ;
|
||||
#ifdef CONFIG_IXGB_NAPI
|
||||
netdev->poll = &ixgb_clean;
|
||||
netdev->weight = 64;
|
||||
@ -428,6 +426,7 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
netdev->poll_controller = ixgb_netpoll;
|
||||
#endif
|
||||
|
||||
strcpy(netdev->name, pci_name(pdev));
|
||||
netdev->mem_start = mmio_start;
|
||||
netdev->mem_end = mmio_start + mmio_len;
|
||||
netdev->base_addr = adapter->hw.io_base;
|
||||
@ -449,6 +448,9 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
#ifdef NETIF_F_TSO
|
||||
netdev->features |= NETIF_F_TSO;
|
||||
#endif
|
||||
#ifdef NETIF_F_LLTX
|
||||
netdev->features |= NETIF_F_LLTX;
|
||||
#endif
|
||||
|
||||
if(pci_using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
@ -456,7 +458,7 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
/* make sure the EEPROM is good */
|
||||
|
||||
if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
|
||||
printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
|
||||
DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
|
||||
err = -EIO;
|
||||
goto err_eeprom;
|
||||
}
|
||||
@ -465,6 +467,7 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
|
||||
|
||||
if(!is_valid_ether_addr(netdev->perm_addr)) {
|
||||
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
|
||||
err = -EIO;
|
||||
goto err_eeprom;
|
||||
}
|
||||
@ -478,6 +481,7 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
INIT_WORK(&adapter->tx_timeout_task,
|
||||
(void (*)(void *))ixgb_tx_timeout_task, netdev);
|
||||
|
||||
strcpy(netdev->name, "eth%d");
|
||||
if((err = register_netdev(netdev)))
|
||||
goto err_register;
|
||||
|
||||
@ -486,8 +490,7 @@ ixgb_probe(struct pci_dev *pdev,
|
||||
netif_carrier_off(netdev);
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
|
||||
netdev->name);
|
||||
DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
|
||||
ixgb_check_options(adapter);
|
||||
/* reset the hardware with the new settings */
|
||||
|
||||
@ -557,17 +560,17 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
|
||||
hw->subsystem_vendor_id = pdev->subsystem_vendor;
|
||||
hw->subsystem_id = pdev->subsystem_device;
|
||||
|
||||
adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
|
||||
|
||||
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
|
||||
adapter->rx_buffer_len = hw->max_frame_size;
|
||||
|
||||
if((hw->device_id == IXGB_DEVICE_ID_82597EX)
|
||||
||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
|
||||
||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
|
||||
|| (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
|
||||
|| (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
|
||||
|| (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
|
||||
hw->mac_type = ixgb_82597;
|
||||
else {
|
||||
/* should never have loaded on this device */
|
||||
printk(KERN_ERR "ixgb: unsupported device id\n");
|
||||
DPRINTK(PROBE, ERR, "unsupported device id\n");
|
||||
}
|
||||
|
||||
/* enable flow control to be programmed */
|
||||
@ -665,6 +668,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
|
||||
size = sizeof(struct ixgb_buffer) * txdr->count;
|
||||
txdr->buffer_info = vmalloc(size);
|
||||
if(!txdr->buffer_info) {
|
||||
DPRINTK(PROBE, ERR,
|
||||
"Unable to allocate transmit descriptor ring memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(txdr->buffer_info, 0, size);
|
||||
@ -677,6 +682,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
|
||||
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
|
||||
if(!txdr->desc) {
|
||||
vfree(txdr->buffer_info);
|
||||
DPRINTK(PROBE, ERR,
|
||||
"Unable to allocate transmit descriptor memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(txdr->desc, 0, txdr->size);
|
||||
@ -750,6 +757,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
|
||||
size = sizeof(struct ixgb_buffer) * rxdr->count;
|
||||
rxdr->buffer_info = vmalloc(size);
|
||||
if(!rxdr->buffer_info) {
|
||||
DPRINTK(PROBE, ERR,
|
||||
"Unable to allocate receive descriptor ring\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(rxdr->buffer_info, 0, size);
|
||||
@ -763,6 +772,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
|
||||
|
||||
if(!rxdr->desc) {
|
||||
vfree(rxdr->buffer_info);
|
||||
DPRINTK(PROBE, ERR,
|
||||
"Unable to allocate receive descriptors\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(rxdr->desc, 0, rxdr->size);
|
||||
@ -794,21 +805,14 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
|
||||
|
||||
rctl |= IXGB_RCTL_SECRC;
|
||||
|
||||
switch (adapter->rx_buffer_len) {
|
||||
case IXGB_RXBUFFER_2048:
|
||||
default:
|
||||
if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
|
||||
rctl |= IXGB_RCTL_BSIZE_2048;
|
||||
break;
|
||||
case IXGB_RXBUFFER_4096:
|
||||
else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
|
||||
rctl |= IXGB_RCTL_BSIZE_4096;
|
||||
break;
|
||||
case IXGB_RXBUFFER_8192:
|
||||
else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
|
||||
rctl |= IXGB_RCTL_BSIZE_8192;
|
||||
break;
|
||||
case IXGB_RXBUFFER_16384:
|
||||
else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
|
||||
rctl |= IXGB_RCTL_BSIZE_16384;
|
||||
break;
|
||||
}
|
||||
|
||||
IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
|
||||
}
|
||||
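A hedged worked example for the rewritten receive-buffer sizing above: rx_buffer_len now tracks hw->max_frame_size directly (set from the MTU in ixgb_sw_init() and ixgb_change_mtu() elsewhere in this diff), and the if/else chain simply picks the smallest hardware BSIZE bucket that fits. Assuming the usual 14-byte Ethernet header and 4-byte FCS, a 9000-byte MTU works out as:

	/* max_frame = 9000 + 14 + 4 = 9018;  8192 < 9018 <= 16384 */
	rctl |= IXGB_RCTL_BSIZE_16384;	/* selected by the chain above */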
@ -898,22 +902,25 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
|
||||
adapter->tx_ring.desc = NULL;
|
||||
}
|
||||
|
||||
static inline void
|
||||
static void
|
||||
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
|
||||
struct ixgb_buffer *buffer_info)
|
||||
{
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
if(buffer_info->dma) {
|
||||
pci_unmap_page(pdev,
|
||||
buffer_info->dma,
|
||||
buffer_info->length,
|
||||
PCI_DMA_TODEVICE);
|
||||
buffer_info->dma = 0;
|
||||
}
|
||||
if(buffer_info->skb) {
|
||||
|
||||
if (buffer_info->dma)
|
||||
pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
|
||||
PCI_DMA_TODEVICE);
|
||||
|
||||
if (buffer_info->skb)
|
||||
dev_kfree_skb_any(buffer_info->skb);
|
||||
buffer_info->skb = NULL;
|
||||
}
|
||||
|
||||
buffer_info->skb = NULL;
|
||||
buffer_info->dma = 0;
|
||||
buffer_info->time_stamp = 0;
|
||||
/* these fields must always be initialized in tx
|
||||
* buffer_info->length = 0;
|
||||
* buffer_info->next_to_watch = 0; */
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1112,8 +1119,8 @@ ixgb_watchdog(unsigned long data)
|
||||
|
||||
if(adapter->hw.link_up) {
|
||||
if(!netif_carrier_ok(netdev)) {
|
||||
printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
|
||||
netdev->name, 10000, "Full Duplex");
|
||||
DPRINTK(LINK, INFO,
|
||||
"NIC Link is Up 10000 Mbps Full Duplex\n");
|
||||
adapter->link_speed = 10000;
|
||||
adapter->link_duplex = FULL_DUPLEX;
|
||||
netif_carrier_on(netdev);
|
||||
@ -1123,9 +1130,7 @@ ixgb_watchdog(unsigned long data)
|
||||
if(netif_carrier_ok(netdev)) {
|
||||
adapter->link_speed = 0;
|
||||
adapter->link_duplex = 0;
|
||||
printk(KERN_INFO
|
||||
"ixgb: %s NIC Link is Down\n",
|
||||
netdev->name);
|
||||
DPRINTK(LINK, INFO, "NIC Link is Down\n");
|
||||
netif_carrier_off(netdev);
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
@ -1158,7 +1163,7 @@ ixgb_watchdog(unsigned long data)
|
||||
#define IXGB_TX_FLAGS_VLAN 0x00000002
|
||||
#define IXGB_TX_FLAGS_TSO 0x00000004
|
||||
|
||||
static inline int
|
||||
static int
|
||||
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
|
||||
{
|
||||
#ifdef NETIF_F_TSO
|
||||
@ -1220,7 +1225,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline boolean_t
|
||||
static boolean_t
|
||||
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
|
||||
{
|
||||
struct ixgb_context_desc *context_desc;
|
||||
@ -1258,7 +1263,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
|
||||
#define IXGB_MAX_TXD_PWR 14
|
||||
#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
|
||||
|
||||
static inline int
|
||||
static int
|
||||
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
|
||||
unsigned int first)
|
||||
{
|
||||
@ -1284,6 +1289,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
|
||||
size,
|
||||
PCI_DMA_TODEVICE);
|
||||
buffer_info->time_stamp = jiffies;
|
||||
buffer_info->next_to_watch = 0;
|
||||
|
||||
len -= size;
|
||||
offset += size;
|
||||
@ -1309,6 +1315,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
|
||||
size,
|
||||
PCI_DMA_TODEVICE);
|
||||
buffer_info->time_stamp = jiffies;
|
||||
buffer_info->next_to_watch = 0;
|
||||
|
||||
len -= size;
|
||||
offset += size;
|
||||
@ -1323,7 +1330,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
|
||||
return count;
|
||||
}
|
||||
|
||||
static inline void
|
||||
static void
|
||||
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
|
||||
{
|
||||
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
|
||||
@ -1395,13 +1402,26 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef NETIF_F_LLTX
|
||||
local_irq_save(flags);
|
||||
if (!spin_trylock(&adapter->tx_lock)) {
|
||||
/* Collision - tell upper layer to requeue */
|
||||
local_irq_restore(flags);
|
||||
return NETDEV_TX_LOCKED;
|
||||
}
|
||||
#else
|
||||
spin_lock_irqsave(&adapter->tx_lock, flags);
|
||||
#endif
|
||||
|
||||
if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
|
||||
netif_stop_queue(netdev);
|
||||
spin_unlock_irqrestore(&adapter->tx_lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
#ifndef NETIF_F_LLTX
|
||||
spin_unlock_irqrestore(&adapter->tx_lock, flags);
|
||||
#endif
|
||||
|
||||
if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
|
||||
tx_flags |= IXGB_TX_FLAGS_VLAN;
|
||||
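The NETIF_F_LLTX branch added above changes who provides transmit locking: with LLTX the core no longer takes dev->xmit_lock, so the driver guards its ring with its own tx_lock and may report contention instead of spinning. A consolidated sketch of the pattern, identifiers as in the patch:

	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* another CPU is transmitting; tell the stack to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	if (unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}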
@ -1413,10 +1433,13 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
tso = ixgb_tso(adapter, skb);
|
||||
if (tso < 0) {
|
||||
dev_kfree_skb_any(skb);
|
||||
#ifdef NETIF_F_LLTX
|
||||
spin_unlock_irqrestore(&adapter->tx_lock, flags);
|
||||
#endif
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
if (tso)
|
||||
if (likely(tso))
|
||||
tx_flags |= IXGB_TX_FLAGS_TSO;
|
||||
else if(ixgb_tx_csum(adapter, skb))
|
||||
tx_flags |= IXGB_TX_FLAGS_CSUM;
|
||||
@ -1426,7 +1449,15 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
|
||||
netdev->trans_start = jiffies;
|
||||
|
||||
return 0;
|
||||
#ifdef NETIF_F_LLTX
|
||||
/* Make sure there is space in the ring for the next send. */
|
||||
if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
spin_unlock_irqrestore(&adapter->tx_lock, flags);
|
||||
|
||||
#endif
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1448,6 +1479,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
adapter->tx_timeout_count++;
|
||||
ixgb_down(adapter, TRUE);
|
||||
ixgb_up(adapter);
|
||||
}
|
||||
@ -1486,28 +1518,15 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
|
||||
if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|
||||
|| (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
|
||||
IXGB_ERR("Invalid MTU setting\n");
|
||||
DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|
||||
|| (max_frame <= IXGB_RXBUFFER_2048)) {
|
||||
adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
|
||||
|
||||
} else if(max_frame <= IXGB_RXBUFFER_4096) {
|
||||
adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
|
||||
|
||||
} else if(max_frame <= IXGB_RXBUFFER_8192) {
|
||||
adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
|
||||
|
||||
} else {
|
||||
adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
|
||||
}
|
||||
adapter->rx_buffer_len = max_frame;
|
||||
|
||||
netdev->mtu = new_mtu;
|
||||
|
||||
if(old_max_frame != max_frame && netif_running(netdev)) {
|
||||
|
||||
if ((old_max_frame != max_frame) && netif_running(netdev)) {
|
||||
ixgb_down(adapter, TRUE);
|
||||
ixgb_up(adapter);
|
||||
}
|
||||
@ -1765,23 +1784,43 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
|
||||
|
||||
tx_ring->next_to_clean = i;
|
||||
|
||||
spin_lock(&adapter->tx_lock);
|
||||
if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
|
||||
(IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
|
||||
|
||||
netif_wake_queue(netdev);
|
||||
if (unlikely(netif_queue_stopped(netdev))) {
|
||||
spin_lock(&adapter->tx_lock);
|
||||
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
|
||||
(IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
|
||||
netif_wake_queue(netdev);
|
||||
spin_unlock(&adapter->tx_lock);
|
||||
}
|
||||
spin_unlock(&adapter->tx_lock);
|
||||
|
||||
if(adapter->detect_tx_hung) {
|
||||
/* detect a transmit hang in hardware, this serializes the
|
||||
* check with the clearing of time_stamp and movement of i */
|
||||
adapter->detect_tx_hung = FALSE;
|
||||
if(tx_ring->buffer_info[i].dma &&
|
||||
time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
|
||||
if (tx_ring->buffer_info[eop].dma &&
|
||||
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
|
||||
&& !(IXGB_READ_REG(&adapter->hw, STATUS) &
|
||||
IXGB_STATUS_TXOFF))
|
||||
IXGB_STATUS_TXOFF)) {
|
||||
/* detected Tx unit hang */
|
||||
DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
|
||||
" TDH <%x>\n"
|
||||
" TDT <%x>\n"
|
||||
" next_to_use <%x>\n"
|
||||
" next_to_clean <%x>\n"
|
||||
"buffer_info[next_to_clean]\n"
|
||||
" time_stamp <%lx>\n"
|
||||
" next_to_watch <%x>\n"
|
||||
" jiffies <%lx>\n"
|
||||
" next_to_watch.status <%x>\n",
|
||||
IXGB_READ_REG(&adapter->hw, TDH),
|
||||
IXGB_READ_REG(&adapter->hw, TDT),
|
||||
tx_ring->next_to_use,
|
||||
tx_ring->next_to_clean,
|
||||
tx_ring->buffer_info[eop].time_stamp,
|
||||
eop,
|
||||
jiffies,
|
||||
eop_desc->status);
|
||||
netif_stop_queue(netdev);
|
||||
}
|
||||
}
|
||||
|
||||
return cleaned;
|
||||
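The hang detector above now samples the end-of-packet slot (eop) rather than next_to_clean: a hang is reported only when that buffer is still DMA-mapped, its time_stamp is more than HZ old, and the STATUS register does not show TXOFF (i.e. the link is not merely flow-controlled). A stand-alone sketch of the same staleness test, with wall-clock seconds standing in for jiffies/time_after(); the names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct tx_slot {
	bool   dma_mapped;   /* descriptor still owned by hardware */
	time_t time_stamp;   /* when the frame was handed to the NIC */
};

/* Declare a hang if the oldest in-flight frame is over 'timeout' seconds
 * old and the transmitter is not paused by flow control. */
static bool tx_hung(const struct tx_slot *eop, bool tx_paused, time_t timeout)
{
	if (!eop->dma_mapped || tx_paused)
		return false;
	return time(NULL) > eop->time_stamp + timeout;
}

int main(void)
{
	struct tx_slot slot = { .dma_mapped = true, .time_stamp = time(NULL) - 5 };
	printf("hung: %d\n", tx_hung(&slot, false, 1));   /* prints 1 */
	return 0;
}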
@ -1794,7 +1833,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
|
||||
* @sk_buff: socket buffer with received data
|
||||
**/
|
||||
|
||||
static inline void
|
||||
static void
|
||||
ixgb_rx_checksum(struct ixgb_adapter *adapter,
|
||||
struct ixgb_rx_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
@ -1858,6 +1897,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
|
||||
#endif
|
||||
status = rx_desc->status;
|
||||
skb = buffer_info->skb;
|
||||
buffer_info->skb = NULL;
|
||||
|
||||
prefetch(skb->data);
|
||||
|
||||
@ -1902,6 +1942,26 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
|
||||
goto rxdesc_done;
|
||||
}
|
||||
|
||||
/* code added for copybreak, this should improve
|
||||
* performance for small packets with large amounts
|
||||
* of reassembly being done in the stack */
|
||||
#define IXGB_CB_LENGTH 256
|
||||
if (length < IXGB_CB_LENGTH) {
|
||||
struct sk_buff *new_skb =
|
||||
dev_alloc_skb(length + NET_IP_ALIGN);
|
||||
if (new_skb) {
|
||||
skb_reserve(new_skb, NET_IP_ALIGN);
|
||||
new_skb->dev = netdev;
|
||||
memcpy(new_skb->data - NET_IP_ALIGN,
|
||||
skb->data - NET_IP_ALIGN,
|
||||
length + NET_IP_ALIGN);
|
||||
/* save the skb in buffer_info as good */
|
||||
buffer_info->skb = skb;
|
||||
skb = new_skb;
|
||||
}
|
||||
}
|
||||
/* end copybreak code */
|
||||
|
||||
/* Good Receive */
|
||||
skb_put(skb, length);
|
||||
|
||||
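The copybreak hunk above avoids handing a full-size receive buffer to the stack for tiny frames: anything shorter than IXGB_CB_LENGTH is copied into a freshly allocated small skb and the original buffer is stashed back in buffer_info for reuse on the next receive. A minimal user-space sketch of that trade-off; the threshold and helper names are made up for illustration:

#include <stdlib.h>
#include <string.h>

#define COPYBREAK_LEN 256

/* Return the buffer to hand to the stack.  Small frames are copied so the
 * large DMA buffer comes back via *recycled and can be posted again. */
static void *rx_copybreak(void *dma_buf, size_t len, void **recycled)
{
	*recycled = NULL;
	if (len < COPYBREAK_LEN) {
		void *copy = malloc(len);
		if (copy) {
			memcpy(copy, dma_buf, len);
			*recycled = dma_buf;   /* keep the big buffer for reuse */
			return copy;
		}
	}
	return dma_buf;                        /* pass the original buffer up */
}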
@ -1931,7 +1991,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
|
||||
rxdesc_done:
|
||||
/* clean up descriptor, might be written over by hw */
|
||||
rx_desc->status = 0;
|
||||
buffer_info->skb = NULL;
|
||||
|
||||
/* use prefetched values */
|
||||
rx_desc = next_rxd;
|
||||
@ -1971,12 +2030,18 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
|
||||
|
||||
/* leave three descriptors unused */
|
||||
while(--cleancount > 2) {
|
||||
rx_desc = IXGB_RX_DESC(*rx_ring, i);
|
||||
/* recycle! it's good for you */
|
||||
if (!(skb = buffer_info->skb))
|
||||
skb = dev_alloc_skb(adapter->rx_buffer_len
|
||||
+ NET_IP_ALIGN);
|
||||
else {
|
||||
skb_trim(skb, 0);
|
||||
goto map_skb;
|
||||
}
|
||||
|
||||
skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
|
||||
|
||||
if(unlikely(!skb)) {
|
||||
if (unlikely(!skb)) {
|
||||
/* Better luck next round */
|
||||
adapter->alloc_rx_buff_failed++;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1990,33 +2055,36 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
|
||||
|
||||
buffer_info->skb = skb;
|
||||
buffer_info->length = adapter->rx_buffer_len;
|
||||
buffer_info->dma =
|
||||
pci_map_single(pdev,
|
||||
skb->data,
|
||||
adapter->rx_buffer_len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
map_skb:
|
||||
buffer_info->dma = pci_map_single(pdev,
|
||||
skb->data,
|
||||
adapter->rx_buffer_len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
|
||||
rx_desc = IXGB_RX_DESC(*rx_ring, i);
|
||||
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
|
||||
/* guarantee DD bit not set now before h/w gets descriptor
|
||||
* this is the rest of the workaround for h/w double
|
||||
* writeback. */
|
||||
rx_desc->status = 0;
|
||||
|
||||
if((i & ~(num_group_tail_writes- 1)) == i) {
|
||||
/* Force memory writes to complete before letting h/w
|
||||
* know there are new descriptors to fetch. (Only
|
||||
* applicable for weak-ordered memory model archs,
|
||||
* such as IA-64). */
|
||||
wmb();
|
||||
|
||||
IXGB_WRITE_REG(&adapter->hw, RDT, i);
|
||||
}
|
||||
|
||||
if(++i == rx_ring->count) i = 0;
|
||||
buffer_info = &rx_ring->buffer_info[i];
|
||||
}
|
||||
|
||||
rx_ring->next_to_use = i;
|
||||
if (likely(rx_ring->next_to_use != i)) {
|
||||
rx_ring->next_to_use = i;
|
||||
if (unlikely(i-- == 0))
|
||||
i = (rx_ring->count - 1);
|
||||
|
||||
/* Force memory writes to complete before letting h/w
|
||||
* know there are new descriptors to fetch. (Only
|
||||
* applicable for weak-ordered memory model archs, such
|
||||
* as IA-64). */
|
||||
wmb();
|
||||
IXGB_WRITE_REG(&adapter->hw, RDT, i);
|
||||
}
|
||||
}
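The refill path above clears the descriptor status byte while filling it (the rest of the double write-back workaround) and issues one wmb() per batch before the single RDT tail write, so the descriptor contents are visible to the device before the doorbell on weakly ordered machines. As an analogy only, the same publish pattern in user-space C11 atomics; a real driver must keep using wmb() and the register write macro, not these library fences:

#include <stdatomic.h>
#include <stdint.h>

struct rx_desc_sketch {
	uint64_t buff_addr;
	uint8_t  status;     /* DD bit lives here; must stay 0 until HW sets it */
};

static struct rx_desc_sketch ring[256];
static _Atomic uint32_t tail;    /* stands in for the RDT doorbell register */

static void publish_descriptor(uint32_t i, uint64_t dma)
{
	ring[i].buff_addr = dma;
	ring[i].status = 0;                          /* guarantee DD is clear */
	atomic_thread_fence(memory_order_release);   /* ~wmb(): data before tail */
	atomic_store_explicit(&tail, i, memory_order_relaxed);
}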
|
||||
|
||||
/**
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
|
||||
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
||||
Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the Free
|
||||
@ -76,7 +76,7 @@ IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
|
||||
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
|
||||
* - 3 - Full Flow Control Support
|
||||
*
|
||||
* Default Value: Read flow control settings from the EEPROM
|
||||
* Default Value: 2 - Tx only (silicon bug avoidance)
|
||||
*/
|
||||
|
||||
IXGB_PARAM(FlowControl, "Flow Control setting");
|
||||
@ -137,7 +137,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
|
||||
*
|
||||
* Valid Range: 1 - 65535
|
||||
*
|
||||
* Default Value: 256 (0x100)
|
||||
* Default Value: 65535 (0xffff) (we'll send an xon if we recover)
|
||||
*/
|
||||
|
||||
IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
|
||||
@ -165,8 +165,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
|
||||
|
||||
#define XSUMRX_DEFAULT OPTION_ENABLED
|
||||
|
||||
#define FLOW_CONTROL_FULL ixgb_fc_full
|
||||
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
|
||||
#define DEFAULT_FCRTL 0x28000
|
||||
#define DEFAULT_FCRTH 0x30000
|
||||
#define MIN_FCRTL 0
|
||||
@ -174,9 +172,9 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
|
||||
#define MIN_FCRTH 8
|
||||
#define MAX_FCRTH 0x3FFF0
|
||||
|
||||
#define DEFAULT_FCPAUSE 0x100 /* this may be too long */
|
||||
#define MIN_FCPAUSE 1
|
||||
#define MAX_FCPAUSE 0xffff
|
||||
#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
|
||||
|
||||
struct ixgb_option {
|
||||
enum { enable_option, range_option, list_option } type;
|
||||
@ -336,7 +334,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
|
||||
.type = list_option,
|
||||
.name = "Flow Control",
|
||||
.err = "reading default settings from EEPROM",
|
||||
.def = ixgb_fc_full,
|
||||
.def = ixgb_fc_tx_pause,
|
||||
.arg = { .l = { .nr = LIST_LEN(fc_list),
|
||||
.p = fc_list }}
|
||||
};
|
||||
@ -365,8 +363,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
|
||||
} else {
|
||||
adapter->hw.fc.high_water = opt.def;
|
||||
}
|
||||
if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
|
||||
printk (KERN_INFO
|
||||
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
|
||||
printk (KERN_INFO
|
||||
"Ignoring RxFCHighThresh when no RxFC\n");
|
||||
}
|
||||
{ /* Receive Flow Control Low Threshold */
|
||||
@ -385,8 +383,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
|
||||
} else {
|
||||
adapter->hw.fc.low_water = opt.def;
|
||||
}
|
||||
if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
|
||||
printk (KERN_INFO
|
||||
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
|
||||
printk (KERN_INFO
|
||||
"Ignoring RxFCLowThresh when no RxFC\n");
|
||||
}
|
||||
{ /* Flow Control Pause Time Request*/
|
||||
@ -406,12 +404,12 @@ ixgb_check_options(struct ixgb_adapter *adapter)
|
||||
} else {
|
||||
adapter->hw.fc.pause_time = opt.def;
|
||||
}
|
||||
if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
|
||||
printk (KERN_INFO
|
||||
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
|
||||
printk (KERN_INFO
|
||||
"Ignoring FCReqTimeout when no RxFC\n");
|
||||
}
|
||||
/* high low and spacing check for rx flow control thresholds */
|
||||
if (adapter->hw.fc.type & ixgb_fc_rx_pause) {
|
||||
if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
|
||||
/* high must be greater than low */
|
||||
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
|
||||
/* set defaults */
|
||||
|
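With the default flow-control mode now ixgb_fc_tx_pause, the RxFC thresholds and FCReqTimeout only take effect when receive pause is enabled, and the final sanity check above requires the high watermark to sit at least 8 bytes above the low one. A small sketch of that validation, reusing the DEFAULT_FCRTL/DEFAULT_FCRTH values defined in this file; the function and variable names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_FCRTL 0x28000u
#define DEFAULT_FCRTH 0x30000u

/* Fall back to the defaults when high is not comfortably above low. */
static bool fc_thresholds_valid(unsigned int low, unsigned int high)
{
	return high >= low + 8;
}

int main(void)
{
	unsigned int low = 0x31000, high = 0x30000;   /* nonsense user request */

	if (!fc_thresholds_valid(low, high)) {
		low  = DEFAULT_FCRTL;
		high = DEFAULT_FCRTH;
	}
	printf("fcrtl=%#x fcrth=%#x\n", low, high);
	return 0;
}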
drivers/net/myri10ge/Makefile (new file, 5 lines)
@ -0,0 +1,5 @@
#
# Makefile for the Myricom Myri-10G ethernet driver
#

obj-$(CONFIG_MYRI10GE) += myri10ge.o
drivers/net/myri10ge/myri10ge.c (new file, 2869 lines; diff suppressed because it is too large)

drivers/net/myri10ge/myri10ge_mcp.h (new file, 205 lines)
@ -0,0 +1,205 @@
|
||||
#ifndef __MYRI10GE_MCP_H__
|
||||
#define __MYRI10GE_MCP_H__
|
||||
|
||||
#define MXGEFW_VERSION_MAJOR 1
|
||||
#define MXGEFW_VERSION_MINOR 4
|
||||
|
||||
/* 8 Bytes */
|
||||
struct mcp_dma_addr {
|
||||
u32 high;
|
||||
u32 low;
|
||||
};
|
||||
|
||||
/* 4 Bytes */
|
||||
struct mcp_slot {
|
||||
u16 checksum;
|
||||
u16 length;
|
||||
};
|
||||
|
||||
/* 64 Bytes */
|
||||
struct mcp_cmd {
|
||||
u32 cmd;
|
||||
u32 data0; /* will be low portion if data > 32 bits */
|
||||
/* 8 */
|
||||
u32 data1; /* will be high portion if data > 32 bits */
|
||||
u32 data2; /* currently unused.. */
|
||||
/* 16 */
|
||||
struct mcp_dma_addr response_addr;
|
||||
/* 24 */
|
||||
u8 pad[40];
|
||||
};
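mcp_dma_addr and the data0/data1 pair above both carry 64-bit quantities as explicit high/low 32-bit words, since the firmware mailbox is defined in terms of 32-bit stores. A hedged sketch of filling a command that way, in host byte order only (the real driver additionally swaps to the NIC's endianness):

#include <stdint.h>

struct dma_addr_sketch { uint32_t high; uint32_t low; };

struct cmd_sketch {
	uint32_t cmd;
	uint32_t data0;    /* low 32 bits of a 64-bit argument */
	uint32_t data1;    /* high 32 bits of a 64-bit argument */
	uint32_t data2;
	struct dma_addr_sketch response_addr;
};

static void fill_cmd(struct cmd_sketch *c, uint32_t op,
		     uint64_t arg, uint64_t response_bus_addr)
{
	c->cmd   = op;
	c->data0 = (uint32_t)(arg & 0xffffffffu);
	c->data1 = (uint32_t)(arg >> 32);
	c->data2 = 0;
	c->response_addr.low  = (uint32_t)(response_bus_addr & 0xffffffffu);
	c->response_addr.high = (uint32_t)(response_bus_addr >> 32);
}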
|
||||
|
||||
/* 8 Bytes */
|
||||
struct mcp_cmd_response {
|
||||
u32 data;
|
||||
u32 result;
|
||||
};
|
||||
|
||||
/*
|
||||
* flags used in mcp_kreq_ether_send_t:
|
||||
*
|
||||
* The SMALL flag is only needed in the first segment. It is raised
|
||||
* for packets whose total size is less than or equal to 512 bytes.
|
||||
*
|
||||
* The CKSUM flag must be set in all segments.
|
||||
*
|
||||
* The PADDED flag is set if the packet needs to be padded, and it
|
||||
* must be set for all segments.
|
||||
*
|
||||
* The MXGEFW_FLAGS_ALIGN_ODD must be set if the cumulative
|
||||
* length of all previous segments was odd.
|
||||
*/
|
||||
|
||||
#define MXGEFW_FLAGS_SMALL 0x1
|
||||
#define MXGEFW_FLAGS_TSO_HDR 0x1
|
||||
#define MXGEFW_FLAGS_FIRST 0x2
|
||||
#define MXGEFW_FLAGS_ALIGN_ODD 0x4
|
||||
#define MXGEFW_FLAGS_CKSUM 0x8
|
||||
#define MXGEFW_FLAGS_TSO_LAST 0x8
|
||||
#define MXGEFW_FLAGS_NO_TSO 0x10
|
||||
#define MXGEFW_FLAGS_TSO_CHOP 0x10
|
||||
#define MXGEFW_FLAGS_TSO_PLD 0x20
|
||||
|
||||
#define MXGEFW_SEND_SMALL_SIZE 1520
|
||||
#define MXGEFW_MAX_MTU 9400
|
||||
|
||||
union mcp_pso_or_cumlen {
|
||||
u16 pseudo_hdr_offset;
|
||||
u16 cum_len;
|
||||
};
|
||||
|
||||
#define MXGEFW_MAX_SEND_DESC 12
|
||||
#define MXGEFW_PAD 2
|
||||
|
||||
/* 16 Bytes */
|
||||
struct mcp_kreq_ether_send {
|
||||
u32 addr_high;
|
||||
u32 addr_low;
|
||||
u16 pseudo_hdr_offset;
|
||||
u16 length;
|
||||
u8 pad;
|
||||
u8 rdma_count;
|
||||
u8 cksum_offset; /* where to start computing cksum */
|
||||
u8 flags; /* as defined above */
|
||||
};
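Following the flag rules spelled out above (SMALL only on the first segment of frames of 512 bytes or less, CKSUM on every segment once checksum offload is in use, ALIGN_ODD when the bytes already queued sum to an odd length), a first-segment flag word could be chosen as below; the constants mirror the MXGEFW_FLAGS_* values but the helper itself is only an illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define FLAGS_SMALL     0x1
#define FLAGS_FIRST     0x2
#define FLAGS_ALIGN_ODD 0x4
#define FLAGS_CKSUM     0x8

static uint8_t first_segment_flags(size_t frame_len, bool hw_csum,
				   size_t bytes_sent_before)
{
	uint8_t flags = FLAGS_FIRST;

	if (frame_len <= 512)
		flags |= FLAGS_SMALL;       /* only needed on the first segment */
	if (hw_csum)
		flags |= FLAGS_CKSUM;       /* must then be set on every segment */
	if (bytes_sent_before & 1)
		flags |= FLAGS_ALIGN_ODD;   /* previous segments ended on an odd byte */
	return flags;
}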
|
||||
|
||||
/* 8 Bytes */
|
||||
struct mcp_kreq_ether_recv {
|
||||
u32 addr_high;
|
||||
u32 addr_low;
|
||||
};
|
||||
|
||||
/* Commands */
|
||||
|
||||
#define MXGEFW_CMD_OFFSET 0xf80000
|
||||
|
||||
enum myri10ge_mcp_cmd_type {
|
||||
MXGEFW_CMD_NONE = 0,
|
||||
/* Reset the mcp, it is left in a safe state, waiting
|
||||
* for the driver to set all its parameters */
|
||||
MXGEFW_CMD_RESET,
|
||||
|
||||
/* get the version number of the current firmware..
|
||||
* (may be available in the eeprom strings?) */
|
||||
MXGEFW_GET_MCP_VERSION,
|
||||
|
||||
/* Parameters which must be set by the driver before it can
|
||||
* issue MXGEFW_CMD_ETHERNET_UP. They persist until the next
|
||||
* MXGEFW_CMD_RESET is issued */
|
||||
|
||||
MXGEFW_CMD_SET_INTRQ_DMA,
|
||||
MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */
|
||||
MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */
|
||||
|
||||
/* Parameters which refer to lanai SRAM addresses where the
|
||||
* driver must issue PIO writes for various things */
|
||||
|
||||
MXGEFW_CMD_GET_SEND_OFFSET,
|
||||
MXGEFW_CMD_GET_SMALL_RX_OFFSET,
|
||||
MXGEFW_CMD_GET_BIG_RX_OFFSET,
|
||||
MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
|
||||
MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
|
||||
|
||||
/* Parameters which refer to rings stored on the MCP,
|
||||
* and whose size is controlled by the mcp */
|
||||
|
||||
MXGEFW_CMD_GET_SEND_RING_SIZE, /* in bytes */
|
||||
MXGEFW_CMD_GET_RX_RING_SIZE, /* in bytes */
|
||||
|
||||
/* Parameters which refer to rings stored in the host,
|
||||
* and whose size is controlled by the host. Note that
|
||||
* all must be physically contiguous and must contain
|
||||
* a power of 2 number of entries. */
|
||||
|
||||
MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */
|
||||
|
||||
/* command to bring ethernet interface up. Above parameters
|
||||
* (plus mtu & mac address) must have been exchanged prior
|
||||
* to issuing this command */
|
||||
MXGEFW_CMD_ETHERNET_UP,
|
||||
|
||||
/* command to bring ethernet interface down. No further sends
|
||||
* or receives may be processed until an MXGEFW_CMD_ETHERNET_UP
|
||||
* is issued, and all interrupt queues must be flushed prior
|
||||
* to ack'ing this command */
|
||||
|
||||
MXGEFW_CMD_ETHERNET_DOWN,
|
||||
|
||||
/* commands the driver may issue live, without resetting
|
||||
* the nic. Note that increasing the mtu "live" should
|
||||
* only be done if the driver has already supplied buffers
|
||||
* sufficiently large to handle the new mtu. Decreasing
|
||||
* the mtu live is safe */
|
||||
|
||||
MXGEFW_CMD_SET_MTU,
|
||||
MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */
|
||||
MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */
|
||||
MXGEFW_CMD_SET_STATS_DMA,
|
||||
|
||||
MXGEFW_ENABLE_PROMISC,
|
||||
MXGEFW_DISABLE_PROMISC,
|
||||
MXGEFW_SET_MAC_ADDRESS,
|
||||
|
||||
MXGEFW_ENABLE_FLOW_CONTROL,
|
||||
MXGEFW_DISABLE_FLOW_CONTROL,
|
||||
|
||||
/* do a DMA test
|
||||
* data0,data1 = DMA address
|
||||
* data2 = RDMA length (MSH), WDMA length (LSH)
|
||||
* command return data = repetitions (MSH), 0.5-ms ticks (LSH)
|
||||
*/
|
||||
MXGEFW_DMA_TEST
|
||||
};
|
||||
|
||||
enum myri10ge_mcp_cmd_status {
|
||||
MXGEFW_CMD_OK = 0,
|
||||
MXGEFW_CMD_UNKNOWN,
|
||||
MXGEFW_CMD_ERROR_RANGE,
|
||||
MXGEFW_CMD_ERROR_BUSY,
|
||||
MXGEFW_CMD_ERROR_EMPTY,
|
||||
MXGEFW_CMD_ERROR_CLOSED,
|
||||
MXGEFW_CMD_ERROR_HASH_ERROR,
|
||||
MXGEFW_CMD_ERROR_BAD_PORT,
|
||||
MXGEFW_CMD_ERROR_RESOURCES
|
||||
};
|
||||
|
||||
/* 40 Bytes */
|
||||
struct mcp_irq_data {
|
||||
u32 send_done_count;
|
||||
|
||||
u32 link_up;
|
||||
u32 dropped_link_overflow;
|
||||
u32 dropped_link_error_or_filtered;
|
||||
u32 dropped_runt;
|
||||
u32 dropped_overrun;
|
||||
u32 dropped_no_small_buffer;
|
||||
u32 dropped_no_big_buffer;
|
||||
u32 rdma_tags_available;
|
||||
|
||||
u8 tx_stopped;
|
||||
u8 link_down;
|
||||
u8 stats_updated;
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
#endif /* __MYRI10GE_MCP_H__ */
|
drivers/net/myri10ge/myri10ge_mcp_gen_header.h (new file, 58 lines)
@ -0,0 +1,58 @@
|
||||
#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
|
||||
#define __MYRI10GE_MCP_GEN_HEADER_H__
|
||||
|
||||
/* this file defines a standard header used as a first entry point to
|
||||
* exchange information between the firmware and the driver. The
|
||||
* header structure can be anywhere in the mcp. It will usually be in
|
||||
* the .data section, because some fields need to be initialized at
|
||||
* compile time.
|
||||
* The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must
|
||||
* contain the location of the header.
|
||||
*
|
||||
* Typically a MCP will start with the following:
|
||||
* .text
|
||||
* .space 52 ! to help catch MEMORY_INT errors
|
||||
* bt start ! jump to real code
|
||||
* nop
|
||||
* .long _gen_mcp_header
|
||||
*
|
||||
* The source will have a definition like:
|
||||
*
|
||||
* mcp_gen_header_t gen_mcp_header = {
|
||||
* .header_length = sizeof(mcp_gen_header_t),
|
||||
* .mcp_type = MCP_TYPE_XXX,
|
||||
* .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
|
||||
* .mcp_globals = (unsigned)&Globals
|
||||
* };
|
||||
*/
|
||||
|
||||
#define MCP_HEADER_PTR_OFFSET 0x3c
|
||||
|
||||
#define MCP_TYPE_MX 0x4d582020 /* "MX " */
|
||||
#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */
|
||||
#define MCP_TYPE_ETH 0x45544820 /* "ETH " */
|
||||
#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */
|
||||
|
||||
struct mcp_gen_header {
|
||||
/* the first 4 fields are filled at compile time */
|
||||
unsigned header_length;
|
||||
unsigned mcp_type;
|
||||
char version[128];
|
||||
unsigned mcp_globals; /* pointer to mcp-type specific structure */
|
||||
|
||||
/* filled by the MCP at run-time */
|
||||
unsigned sram_size;
|
||||
unsigned string_specs; /* either the original STRING_SPECS or a superset */
|
||||
unsigned string_specs_len;
|
||||
|
||||
/* Fields above this comment are guaranteed to be present.
|
||||
*
|
||||
* Fields below this comment are extensions added in later versions
|
||||
* of this struct, drivers should compare the header_length against
|
||||
* offsetof(field) to check whether a given MCP implements them.
|
||||
*
|
||||
* Never remove any field. Keep everything naturally aligned.
|
||||
*/
|
||||
};
|
||||
|
||||
#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
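As the comment block explains, the driver finds this header through the 32-bit pointer stored at MCP_HEADER_PTR_OFFSET and must treat fields beyond string_specs_len as optional, probing for them by comparing header_length with offsetof(). A minimal illustration of that probe; the extension field named here is invented:

#include <stdbool.h>
#include <stddef.h>

struct gen_header_sketch {
	unsigned header_length;
	unsigned mcp_type;
	char     version[128];
	unsigned mcp_globals;
	unsigned sram_size;
	unsigned string_specs;
	unsigned string_specs_len;
	unsigned hypothetical_new_field;   /* later extension, may be absent */
};

/* An extension field is present only if the firmware's header is long
 * enough to contain it. */
static bool has_field(const struct gen_header_sketch *hdr, size_t field_off)
{
	return hdr->header_length >= field_off + sizeof(unsigned);
}

/* usage: has_field(hdr, offsetof(struct gen_header_sketch,
 *                                hypothetical_new_field)) */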
|
@ -829,7 +829,7 @@ that the ne2k probe is the last 8390 based probe to take place (as it
|
||||
is at boot) and so the probe will get confused by any other 8390 cards.
|
||||
ISA device autoprobes on a running machine are not recommended anyway. */
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
int this_dev, found = 0;
|
||||
|
||||
|
@ -780,7 +780,7 @@ MODULE_PARM_DESC(bad, "(ignored)");
|
||||
|
||||
/* Module code fixed by David Weinehall */
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
struct net_device *dev;
|
||||
int this_dev, found = 0;
|
||||
|
@ -12,7 +12,7 @@
|
||||
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
|
||||
|
||||
pcnet_cs.c 1.153 2003/11/09 18:53:09
|
||||
|
||||
|
||||
The network driver code is based on Donald Becker's NE2000 code:
|
||||
|
||||
Written 1992,1993 by Donald Becker.
|
||||
@ -146,7 +146,7 @@ typedef struct hw_info_t {
|
||||
#define MII_PHYID_REG2 0x03
|
||||
|
||||
static hw_info_t hw_info[] = {
|
||||
{ /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
|
||||
{ /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
|
||||
{ /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
|
||||
{ /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
|
||||
{ /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
|
||||
@ -193,7 +193,7 @@ static hw_info_t hw_info[] = {
|
||||
{ /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
|
||||
{ /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
|
||||
HAS_MISC_REG | HAS_IBM_MISC },
|
||||
{ /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
|
||||
{ /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
|
||||
HAS_MISC_REG | HAS_IBM_MISC },
|
||||
{ /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
|
||||
{ /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
|
||||
@ -330,7 +330,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
|
||||
for (j = 0; j < 6; j++)
|
||||
dev->dev_addr[j] = readb(base + (j<<1));
|
||||
}
|
||||
|
||||
|
||||
iounmap(virt);
|
||||
j = pcmcia_release_window(link->win);
|
||||
if (j != CS_SUCCESS)
|
||||
@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link)
|
||||
if (link->io.NumPorts2 > 0) {
|
||||
/* for master/slave multifunction cards */
|
||||
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
|
||||
link->irq.Attributes =
|
||||
link->irq.Attributes =
|
||||
IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
|
||||
}
|
||||
} else {
|
||||
@ -543,19 +543,19 @@ static int pcnet_config(struct pcmcia_device *link)
|
||||
manfid = le16_to_cpu(buf[0]);
|
||||
prodid = le16_to_cpu(buf[1]);
|
||||
}
|
||||
|
||||
|
||||
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
|
||||
tuple.Attributes = 0;
|
||||
CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
|
||||
while (last_ret == CS_SUCCESS) {
|
||||
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
|
||||
cistpl_io_t *io = &(parse.cftable_entry.io);
|
||||
|
||||
|
||||
if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
|
||||
pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
|
||||
cfg->index == 0 || cfg->io.nwin == 0)
|
||||
goto next_entry;
|
||||
|
||||
|
||||
link->conf.ConfigIndex = cfg->index;
|
||||
/* For multifunction cards, by convention, we configure the
|
||||
network function with window 0, and serial with window 1 */
|
||||
@ -584,7 +584,7 @@ static int pcnet_config(struct pcmcia_device *link)
|
||||
}
|
||||
|
||||
CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
|
||||
|
||||
|
||||
if (link->io.NumPorts2 == 8) {
|
||||
link->conf.Attributes |= CONF_ENABLE_SPKR;
|
||||
link->conf.Status = CCSR_AUDIO_ENA;
|
||||
@ -592,7 +592,7 @@ static int pcnet_config(struct pcmcia_device *link)
|
||||
if ((manfid == MANFID_IBM) &&
|
||||
(prodid == PRODID_IBM_HOME_AND_AWAY))
|
||||
link->conf.ConfigIndex |= 0x10;
|
||||
|
||||
|
||||
CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
|
||||
dev->irq = link->irq.AssignedIRQ;
|
||||
dev->base_addr = link->io.BasePort1;
|
||||
@ -614,7 +614,7 @@ static int pcnet_config(struct pcmcia_device *link)
|
||||
hw_info = get_ax88190(link);
|
||||
if (hw_info == NULL)
|
||||
hw_info = get_hwired(link);
|
||||
|
||||
|
||||
if (hw_info == NULL) {
|
||||
printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
|
||||
" address for io base %#3lx\n", dev->base_addr);
|
||||
@ -631,7 +631,7 @@ static int pcnet_config(struct pcmcia_device *link)
|
||||
info->flags &= ~USE_BIG_BUF;
|
||||
if (!use_big_buf)
|
||||
info->flags &= ~USE_BIG_BUF;
|
||||
|
||||
|
||||
if (info->flags & USE_BIG_BUF) {
|
||||
start_pg = SOCKET_START_PG;
|
||||
stop_pg = SOCKET_STOP_PG;
|
||||
@ -929,7 +929,7 @@ static void set_misc_reg(struct net_device *dev)
|
||||
kio_addr_t nic_base = dev->base_addr;
|
||||
pcnet_dev_t *info = PRIV(dev);
|
||||
u_char tmp;
|
||||
|
||||
|
||||
if (info->flags & HAS_MISC_REG) {
|
||||
tmp = inb_p(nic_base + PCNET_MISC) & ~3;
|
||||
if (dev->if_port == 2)
|
||||
@ -1022,7 +1022,7 @@ static int pcnet_close(struct net_device *dev)
|
||||
|
||||
ei_close(dev);
|
||||
free_irq(dev->irq, dev);
|
||||
|
||||
|
||||
link->open--;
|
||||
netif_stop_queue(dev);
|
||||
del_timer_sync(&info->watchdog);
|
||||
@ -1054,12 +1054,12 @@ static void pcnet_reset_8390(struct net_device *dev)
|
||||
udelay(100);
|
||||
}
|
||||
outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
|
||||
|
||||
|
||||
if (i == 100)
|
||||
printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
|
||||
dev->name);
|
||||
set_misc_reg(dev);
|
||||
|
||||
|
||||
} /* pcnet_reset_8390 */
|
||||
|
||||
/*====================================================================*/
|
||||
@ -1233,7 +1233,7 @@ static void dma_get_8390_hdr(struct net_device *dev,
|
||||
dev->name, ei_status.dmaing, ei_status.irqlock);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
ei_status.dmaing |= 0x01;
|
||||
outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
|
||||
outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
|
||||
@ -1458,7 +1458,7 @@ static void shmem_get_8390_hdr(struct net_device *dev,
|
||||
void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
|
||||
+ (ring_page << 8)
|
||||
- (ei_status.rx_start_page << 8);
|
||||
|
||||
|
||||
copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
|
||||
/* Fix for big endian systems */
|
||||
hdr->count = le16_to_cpu(hdr->count);
|
||||
@ -1473,7 +1473,7 @@ static void shmem_block_input(struct net_device *dev, int count,
|
||||
unsigned long offset = (TX_PAGES<<8) + ring_offset
|
||||
- (ei_status.rx_start_page << 8);
|
||||
char *buf = skb->data;
|
||||
|
||||
|
||||
if (offset + count > ei_status.priv) {
|
||||
/* We must wrap the input move. */
|
||||
int semi_count = ei_status.priv - offset;
|
||||
@ -1541,7 +1541,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
|
||||
info->base = NULL; link->win = NULL;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
|
||||
ei_status.mem = info->base + offset;
|
||||
ei_status.priv = req.Size;
|
||||
dev->mem_start = (u_long)ei_status.mem;
|
||||
@ -1768,6 +1768,8 @@ static struct pcmcia_device_id pcnet_ids[] = {
|
||||
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
|
||||
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
|
||||
PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"),
|
||||
PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
|
||||
0xb4be14e3, 0x43ac239b, 0x0877b627),
|
||||
PCMCIA_DEVICE_NULL
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
|
||||
|
@ -45,5 +45,11 @@ config CICADA_PHY
---help---
Currently supports the cis8204

config SMSC_PHY
tristate "Drivers for SMSC PHYs"
depends on PHYLIB
---help---
Currently supports the LAN83C185 PHY

endmenu

@ -8,3 +8,4 @@ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
drivers/net/phy/smsc.c (new file, 101 lines)
@ -0,0 +1,101 @@
|
||||
/*
|
||||
* drivers/net/phy/smsc.c
|
||||
*
|
||||
* Driver for SMSC PHYs
|
||||
*
|
||||
* Author: Herbert Valerio Riedel
|
||||
*
|
||||
* Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mii.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/phy.h>
|
||||
#include <linux/netdevice.h>
|
||||
|
||||
#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
|
||||
#define MII_LAN83C185_IM 30 /* Interrupt Mask */
|
||||
|
||||
#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
|
||||
#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
|
||||
#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
|
||||
#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
|
||||
#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
|
||||
#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
|
||||
#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
|
||||
|
||||
#define MII_LAN83C185_ISF_INT_ALL (0x0e)
|
||||
|
||||
#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
|
||||
(MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
|
||||
|
||||
|
||||
static int lan83c185_config_intr(struct phy_device *phydev)
|
||||
{
|
||||
int rc = phy_write (phydev, MII_LAN83C185_IM,
|
||||
((PHY_INTERRUPT_ENABLED == phydev->interrupts)
|
||||
? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS
|
||||
: 0));
|
||||
|
||||
return rc < 0 ? rc : 0;
|
||||
}
|
||||
|
||||
static int lan83c185_ack_interrupt(struct phy_device *phydev)
|
||||
{
|
||||
int rc = phy_read (phydev, MII_LAN83C185_ISF);
|
||||
|
||||
return rc < 0 ? rc : 0;
|
||||
}
|
||||
|
||||
static int lan83c185_config_init(struct phy_device *phydev)
|
||||
{
|
||||
return lan83c185_ack_interrupt (phydev);
|
||||
}
|
||||
|
||||
|
||||
static struct phy_driver lan83c185_driver = {
|
||||
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
|
||||
.phy_id_mask = 0xfffffff0,
|
||||
.name = "SMSC LAN83C185",
|
||||
|
||||
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
|
||||
| SUPPORTED_Asym_Pause),
|
||||
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
|
||||
|
||||
/* basic functions */
|
||||
.config_aneg = genphy_config_aneg,
|
||||
.read_status = genphy_read_status,
|
||||
.config_init = lan83c185_config_init,
|
||||
|
||||
/* IRQ related */
|
||||
.ack_interrupt = lan83c185_ack_interrupt,
|
||||
.config_intr = lan83c185_config_intr,
|
||||
|
||||
.driver = { .owner = THIS_MODULE, }
|
||||
};
|
||||
|
||||
static int __init smsc_init(void)
|
||||
{
|
||||
return phy_driver_register (&lan83c185_driver);
|
||||
}
|
||||
|
||||
static void __exit smsc_exit(void)
|
||||
{
|
||||
phy_driver_unregister (&lan83c185_driver);
|
||||
}
|
||||
|
||||
MODULE_DESCRIPTION("SMSC PHY driver");
|
||||
MODULE_AUTHOR("Herbert Valerio Riedel");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
module_init(smsc_init);
|
||||
module_exit(smsc_exit);
|
@ -184,6 +184,7 @@ static const struct {
|
||||
|
||||
static struct pci_device_id rtl8169_pci_tbl[] = {
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
|
||||
{ PCI_DEVICE(0x16ec, 0x0116), },
|
||||
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, },
|
||||
|
@ -167,6 +167,7 @@ typedef struct _XENA_dev_config {
|
||||
u8 unused4[0x08];
|
||||
|
||||
u64 gpio_int_reg;
|
||||
#define GPIO_INT_REG_DP_ERR_INT BIT(0)
|
||||
#define GPIO_INT_REG_LINK_DOWN BIT(1)
|
||||
#define GPIO_INT_REG_LINK_UP BIT(2)
|
||||
u64 gpio_int_mask;
|
||||
@ -187,7 +188,7 @@ typedef struct _XENA_dev_config {
|
||||
/* PIC Control registers */
|
||||
u64 pic_control;
|
||||
#define PIC_CNTL_RX_ALARM_MAP_1 BIT(0)
|
||||
#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,4)
|
||||
#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5)
|
||||
|
||||
u64 swapper_ctrl;
|
||||
#define SWAPPER_CTRL_PIF_R_FE BIT(0)
|
||||
@ -267,6 +268,21 @@ typedef struct _XENA_dev_config {
|
||||
|
||||
/* General Configuration */
|
||||
u64 mdio_control;
|
||||
#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
|
||||
#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
|
||||
#define MDIO_MMD_PMA_DEV_ADDR 0x1
|
||||
#define MDIO_MMD_PMD_DEV_ADDR 0x1
|
||||
#define MDIO_MMD_WIS_DEV_ADDR 0x2
|
||||
#define MDIO_MMD_PCS_DEV_ADDR 0x3
|
||||
#define MDIO_MMD_PHYXS_DEV_ADDR 0x4
|
||||
#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
|
||||
#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
|
||||
#define MDIO_OP(val) vBIT(val, 60, 2)
|
||||
#define MDIO_OP_ADDR_TRANS 0x0
|
||||
#define MDIO_OP_WRITE_TRANS 0x1
|
||||
#define MDIO_OP_READ_POST_INC_TRANS 0x2
|
||||
#define MDIO_OP_READ_TRANS 0x3
|
||||
#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16)
|
||||
|
||||
u64 dtx_control;
|
||||
|
||||
@ -284,9 +300,13 @@ typedef struct _XENA_dev_config {
|
||||
u64 gpio_control;
|
||||
#define GPIO_CTRL_GPIO_0 BIT(8)
|
||||
u64 misc_control;
|
||||
#define EXT_REQ_EN BIT(1)
|
||||
#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
|
||||
|
||||
u8 unused7_1[0x240 - 0x208];
|
||||
u8 unused7_1[0x230 - 0x208];
|
||||
|
||||
u64 pic_control2;
|
||||
u64 ini_dperr_ctrl;
|
||||
|
||||
u64 wreq_split_mask;
|
||||
#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
|
||||
@ -493,6 +513,7 @@ typedef struct _XENA_dev_config {
|
||||
#define PRC_CTRL_NO_SNOOP_DESC BIT(22)
|
||||
#define PRC_CTRL_NO_SNOOP_BUFF BIT(23)
|
||||
#define PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
|
||||
#define PRC_CTRL_GROUP_READS BIT(38)
|
||||
#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
|
||||
|
||||
u64 prc_alarm_action;
|
||||
@ -541,7 +562,12 @@ typedef struct _XENA_dev_config {
|
||||
#define RX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
|
||||
#define RX_PA_CFG_IGNORE_L2_ERR BIT(6)
|
||||
|
||||
u8 unused12[0x700 - 0x1D8];
|
||||
u64 unused_11_1;
|
||||
|
||||
u64 ring_bump_counter1;
|
||||
u64 ring_bump_counter2;
|
||||
|
||||
u8 unused12[0x700 - 0x1F0];
|
||||
|
||||
u64 rxdma_debug_ctrl;
|
||||
|
||||
|
drivers/net/s2io.c (1478 lines changed; diff suppressed because it is too large)
@ -31,6 +31,8 @@
|
||||
#define SUCCESS 0
|
||||
#define FAILURE -1
|
||||
|
||||
#define CHECKBIT(value, nbit) (value & (1 << nbit))
|
||||
|
||||
/* Maximum time to flicker LED when asked to identify NIC using ethtool */
|
||||
#define MAX_FLICKER_TIME 60000 /* 60 Secs */
|
||||
|
||||
@ -78,6 +80,11 @@ static int debug_level = ERR_DBG;
|
||||
typedef struct {
|
||||
unsigned long long single_ecc_errs;
|
||||
unsigned long long double_ecc_errs;
|
||||
unsigned long long parity_err_cnt;
|
||||
unsigned long long serious_err_cnt;
|
||||
unsigned long long soft_reset_cnt;
|
||||
unsigned long long fifo_full_cnt;
|
||||
unsigned long long ring_full_cnt;
|
||||
/* LRO statistics */
|
||||
unsigned long long clubbed_frms_cnt;
|
||||
unsigned long long sending_both;
|
||||
@ -87,6 +94,25 @@ typedef struct {
|
||||
unsigned long long num_aggregations;
|
||||
} swStat_t;
|
||||
|
||||
/* Xpak related alarms and warnings */
|
||||
typedef struct {
|
||||
u64 alarm_transceiver_temp_high;
|
||||
u64 alarm_transceiver_temp_low;
|
||||
u64 alarm_laser_bias_current_high;
|
||||
u64 alarm_laser_bias_current_low;
|
||||
u64 alarm_laser_output_power_high;
|
||||
u64 alarm_laser_output_power_low;
|
||||
u64 warn_transceiver_temp_high;
|
||||
u64 warn_transceiver_temp_low;
|
||||
u64 warn_laser_bias_current_high;
|
||||
u64 warn_laser_bias_current_low;
|
||||
u64 warn_laser_output_power_high;
|
||||
u64 warn_laser_output_power_low;
|
||||
u64 xpak_regs_stat;
|
||||
u32 xpak_timer_count;
|
||||
} xpakStat_t;
|
||||
|
||||
|
||||
/* The statistics block of Xena */
|
||||
typedef struct stat_block {
|
||||
/* Tx MAC statistics counters. */
|
||||
@ -263,7 +289,9 @@ typedef struct stat_block {
|
||||
u32 rmac_accepted_ip_oflow;
|
||||
u32 reserved_14;
|
||||
u32 link_fault_cnt;
|
||||
u8 buffer[20];
|
||||
swStat_t sw_stat;
|
||||
xpakStat_t xpak_stat;
|
||||
} StatInfo_t;
|
||||
|
||||
/*
|
||||
@ -659,7 +687,8 @@ typedef struct {
|
||||
} usr_addr_t;
|
||||
|
||||
/* Default Tunable parameters of the NIC. */
|
||||
#define DEFAULT_FIFO_LEN 4096
|
||||
#define DEFAULT_FIFO_0_LEN 4096
|
||||
#define DEFAULT_FIFO_1_7_LEN 512
|
||||
#define SMALL_BLK_CNT 30
|
||||
#define LARGE_BLK_CNT 100
|
||||
|
||||
@ -732,7 +761,7 @@ struct s2io_nic {
|
||||
int device_close_flag;
|
||||
int device_enabled_once;
|
||||
|
||||
char name[50];
|
||||
char name[60];
|
||||
struct tasklet_struct task;
|
||||
volatile unsigned long tasklet_status;
|
||||
|
||||
@ -803,6 +832,8 @@ struct s2io_nic {
|
||||
char desc1[35];
|
||||
char desc2[35];
|
||||
|
||||
int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
|
||||
|
||||
struct msix_info_st msix_info[0x3f];
|
||||
|
||||
#define XFRAME_I_DEVICE 1
|
||||
@ -824,6 +855,8 @@ struct s2io_nic {
|
||||
spinlock_t rx_lock;
|
||||
atomic_t isr_cnt;
|
||||
u64 *ufo_in_band_v;
|
||||
#define VPD_PRODUCT_NAME_LEN 50
|
||||
u8 product_name[VPD_PRODUCT_NAME_LEN];
|
||||
};
|
||||
|
||||
#define RESET_ERROR 1;
|
||||
@ -848,28 +881,32 @@ static inline void writeq(u64 val, void __iomem *addr)
|
||||
writel((u32) (val), addr);
|
||||
writel((u32) (val >> 32), (addr + 4));
|
||||
}
|
||||
#endif
|
||||
|
||||
/* In 32 bit modes, some registers have to be written in a
|
||||
* particular order to expect correct hardware operation. The
|
||||
* macro SPECIAL_REG_WRITE is used to perform such ordered
|
||||
* writes. Defines UF (Upper First) and LF (Lower First) will
|
||||
* be used to specify the required write order.
|
||||
/*
|
||||
* Some registers have to be written in a particular order to
|
||||
* expect correct hardware operation. The macro SPECIAL_REG_WRITE
|
||||
* is used to perform such ordered writes. Defines UF (Upper First)
|
||||
* and LF (Lower First) will be used to specify the required write order.
|
||||
*/
|
||||
#define UF 1
|
||||
#define LF 2
|
||||
static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
|
||||
{
|
||||
u32 ret;
|
||||
|
||||
if (order == LF) {
|
||||
writel((u32) (val), addr);
|
||||
ret = readl(addr);
|
||||
writel((u32) (val >> 32), (addr + 4));
|
||||
ret = readl(addr + 4);
|
||||
} else {
|
||||
writel((u32) (val >> 32), (addr + 4));
|
||||
ret = readl(addr + 4);
|
||||
writel((u32) (val), addr);
|
||||
ret = readl(addr);
|
||||
}
|
||||
}
|
||||
#else
|
||||
#define SPECIAL_REG_WRITE(val, addr, dummy) writeq(val, addr)
|
||||
#endif
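On 32-bit hosts writeq() is emulated with two writel() calls, and SPECIAL_REG_WRITE() additionally lets the caller choose which half reaches the device first (LF or UF), reading the register back after each half so the writes are posted in that order. A user-space sketch of just the high/low split and the ordering choice; plain stores stand in for MMIO, so the read-back flush is omitted:

#include <stdint.h>

enum write_order { LOWER_FIRST, UPPER_FIRST };

static void write64_split(volatile uint32_t regs[2], uint64_t val,
			  enum write_order order)
{
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32);

	if (order == LOWER_FIRST) {
		regs[0] = lo;    /* lower 32 bits at the register address */
		regs[1] = hi;    /* upper 32 bits at address + 4 */
	} else {
		regs[1] = hi;
		regs[0] = lo;
	}
}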
|
||||
|
||||
/* Interrupt related values of Xena */
|
||||
|
||||
@ -965,7 +1002,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
|
||||
static struct ethtool_ops netdev_ethtool_ops;
|
||||
static void s2io_set_link(unsigned long data);
|
||||
static int s2io_set_swapper(nic_t * sp);
|
||||
static void s2io_card_down(nic_t *nic);
|
||||
static void s2io_card_down(nic_t *nic, int flag);
|
||||
static int s2io_card_up(nic_t *nic);
|
||||
static int get_xena_rev_id(struct pci_dev *pdev);
|
||||
static void restore_xmsi_data(nic_t *nic);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
|
||||
Copyright 1999 Silicon Integrated System Corporation
|
||||
Revision: 1.08.09 Sep. 19 2005
|
||||
Revision: 1.08.10 Apr. 2 2006
|
||||
|
||||
Modified from the driver which is originally written by Donald Becker.
|
||||
|
||||
@ -17,9 +17,10 @@
|
||||
SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
|
||||
preliminary Rev. 1.0 Jan. 18, 1998
|
||||
|
||||
Rev 1.08.10 Apr. 2 2006 Daniele Venzano add vlan (jumbo packets) support
|
||||
Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
|
||||
Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
|
||||
Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
|
||||
Rev 1.08.07 Nov. 2 2003 Daniele Venzano <venza@brownhat.org> add suspend/resume support
|
||||
Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
|
||||
Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary
|
||||
Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support
|
||||
@ -77,7 +78,7 @@
|
||||
#include "sis900.h"
|
||||
|
||||
#define SIS900_MODULE_NAME "sis900"
|
||||
#define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005"
|
||||
#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
|
||||
|
||||
static char version[] __devinitdata =
|
||||
KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
|
||||
@ -1402,6 +1403,11 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
|
||||
rx_flags |= RxATX;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
|
||||
/* Can accept Jumbo packet */
|
||||
rx_flags |= RxAJAB;
|
||||
#endif
|
||||
|
||||
outl (tx_flags, ioaddr + txcfg);
|
||||
outl (rx_flags, ioaddr + rxcfg);
|
||||
}
|
||||
@ -1714,18 +1720,26 @@ static int sis900_rx(struct net_device *net_dev)
|
||||
|
||||
while (rx_status & OWN) {
|
||||
unsigned int rx_size;
|
||||
unsigned int data_size;
|
||||
|
||||
if (--rx_work_limit < 0)
|
||||
break;
|
||||
|
||||
rx_size = (rx_status & DSIZE) - CRC_SIZE;
|
||||
data_size = rx_status & DSIZE;
|
||||
rx_size = data_size - CRC_SIZE;
|
||||
|
||||
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
|
||||
/* ``TOOLONG'' flag means a jumbo packet was received. */
|
||||
if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
|
||||
rx_status &= (~ ((unsigned int)TOOLONG));
|
||||
#endif
|
||||
|
||||
if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
|
||||
/* corrupted packet received */
|
||||
if (netif_msg_rx_err(sis_priv))
|
||||
printk(KERN_DEBUG "%s: Corrupted packet "
|
||||
"received, buffer status = 0x%8.8x.\n",
|
||||
net_dev->name, rx_status);
|
||||
"received, buffer status = 0x%8.8x/%d.\n",
|
||||
net_dev->name, rx_status, data_size);
|
||||
sis_priv->stats.rx_errors++;
|
||||
if (rx_status & OVERRUN)
|
||||
sis_priv->stats.rx_over_errors++;
|
||||
|
@ -310,8 +310,14 @@ enum sis630_revision_id {
|
||||
#define CRC_SIZE 4
|
||||
#define MAC_HEADER_SIZE 14
|
||||
|
||||
#define TX_BUF_SIZE 1536
|
||||
#define RX_BUF_SIZE 1536
|
||||
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
|
||||
#define MAX_FRAME_SIZE (1518 + 4)
|
||||
#else
|
||||
#define MAX_FRAME_SIZE 1518
|
||||
#endif /* CONFIG_VLAN_802_1Q */
|
||||
|
||||
#define TX_BUF_SIZE (MAX_FRAME_SIZE+18)
|
||||
#define RX_BUF_SIZE (MAX_FRAME_SIZE+18)
|
||||
|
||||
#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */
|
||||
#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */
|
||||
|
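With 802.1Q support compiled in, MAX_FRAME_SIZE grows from 1518 to 1522 and both descriptor buffers track it (MAX_FRAME_SIZE + 18), while the receive path clears the TOOLONG error for frames that still fit once the VLAN tag is counted. A quick sketch of the sizing arithmetic and the acceptance check, mirroring the macros in this hunk:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_TAG_LEN   4
#define CRC_SIZE       4
#define MAX_FRAME_SIZE (1518 + VLAN_TAG_LEN)   /* 1522 with 802.1Q enabled */
#define RX_BUF_SIZE    (MAX_FRAME_SIZE + 18)   /* per-descriptor buffer */

/* A frame flagged TOOLONG is still acceptable if it fits once the VLAN
 * tag is accounted for. */
static bool rx_frame_ok(unsigned int data_size, bool toolong)
{
	return !toolong || data_size <= MAX_FRAME_SIZE;
}

int main(void)
{
	printf("rx buffer = %d, payload = %d\n", RX_BUF_SIZE,
	       MAX_FRAME_SIZE - CRC_SIZE);
	printf("1522-byte tagged frame ok: %d\n", rx_frame_ok(1522, true));
	return 0;
}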
@ -44,12 +44,13 @@
|
||||
#include "skge.h"
|
||||
|
||||
#define DRV_NAME "skge"
|
||||
#define DRV_VERSION "1.5"
|
||||
#define DRV_VERSION "1.6"
|
||||
#define PFX DRV_NAME " "
|
||||
|
||||
#define DEFAULT_TX_RING_SIZE 128
|
||||
#define DEFAULT_RX_RING_SIZE 512
|
||||
#define MAX_TX_RING_SIZE 1024
|
||||
#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
|
||||
#define MAX_RX_RING_SIZE 4096
|
||||
#define RX_COPY_THRESHOLD 128
|
||||
#define RX_BUF_SIZE 1536
|
||||
@ -401,7 +402,7 @@ static int skge_set_ring_param(struct net_device *dev,
|
||||
int err;
|
||||
|
||||
if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
|
||||
p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
|
||||
p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
skge->rx_ring.count = p->rx_pending;
|
||||
@ -603,7 +604,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
|
||||
struct skge_hw *hw = skge->hw;
|
||||
int port = skge->port;
|
||||
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
if (hw->chip_id == CHIP_ID_GENESIS) {
|
||||
switch (mode) {
|
||||
case LED_MODE_OFF:
|
||||
@ -663,7 +664,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
|
||||
PHY_M_LED_MO_RX(MO_LED_ON));
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
}
|
||||
|
||||
/* blink LED's for finding board */
|
||||
@ -2038,7 +2039,7 @@ static void skge_phy_reset(struct skge_port *skge)
|
||||
netif_stop_queue(skge->netdev);
|
||||
netif_carrier_off(skge->netdev);
|
||||
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
if (hw->chip_id == CHIP_ID_GENESIS) {
|
||||
genesis_reset(hw, port);
|
||||
genesis_mac_init(hw, port);
|
||||
@ -2046,7 +2047,7 @@ static void skge_phy_reset(struct skge_port *skge)
|
||||
yukon_reset(hw, port);
|
||||
yukon_init(hw, port);
|
||||
}
|
||||
spin_unlock_bh(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
}
|
||||
|
||||
/* Basic MII support */
|
||||
@ -2067,12 +2068,12 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
||||
/* fallthru */
|
||||
case SIOCGMIIREG: {
|
||||
u16 val = 0;
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
if (hw->chip_id == CHIP_ID_GENESIS)
|
||||
err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
|
||||
else
|
||||
err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
|
||||
spin_unlock_bh(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
data->val_out = val;
|
||||
break;
|
||||
}
|
||||
@ -2081,14 +2082,14 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
if (hw->chip_id == CHIP_ID_GENESIS)
|
||||
err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
|
||||
data->val_in);
|
||||
else
|
||||
err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
|
||||
data->val_in);
|
||||
spin_unlock_bh(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
@ -2191,12 +2192,12 @@ static int skge_up(struct net_device *dev)
|
||||
goto free_rx_ring;
|
||||
|
||||
/* Initialize MAC */
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
if (hw->chip_id == CHIP_ID_GENESIS)
|
||||
genesis_mac_init(hw, port);
|
||||
else
|
||||
yukon_mac_init(hw, port);
|
||||
spin_unlock_bh(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
|
||||
/* Configure RAMbuffers */
|
||||
chunk = hw->ram_size / ((hw->ports + 1)*2);
|
||||
@ -2302,21 +2303,20 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct skge_port *skge = netdev_priv(dev);
|
||||
struct skge_hw *hw = skge->hw;
|
||||
struct skge_ring *ring = &skge->tx_ring;
|
||||
struct skge_element *e;
|
||||
struct skge_tx_desc *td;
|
||||
int i;
|
||||
u32 control, len;
|
||||
u64 map;
|
||||
unsigned long flags;
|
||||
|
||||
skb = skb_padto(skb, ETH_ZLEN);
|
||||
if (!skb)
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
if (!spin_trylock(&skge->tx_lock)) {
|
||||
if (!spin_trylock_irqsave(&skge->tx_lock, flags))
|
||||
/* Collision - tell upper layer to requeue */
|
||||
return NETDEV_TX_LOCKED;
|
||||
}
|
||||
|
||||
if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
|
||||
if (!netif_queue_stopped(dev)) {
|
||||
@ -2325,12 +2325,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
|
||||
dev->name);
|
||||
}
|
||||
spin_unlock(&skge->tx_lock);
|
||||
spin_unlock_irqrestore(&skge->tx_lock, flags);
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
e = ring->to_use;
|
||||
e = skge->tx_ring.to_use;
|
||||
td = e->desc;
|
||||
BUG_ON(td->control & BMU_OWN);
|
||||
e->skb = skb;
|
||||
len = skb_headlen(skb);
|
||||
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
|
||||
@ -2371,8 +2372,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
frag->size, PCI_DMA_TODEVICE);
|
||||
|
||||
e = e->next;
|
||||
e->skb = NULL;
|
||||
e->skb = skb;
|
||||
tf = e->desc;
|
||||
BUG_ON(tf->control & BMU_OWN);
|
||||
|
||||
tf->dma_lo = map;
|
||||
tf->dma_hi = (u64) map >> 32;
|
||||
pci_unmap_addr_set(e, mapaddr, map);
|
||||
@ -2389,56 +2392,68 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
|
||||
|
||||
if (netif_msg_tx_queued(skge))
|
||||
if (unlikely(netif_msg_tx_queued(skge)))
|
||||
printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
|
||||
dev->name, e - ring->start, skb->len);
|
||||
dev->name, e - skge->tx_ring.start, skb->len);
|
||||
|
||||
ring->to_use = e->next;
|
||||
if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
|
||||
skge->tx_ring.to_use = e->next;
|
||||
if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
|
||||
pr_debug("%s: transmit queue full\n", dev->name);
|
||||
netif_stop_queue(dev);
|
||||
}
|
||||
|
||||
mmiowb();
|
||||
spin_unlock(&skge->tx_lock);
|
||||
spin_unlock_irqrestore(&skge->tx_lock, flags);
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
|
||||
|
||||
/* Free resources associated with this ring element */
|
||||
static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
|
||||
u32 control)
|
||||
{
|
||||
struct pci_dev *pdev = skge->hw->pdev;
|
||||
struct skge_element *e;
|
||||
|
||||
for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
|
||||
struct sk_buff *skb = e->skb;
|
||||
int i;
|
||||
BUG_ON(!e->skb);
|
||||
|
||||
e->skb = NULL;
|
||||
/* skb header vs. fragment */
|
||||
if (control & BMU_STF)
|
||||
pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
|
||||
skb_headlen(skb), PCI_DMA_TODEVICE);
|
||||
pci_unmap_len(e, maplen),
|
||||
PCI_DMA_TODEVICE);
|
||||
else
|
||||
pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
|
||||
pci_unmap_len(e, maplen),
|
||||
PCI_DMA_TODEVICE);
|
||||
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
e = e->next;
|
||||
pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
|
||||
skb_shinfo(skb)->frags[i].size,
|
||||
PCI_DMA_TODEVICE);
|
||||
}
|
||||
if (control & BMU_EOF) {
|
||||
if (unlikely(netif_msg_tx_done(skge)))
|
||||
printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
|
||||
skge->netdev->name, e - skge->tx_ring.start);
|
||||
|
||||
dev_kfree_skb(skb);
|
||||
dev_kfree_skb_any(e->skb);
|
||||
}
|
||||
skge->tx_ring.to_clean = e;
|
||||
e->skb = NULL;
|
||||
}
|
||||
|
||||
/* Free all buffers in transmit ring */
|
||||
static void skge_tx_clean(struct skge_port *skge)
|
||||
{
|
||||
struct skge_element *e;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_bh(&skge->tx_lock);
|
||||
skge_tx_complete(skge, skge->tx_ring.to_use);
|
||||
spin_lock_irqsave(&skge->tx_lock, flags);
|
||||
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
|
||||
struct skge_tx_desc *td = e->desc;
|
||||
skge_tx_free(skge, e, td->control);
|
||||
td->control = 0;
|
||||
}
|
||||
|
||||
skge->tx_ring.to_clean = e;
|
||||
netif_wake_queue(skge->netdev);
|
||||
spin_unlock_bh(&skge->tx_lock);
|
||||
spin_unlock_irqrestore(&skge->tx_lock, flags);
|
||||
}
|
||||
|
||||
static void skge_tx_timeout(struct net_device *dev)
|
||||
@ -2664,32 +2679,28 @@ resubmit:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void skge_tx_done(struct skge_port *skge)
|
||||
/* Free all buffers in Tx ring which are no longer owned by device */
|
||||
static void skge_txirq(struct net_device *dev)
|
||||
{
|
||||
struct skge_port *skge = netdev_priv(dev);
|
||||
struct skge_ring *ring = &skge->tx_ring;
|
||||
struct skge_element *e, *last;
|
||||
struct skge_element *e;
|
||||
|
||||
rmb();
|
||||
|
||||
spin_lock(&skge->tx_lock);
|
||||
last = ring->to_clean;
|
||||
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
|
||||
struct skge_tx_desc *td = e->desc;
|
||||
|
||||
if (td->control & BMU_OWN)
|
||||
break;
|
||||
|
||||
if (td->control & BMU_EOF) {
|
||||
last = e->next;
|
||||
if (unlikely(netif_msg_tx_done(skge)))
|
||||
printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
|
||||
skge->netdev->name, e - ring->start);
|
||||
}
|
||||
skge_tx_free(skge, e, td->control);
|
||||
}
|
||||
skge->tx_ring.to_clean = e;
|
||||
|
||||
skge_tx_complete(skge, last);
|
||||
|
||||
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
|
||||
|
||||
if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
|
||||
if (netif_queue_stopped(skge->netdev)
|
||||
&& skge_avail(&skge->tx_ring) > TX_LOW_WATER)
|
||||
netif_wake_queue(skge->netdev);
|
||||
|
||||
spin_unlock(&skge->tx_lock);
|
||||
@ -2704,8 +2715,6 @@ static int skge_poll(struct net_device *dev, int *budget)
|
||||
int to_do = min(dev->quota, *budget);
|
||||
int work_done = 0;
|
||||
|
||||
skge_tx_done(skge);
|
||||
|
||||
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
|
||||
struct skge_rx_desc *rd = e->desc;
|
||||
struct sk_buff *skb;
|
||||
@ -2737,10 +2746,12 @@ static int skge_poll(struct net_device *dev, int *budget)
|
||||
return 1; /* not done */
|
||||
|
||||
netif_rx_complete(dev);
|
||||
mmiowb();
|
||||
|
||||
hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask |= rxirqmask[skge->port];
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
mmiowb();
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2847,16 +2858,16 @@ static void skge_error_irq(struct skge_hw *hw)
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupt from PHY are handled in tasklet (soft irq)
|
||||
* Interrupts from the PHY are handled in a work queue
|
||||
* because accessing phy registers requires spin wait which might
|
||||
* cause excess interrupt latency.
|
||||
*/
|
||||
static void skge_extirq(unsigned long data)
|
||||
static void skge_extirq(void *arg)
|
||||
{
|
||||
struct skge_hw *hw = (struct skge_hw *) data;
|
||||
struct skge_hw *hw = arg;
|
||||
int port;
|
||||
|
||||
spin_lock(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
for (port = 0; port < hw->ports; port++) {
|
||||
struct net_device *dev = hw->dev[port];
|
||||
struct skge_port *skge = netdev_priv(dev);
|
||||
@ -2868,10 +2879,12 @@ static void skge_extirq(unsigned long data)
|
||||
bcom_phy_intr(skge);
|
||||
}
|
||||
}
|
||||
spin_unlock(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask |= IS_EXT_REG;
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
}
|
||||
|
||||
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
|
||||
@ -2884,54 +2897,68 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
|
||||
if (status == 0)
|
||||
return IRQ_NONE;
|
||||
|
||||
spin_lock(&hw->hw_lock);
|
||||
status &= hw->intr_mask;
|
||||
if (status & IS_EXT_REG) {
|
||||
hw->intr_mask &= ~IS_EXT_REG;
|
||||
tasklet_schedule(&hw->ext_tasklet);
|
||||
schedule_work(&hw->phy_work);
|
||||
}
|
||||
|
||||
if (status & (IS_R1_F|IS_XA1_F)) {
|
||||
if (status & IS_XA1_F) {
|
||||
skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
|
||||
skge_txirq(hw->dev[0]);
|
||||
}
|
||||
|
||||
if (status & IS_R1_F) {
|
||||
skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
|
||||
hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
|
||||
hw->intr_mask &= ~IS_R1_F;
|
||||
netif_rx_schedule(hw->dev[0]);
|
||||
}
|
||||
|
||||
if (status & (IS_R2_F|IS_XA2_F)) {
|
||||
skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
|
||||
hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
|
||||
netif_rx_schedule(hw->dev[1]);
|
||||
}
|
||||
|
||||
if (likely((status & hw->intr_mask) == 0))
|
||||
return IRQ_HANDLED;
|
||||
|
||||
if (status & IS_PA_TO_RX1) {
|
||||
struct skge_port *skge = netdev_priv(hw->dev[0]);
|
||||
++skge->net_stats.rx_over_errors;
|
||||
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
|
||||
}
|
||||
|
||||
if (status & IS_PA_TO_RX2) {
|
||||
struct skge_port *skge = netdev_priv(hw->dev[1]);
|
||||
++skge->net_stats.rx_over_errors;
|
||||
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
|
||||
}
|
||||
|
||||
if (status & IS_PA_TO_TX1)
|
||||
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
|
||||
|
||||
if (status & IS_PA_TO_TX2)
|
||||
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
|
||||
if (status & IS_PA_TO_RX1) {
|
||||
struct skge_port *skge = netdev_priv(hw->dev[0]);
|
||||
|
||||
++skge->net_stats.rx_over_errors;
|
||||
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
|
||||
}
|
||||
|
||||
|
||||
if (status & IS_MAC1)
|
||||
skge_mac_intr(hw, 0);
|
||||
|
||||
if (status & IS_MAC2)
|
||||
skge_mac_intr(hw, 1);
|
||||
if (hw->dev[1]) {
|
||||
if (status & IS_XA2_F) {
|
||||
skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
|
||||
skge_txirq(hw->dev[1]);
|
||||
}
|
||||
|
||||
if (status & IS_R2_F) {
|
||||
skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
|
||||
hw->intr_mask &= ~IS_R2_F;
|
||||
netif_rx_schedule(hw->dev[1]);
|
||||
}
|
||||
|
||||
if (status & IS_PA_TO_RX2) {
|
||||
struct skge_port *skge = netdev_priv(hw->dev[1]);
|
||||
++skge->net_stats.rx_over_errors;
|
||||
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
|
||||
}
|
||||
|
||||
if (status & IS_PA_TO_TX2)
|
||||
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
|
||||
|
||||
if (status & IS_MAC2)
|
||||
skge_mac_intr(hw, 1);
|
||||
}
|
||||
|
||||
if (status & IS_HW_ERR)
|
||||
skge_error_irq(hw);
|
||||
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
spin_unlock(&hw->hw_lock);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -2957,7 +2984,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
|
||||
if (!is_valid_ether_addr(addr->sa_data))
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
|
||||
memcpy_toio(hw->regs + B2_MAC_1 + port*8,
|
||||
dev->dev_addr, ETH_ALEN);
|
||||
@ -2970,7 +2997,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
|
||||
gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
|
||||
gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
|
||||
}
|
||||
spin_unlock_bh(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3082,6 +3109,7 @@ static int skge_reset(struct skge_hw *hw)
|
||||
else
|
||||
hw->ram_size = t8 * 4096;
|
||||
|
||||
spin_lock_init(&hw->hw_lock);
|
||||
hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
|
||||
if (hw->ports > 1)
|
||||
hw->intr_mask |= IS_PORT_2;
|
||||
@ -3150,14 +3178,14 @@ static int skge_reset(struct skge_hw *hw)
|
||||
|
||||
skge_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
|
||||
spin_lock_bh(&hw->phy_lock);
|
||||
mutex_lock(&hw->phy_mutex);
|
||||
for (i = 0; i < hw->ports; i++) {
|
||||
if (hw->chip_id == CHIP_ID_GENESIS)
|
||||
genesis_reset(hw, i);
|
||||
else
|
||||
yukon_reset(hw, i);
|
||||
}
|
||||
spin_unlock_bh(&hw->phy_lock);
|
||||
mutex_unlock(&hw->phy_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3305,8 +3333,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
|
||||
}
|
||||
|
||||
hw->pdev = pdev;
|
||||
spin_lock_init(&hw->phy_lock);
|
||||
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
|
||||
mutex_init(&hw->phy_mutex);
|
||||
INIT_WORK(&hw->phy_work, skge_extirq, hw);
|
||||
|
||||
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
|
||||
if (!hw->regs) {
|
||||
@ -3334,6 +3362,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
|
||||
if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
|
||||
goto err_out_led_off;
|
||||
|
||||
if (!is_valid_ether_addr(dev->dev_addr)) {
|
||||
printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
|
||||
pci_name(pdev));
|
||||
err = -EIO;
|
||||
goto err_out_free_netdev;
|
||||
}
|
||||
|
||||
|
||||
err = register_netdev(dev);
|
||||
if (err) {
|
||||
printk(KERN_ERR PFX "%s: cannot register net device\n",
|
||||
@ -3388,11 +3424,15 @@ static void __devexit skge_remove(struct pci_dev *pdev)
|
||||
dev0 = hw->dev[0];
|
||||
unregister_netdev(dev0);
|
||||
|
||||
spin_lock_irq(&hw->hw_lock);
|
||||
hw->intr_mask = 0;
|
||||
skge_write32(hw, B0_IMSK, 0);
|
||||
spin_unlock_irq(&hw->hw_lock);
|
||||
|
||||
skge_write16(hw, B0_LED, LED_STAT_OFF);
|
||||
skge_write8(hw, B0_CTST, CS_RST_SET);
|
||||
|
||||
tasklet_kill(&hw->ext_tasklet);
|
||||
flush_scheduled_work();
|
||||
|
||||
free_irq(pdev->irq, hw);
|
||||
pci_release_regions(pdev);
|
||||
|
@ -2388,6 +2388,7 @@ struct skge_ring {
|
||||
struct skge_hw {
|
||||
void __iomem *regs;
|
||||
struct pci_dev *pdev;
|
||||
spinlock_t hw_lock;
|
||||
u32 intr_mask;
|
||||
struct net_device *dev[2];
|
||||
|
||||
@ -2399,9 +2400,8 @@ struct skge_hw {
|
||||
u32 ram_size;
|
||||
u32 ram_offset;
|
||||
u16 phy_addr;
|
||||
|
||||
struct tasklet_struct ext_tasklet;
|
||||
spinlock_t phy_lock;
|
||||
struct work_struct phy_work;
|
||||
struct mutex phy_mutex;
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -553,7 +553,7 @@ MODULE_LICENSE("GPL");
|
||||
|
||||
/* This is set up so that only a single autoprobe takes place per call.
|
||||
ISA device autoprobes on a running machine are not recommended. */
|
||||
int
|
||||
int __init
|
||||
init_module(void)
|
||||
{
|
||||
struct net_device *dev;
|
||||
|
@ -421,7 +421,7 @@ static struct net_device *dev_ultra[MAX_ULTRA32_CARDS];
|
||||
MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
int this_dev, found = 0;
|
||||
|
||||
|
drivers/net/smc911x.c | 2307 (new file; diff suppressed because it is too large)
drivers/net/smc911x.h | 835 (new file)
@ -0,0 +1,835 @@
|
||||
/*------------------------------------------------------------------------
|
||||
. smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
|
||||
.
|
||||
. Copyright (C) 2005 Sensoria Corp.
|
||||
. Derived from the unified SMC91x driver by Nicolas Pitre
|
||||
.
|
||||
. This program is free software; you can redistribute it and/or modify
|
||||
. it under the terms of the GNU General Public License as published by
|
||||
. the Free Software Foundation; either version 2 of the License, or
|
||||
. (at your option) any later version.
|
||||
.
|
||||
. This program is distributed in the hope that it will be useful,
|
||||
. but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
. GNU General Public License for more details.
|
||||
.
|
||||
. You should have received a copy of the GNU General Public License
|
||||
. along with this program; if not, write to the Free Software
|
||||
. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
.
|
||||
. Information contained in this file was obtained from the LAN9118
|
||||
. manual from SMC. To get a copy, if you really want one, you can find
|
||||
. information under www.smsc.com.
|
||||
.
|
||||
. Authors
|
||||
. Dustin McIntire <dustin@sensoria.com>
|
||||
.
|
||||
---------------------------------------------------------------------------*/
|
||||
#ifndef _SMC911X_H_
|
||||
#define _SMC911X_H_
|
||||
|
||||
/*
|
||||
* Use the DMA feature on PXA chips
|
||||
*/
|
||||
#ifdef CONFIG_ARCH_PXA
|
||||
#define SMC_USE_PXA_DMA 1
|
||||
#define SMC_USE_16BIT 0
|
||||
#define SMC_USE_32BIT 1
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Define the bus width specific IO macros
|
||||
*/
|
||||
|
||||
#if SMC_USE_16BIT
|
||||
#define SMC_inb(a, r) readb((a) + (r))
|
||||
#define SMC_inw(a, r) readw((a) + (r))
|
||||
#define SMC_inl(a, r) ((SMC_inw(a, r) & 0xFFFF)+(SMC_inw(a+2, r)<<16))
|
||||
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
|
||||
#define SMC_outw(v, a, r) writew(v, (a) + (r))
|
||||
#define SMC_outl(v, a, r) \
|
||||
do{ \
|
||||
writel(v & 0xFFFF, (a) + (r)); \
|
||||
writel(v >> 16, (a) + (r) + 2); \
|
||||
} while (0)
|
||||
#define SMC_insl(a, r, p, l) readsw((short*)((a) + (r)), p, l*2)
|
||||
#define SMC_outsl(a, r, p, l) writesw((short*)((a) + (r)), p, l*2)
|
||||
|
||||
#elif SMC_USE_32BIT
|
||||
#define SMC_inb(a, r) readb((a) + (r))
|
||||
#define SMC_inw(a, r) readw((a) + (r))
|
||||
#define SMC_inl(a, r) readl((a) + (r))
|
||||
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
|
||||
#define SMC_outl(v, a, r) writel(v, (a) + (r))
|
||||
#define SMC_insl(a, r, p, l) readsl((int*)((a) + (r)), p, l)
|
||||
#define SMC_outsl(a, r, p, l) writesl((int*)((a) + (r)), p, l)
|
||||
|
||||
#endif /* SMC_USE_16BIT */
|
||||
|
||||
|
||||
|
||||
#if SMC_USE_PXA_DMA
|
||||
#define SMC_USE_DMA
|
||||
|
||||
/*
|
||||
* Define the request and free functions
|
||||
* These are unfortunately architecture specific as no generic allocation
|
||||
* mechanism exits
|
||||
*/
|
||||
#define SMC_DMA_REQUEST(dev, handler) \
|
||||
pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
|
||||
|
||||
#define SMC_DMA_FREE(dev, dma) \
|
||||
pxa_free_dma(dma)
|
||||
|
||||
#define SMC_DMA_ACK_IRQ(dev, dma) \
|
||||
{ \
|
||||
if (DCSR(dma) & DCSR_BUSERR) { \
|
||||
printk("%s: DMA %d bus error!\n", dev->name, dma); \
|
||||
} \
|
||||
DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
|
||||
}
|
||||
|
||||
/*
|
||||
* Use a DMA for RX and TX packets.
|
||||
*/
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/arch/pxa-regs.h>
|
||||
|
||||
static dma_addr_t rx_dmabuf, tx_dmabuf;
|
||||
static int rx_dmalen, tx_dmalen;
|
||||
|
||||
#ifdef SMC_insl
|
||||
#undef SMC_insl
|
||||
#define SMC_insl(a, r, p, l) \
|
||||
smc_pxa_dma_insl(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
|
||||
|
||||
static inline void
|
||||
smc_pxa_dma_insl(struct device *dev, u_long ioaddr, u_long physaddr,
|
||||
int reg, int dma, u_char *buf, int len)
|
||||
{
|
||||
/* 64 bit alignment is required for memory to memory DMA */
|
||||
if ((long)buf & 4) {
|
||||
*((u32 *)buf) = SMC_inl(ioaddr, reg);
|
||||
buf += 4;
|
||||
len--;
|
||||
}
|
||||
|
||||
len *= 4;
|
||||
rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
|
||||
rx_dmalen = len;
|
||||
DCSR(dma) = DCSR_NODESC;
|
||||
DTADR(dma) = rx_dmabuf;
|
||||
DSADR(dma) = physaddr + reg;
|
||||
DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
|
||||
DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
|
||||
DCSR(dma) = DCSR_NODESC | DCSR_RUN;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef SMC_insw
|
||||
#undef SMC_insw
|
||||
#define SMC_insw(a, r, p, l) \
|
||||
smc_pxa_dma_insw(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
|
||||
|
||||
static inline void
|
||||
smc_pxa_dma_insw(struct device *dev, u_long ioaddr, u_long physaddr,
|
||||
int reg, int dma, u_char *buf, int len)
|
||||
{
|
||||
/* 64 bit alignment is required for memory to memory DMA */
|
||||
while ((long)buf & 6) {
|
||||
*((u16 *)buf) = SMC_inw(ioaddr, reg);
|
||||
buf += 2;
|
||||
len--;
|
||||
}
|
||||
|
||||
len *= 2;
|
||||
rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
|
||||
rx_dmalen = len;
|
||||
DCSR(dma) = DCSR_NODESC;
|
||||
DTADR(dma) = rx_dmabuf;
|
||||
DSADR(dma) = physaddr + reg;
|
||||
DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
|
||||
DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
|
||||
DCSR(dma) = DCSR_NODESC | DCSR_RUN;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef SMC_outsl
|
||||
#undef SMC_outsl
|
||||
#define SMC_outsl(a, r, p, l) \
|
||||
smc_pxa_dma_outsl(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
|
||||
|
||||
static inline void
|
||||
smc_pxa_dma_outsl(struct device *dev, u_long ioaddr, u_long physaddr,
|
||||
int reg, int dma, u_char *buf, int len)
|
||||
{
|
||||
/* 64 bit alignment is required for memory to memory DMA */
|
||||
if ((long)buf & 4) {
|
||||
SMC_outl(*((u32 *)buf), ioaddr, reg);
|
||||
buf += 4;
|
||||
len--;
|
||||
}
|
||||
|
||||
len *= 4;
|
||||
tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
|
||||
tx_dmalen = len;
|
||||
DCSR(dma) = DCSR_NODESC;
|
||||
DSADR(dma) = tx_dmabuf;
|
||||
DTADR(dma) = physaddr + reg;
|
||||
DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
|
||||
DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
|
||||
DCSR(dma) = DCSR_NODESC | DCSR_RUN;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef SMC_outsw
|
||||
#undef SMC_outsw
|
||||
#define SMC_outsw(a, r, p, l) \
|
||||
smc_pxa_dma_outsw(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
|
||||
|
||||
static inline void
|
||||
smc_pxa_dma_outsw(struct device *dev, u_long ioaddr, u_long physaddr,
|
||||
int reg, int dma, u_char *buf, int len)
|
||||
{
|
||||
/* 64 bit alignment is required for memory to memory DMA */
|
||||
while ((long)buf & 6) {
|
||||
SMC_outw(*((u16 *)buf), ioaddr, reg);
|
||||
buf += 2;
|
||||
len--;
|
||||
}
|
||||
|
||||
len *= 2;
|
||||
tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
|
||||
tx_dmalen = len;
|
||||
DCSR(dma) = DCSR_NODESC;
|
||||
DSADR(dma) = tx_dmabuf;
|
||||
DTADR(dma) = physaddr + reg;
|
||||
DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
|
||||
DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
|
||||
DCSR(dma) = DCSR_NODESC | DCSR_RUN;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* SMC_USE_PXA_DMA */
|
||||
|
||||
|
||||
/* Chip Parameters and Register Definitions */
|
||||
|
||||
#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2)
|
||||
|
||||
#define SMC911X_IO_EXTENT 0x100
|
||||
|
||||
#define SMC911X_EEPROM_LEN 7
|
||||
|
||||
/* Below are the register offsets and bit definitions
|
||||
* of the Lan911x memory space
|
||||
*/
|
||||
#define RX_DATA_FIFO (0x00)
|
||||
|
||||
#define TX_DATA_FIFO (0x20)
|
||||
#define TX_CMD_A_INT_ON_COMP_ (0x80000000)
|
||||
#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000)
|
||||
#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000)
|
||||
#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000)
|
||||
#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000)
|
||||
#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000)
|
||||
#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000)
|
||||
#define TX_CMD_A_INT_LAST_SEG_ (0x00001000)
|
||||
#define TX_CMD_A_BUF_SIZE_ (0x000007FF)
|
||||
#define TX_CMD_B_PKT_TAG_ (0xFFFF0000)
|
||||
#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000)
|
||||
#define TX_CMD_B_DISABLE_PADDING_ (0x00001000)
|
||||
#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF)
|
||||
|
||||
#define RX_STATUS_FIFO (0x40)
|
||||
#define RX_STS_PKT_LEN_ (0x3FFF0000)
|
||||
#define RX_STS_ES_ (0x00008000)
|
||||
#define RX_STS_BCST_ (0x00002000)
|
||||
#define RX_STS_LEN_ERR_ (0x00001000)
|
||||
#define RX_STS_RUNT_ERR_ (0x00000800)
|
||||
#define RX_STS_MCAST_ (0x00000400)
|
||||
#define RX_STS_TOO_LONG_ (0x00000080)
|
||||
#define RX_STS_COLL_ (0x00000040)
|
||||
#define RX_STS_ETH_TYPE_ (0x00000020)
|
||||
#define RX_STS_WDOG_TMT_ (0x00000010)
|
||||
#define RX_STS_MII_ERR_ (0x00000008)
|
||||
#define RX_STS_DRIBBLING_ (0x00000004)
|
||||
#define RX_STS_CRC_ERR_ (0x00000002)
|
||||
#define RX_STATUS_FIFO_PEEK (0x44)
|
||||
#define TX_STATUS_FIFO (0x48)
|
||||
#define TX_STS_TAG_ (0xFFFF0000)
|
||||
#define TX_STS_ES_ (0x00008000)
|
||||
#define TX_STS_LOC_ (0x00000800)
|
||||
#define TX_STS_NO_CARR_ (0x00000400)
|
||||
#define TX_STS_LATE_COLL_ (0x00000200)
|
||||
#define TX_STS_MANY_COLL_ (0x00000100)
|
||||
#define TX_STS_COLL_CNT_ (0x00000078)
|
||||
#define TX_STS_MANY_DEFER_ (0x00000004)
|
||||
#define TX_STS_UNDERRUN_ (0x00000002)
|
||||
#define TX_STS_DEFERRED_ (0x00000001)
|
||||
#define TX_STATUS_FIFO_PEEK (0x4C)
|
||||
#define ID_REV (0x50)
|
||||
#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */
|
||||
#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */
|
||||
|
||||
#define INT_CFG (0x54)
|
||||
#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */
|
||||
#define INT_CFG_INT_DEAS_CLR_ (0x00004000)
|
||||
#define INT_CFG_INT_DEAS_STS_ (0x00002000)
|
||||
#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */
|
||||
#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */
|
||||
#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */
|
||||
#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */
|
||||
|
||||
#define INT_STS (0x58)
|
||||
#define INT_STS_SW_INT_ (0x80000000) /* R/WC */
|
||||
#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */
|
||||
#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */
|
||||
#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */
|
||||
#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */
|
||||
#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */
|
||||
#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */
|
||||
#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */
|
||||
#define INT_STS_PHY_INT_ (0x00040000) /* RO */
|
||||
#define INT_STS_PME_INT_ (0x00020000) /* R/WC */
|
||||
#define INT_STS_TXSO_ (0x00010000) /* R/WC */
|
||||
#define INT_STS_RWT_ (0x00008000) /* R/WC */
|
||||
#define INT_STS_RXE_ (0x00004000) /* R/WC */
|
||||
#define INT_STS_TXE_ (0x00002000) /* R/WC */
|
||||
//#define INT_STS_ERX_ (0x00001000) /* R/WC */
|
||||
#define INT_STS_TDFU_ (0x00000800) /* R/WC */
|
||||
#define INT_STS_TDFO_ (0x00000400) /* R/WC */
|
||||
#define INT_STS_TDFA_ (0x00000200) /* R/WC */
|
||||
#define INT_STS_TSFF_ (0x00000100) /* R/WC */
|
||||
#define INT_STS_TSFL_ (0x00000080) /* R/WC */
|
||||
//#define INT_STS_RXDF_ (0x00000040) /* R/WC */
|
||||
#define INT_STS_RDFO_ (0x00000040) /* R/WC */
|
||||
#define INT_STS_RDFL_ (0x00000020) /* R/WC */
|
||||
#define INT_STS_RSFF_ (0x00000010) /* R/WC */
|
||||
#define INT_STS_RSFL_ (0x00000008) /* R/WC */
|
||||
#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */
|
||||
#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */
|
||||
#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */
|
||||
|
||||
#define INT_EN (0x5C)
|
||||
#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */
|
||||
#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */
|
||||
#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */
|
||||
#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */
|
||||
//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */
|
||||
#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */
|
||||
#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */
|
||||
#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */
|
||||
#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */
|
||||
#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */
|
||||
#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */
|
||||
#define INT_EN_RWT_EN_ (0x00008000) /* R/W */
|
||||
#define INT_EN_RXE_EN_ (0x00004000) /* R/W */
|
||||
#define INT_EN_TXE_EN_ (0x00002000) /* R/W */
|
||||
//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */
|
||||
#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */
|
||||
#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */
|
||||
#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */
|
||||
#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */
|
||||
#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */
|
||||
//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */
|
||||
#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */
|
||||
#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */
|
||||
#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */
|
||||
#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */
|
||||
#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */
|
||||
#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */
|
||||
#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */
|
||||
|
||||
#define BYTE_TEST (0x64)
|
||||
#define FIFO_INT (0x68)
|
||||
#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */
|
||||
#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */
|
||||
#define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */
|
||||
#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */
|
||||
|
||||
#define RX_CFG (0x6C)
|
||||
#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */
|
||||
#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */
|
||||
#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */
|
||||
#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */
|
||||
#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */
|
||||
#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */
|
||||
#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */
|
||||
//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */
|
||||
|
||||
#define TX_CFG (0x70)
|
||||
//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */
|
||||
//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */
|
||||
#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */
|
||||
#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */
|
||||
#define TX_CFG_TXSAO_ (0x00000004) /* R/W */
|
||||
#define TX_CFG_TX_ON_ (0x00000002) /* R/W */
|
||||
#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */
|
||||
|
||||
#define HW_CFG (0x74)
|
||||
#define HW_CFG_TTM_ (0x00200000) /* R/W */
|
||||
#define HW_CFG_SF_ (0x00100000) /* R/W */
|
||||
#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */
|
||||
#define HW_CFG_TR_ (0x00003000) /* R/W */
|
||||
#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */
|
||||
#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */
|
||||
#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */
|
||||
#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */
|
||||
#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */
|
||||
#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */
|
||||
#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */
|
||||
#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */
|
||||
#define HW_CFG_SRST_TO_ (0x00000002) /* RO */
|
||||
#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */
|
||||
|
||||
#define RX_DP_CTRL (0x78)
|
||||
#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */
|
||||
#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */
|
||||
|
||||
#define RX_FIFO_INF (0x7C)
|
||||
#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */
|
||||
#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */
|
||||
|
||||
#define TX_FIFO_INF (0x80)
|
||||
#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */
|
||||
#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */
|
||||
|
||||
#define PMT_CTRL (0x84)
|
||||
#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */
|
||||
#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */
|
||||
#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */
|
||||
#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */
|
||||
#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */
|
||||
#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */
|
||||
#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */
|
||||
#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */
|
||||
#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */
|
||||
#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */
|
||||
#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */
|
||||
#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */
|
||||
#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */
|
||||
#define PMT_CTRL_READY_ (0x00000001) /* RO */
|
||||
|
||||
#define GPIO_CFG (0x88)
|
||||
#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */
|
||||
#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */
|
||||
#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */
|
||||
#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */
|
||||
#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */
|
||||
#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */
|
||||
#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */
|
||||
#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */
|
||||
#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */
|
||||
#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */
|
||||
#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */
|
||||
#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */
|
||||
#define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */
|
||||
#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */
|
||||
#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */
|
||||
#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */
|
||||
#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */
|
||||
#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */
|
||||
|
||||
#define GPT_CFG (0x8C)
|
||||
#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */
|
||||
#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */
|
||||
|
||||
#define GPT_CNT (0x90)
|
||||
#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */
|
||||
|
||||
#define ENDIAN (0x98)
|
||||
#define FREE_RUN (0x9C)
|
||||
#define RX_DROP (0xA0)
|
||||
#define MAC_CSR_CMD (0xA4)
|
||||
#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */
|
||||
#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */
|
||||
#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */
|
||||
|
||||
#define MAC_CSR_DATA (0xA8)
|
||||
#define AFC_CFG (0xAC)
|
||||
#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */
|
||||
#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */
|
||||
#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */
|
||||
#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */
|
||||
#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */
|
||||
#define AFC_CFG_FCADD_ (0x00000002) /* R/W */
|
||||
#define AFC_CFG_FCANY_ (0x00000001) /* R/W */
|
||||
|
||||
#define E2P_CMD (0xB0)
|
||||
#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */
|
||||
#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */
|
||||
#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */
|
||||
#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */
|
||||
#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */
|
||||
#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */
|
||||
|
||||
#define E2P_DATA (0xB4)
|
||||
#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */
|
||||
/* end of LAN register offsets and bit definitions */
|
||||
|
||||
/*
|
||||
****************************************************************************
|
||||
****************************************************************************
|
||||
* MAC Control and Status Register (Indirect Address)
|
||||
* Offset (through the MAC_CSR CMD and DATA port)
|
||||
****************************************************************************
|
||||
****************************************************************************
|
||||
*
|
||||
*/
|
||||
#define MAC_CR (0x01) /* R/W */
|
||||
|
||||
/* MAC_CR - MAC Control Register */
|
||||
#define MAC_CR_RXALL_ (0x80000000)
|
||||
// TODO: delete this bit? It is not described in the data sheet.
|
||||
#define MAC_CR_HBDIS_ (0x10000000)
|
||||
#define MAC_CR_RCVOWN_ (0x00800000)
|
||||
#define MAC_CR_LOOPBK_ (0x00200000)
|
||||
#define MAC_CR_FDPX_ (0x00100000)
|
||||
#define MAC_CR_MCPAS_ (0x00080000)
|
||||
#define MAC_CR_PRMS_ (0x00040000)
|
||||
#define MAC_CR_INVFILT_ (0x00020000)
|
||||
#define MAC_CR_PASSBAD_ (0x00010000)
|
||||
#define MAC_CR_HFILT_ (0x00008000)
|
||||
#define MAC_CR_HPFILT_ (0x00002000)
|
||||
#define MAC_CR_LCOLL_ (0x00001000)
|
||||
#define MAC_CR_BCAST_ (0x00000800)
|
||||
#define MAC_CR_DISRTY_ (0x00000400)
|
||||
#define MAC_CR_PADSTR_ (0x00000100)
|
||||
#define MAC_CR_BOLMT_MASK_ (0x000000C0)
|
||||
#define MAC_CR_DFCHK_ (0x00000020)
|
||||
#define MAC_CR_TXEN_ (0x00000008)
|
||||
#define MAC_CR_RXEN_ (0x00000004)
|
||||
|
||||
#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */
|
||||
#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */
|
||||
#define HASHH (0x04) /* R/W */
|
||||
#define HASHL (0x05) /* R/W */
|
||||
|
||||
#define MII_ACC (0x06) /* R/W */
|
||||
#define MII_ACC_PHY_ADDR_ (0x0000F800)
|
||||
#define MII_ACC_MIIRINDA_ (0x000007C0)
|
||||
#define MII_ACC_MII_WRITE_ (0x00000002)
|
||||
#define MII_ACC_MII_BUSY_ (0x00000001)
|
||||
|
||||
#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */
|
||||
|
||||
#define FLOW (0x08) /* R/W */
|
||||
#define FLOW_FCPT_ (0xFFFF0000)
|
||||
#define FLOW_FCPASS_ (0x00000004)
|
||||
#define FLOW_FCEN_ (0x00000002)
|
||||
#define FLOW_FCBSY_ (0x00000001)
|
||||
|
||||
#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */
|
||||
#define VLAN1_VTI1_ (0x0000ffff)
|
||||
|
||||
#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */
|
||||
#define VLAN2_VTI2_ (0x0000ffff)
|
||||
|
||||
#define WUFF (0x0B) /* WO */
|
||||
|
||||
#define WUCSR (0x0C) /* R/W */
|
||||
#define WUCSR_GUE_ (0x00000200)
|
||||
#define WUCSR_WUFR_ (0x00000040)
|
||||
#define WUCSR_MPR_ (0x00000020)
|
||||
#define WUCSR_WAKE_EN_ (0x00000004)
|
||||
#define WUCSR_MPEN_ (0x00000002)
|
||||
|
||||
/*
|
||||
****************************************************************************
|
||||
* Chip Specific MII Defines
|
||||
****************************************************************************
|
||||
*
|
||||
* Phy register offsets and bit definitions
|
||||
*
|
||||
*/
|
||||
|
||||
#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */
|
||||
//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000)
|
||||
#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
|
||||
//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800)
|
||||
//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400)
|
||||
//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200)
|
||||
//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100)
|
||||
//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010)
|
||||
//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008)
|
||||
//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004)
|
||||
#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002)
|
||||
|
||||
#define PHY_INT_SRC ((u32)29)
|
||||
#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080)
|
||||
#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040)
|
||||
#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020)
|
||||
#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010)
|
||||
#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008)
|
||||
#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004)
|
||||
#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002)
|
||||
|
||||
#define PHY_INT_MASK ((u32)30)
|
||||
#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080)
|
||||
#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040)
|
||||
#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020)
|
||||
#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010)
|
||||
#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008)
|
||||
#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004)
|
||||
#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002)
|
||||
|
||||
#define PHY_SPECIAL ((u32)31)
|
||||
#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000)
|
||||
#define PHY_SPECIAL_RES_ ((u16)0x0040)
|
||||
#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1)
|
||||
#define PHY_SPECIAL_SPD_ ((u16)0x001C)
|
||||
#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004)
|
||||
#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014)
|
||||
#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008)
|
||||
#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018)
|
||||
|
||||
#define LAN911X_INTERNAL_PHY_ID (0x0007C000)
|
||||
|
||||
/* Chip ID values */
|
||||
#define CHIP_9115 0x115
|
||||
#define CHIP_9116 0x116
|
||||
#define CHIP_9117 0x117
|
||||
#define CHIP_9118 0x118
|
||||
|
||||
struct chip_id {
|
||||
u16 id;
|
||||
char *name;
|
||||
};
|
||||
|
||||
static const struct chip_id chip_ids[] = {
|
||||
{ CHIP_9115, "LAN9115" },
|
||||
{ CHIP_9116, "LAN9116" },
|
||||
{ CHIP_9117, "LAN9117" },
|
||||
{ CHIP_9118, "LAN9118" },
|
||||
{ 0, NULL },
|
||||
};
|
||||
|
||||
#define IS_REV_A(x) ((x & 0xFFFF)==0)
|
||||
|
||||
/*
|
||||
* Macros to abstract register access according to the data bus
|
||||
* capabilities. Please use those and not the in/out primitives.
|
||||
*/
|
||||
/* FIFO read/write macros */
|
||||
#define SMC_PUSH_DATA(p, l) SMC_outsl( ioaddr, TX_DATA_FIFO, p, (l) >> 2 )
|
||||
#define SMC_PULL_DATA(p, l) SMC_insl ( ioaddr, RX_DATA_FIFO, p, (l) >> 2 )
|
||||
#define SMC_SET_TX_FIFO(x) SMC_outl( x, ioaddr, TX_DATA_FIFO )
|
||||
#define SMC_GET_RX_FIFO() SMC_inl( ioaddr, RX_DATA_FIFO )
|
||||
|
||||
|
||||
/* I/O mapped register read/write macros */
|
||||
#define SMC_GET_TX_STS_FIFO() SMC_inl( ioaddr, TX_STATUS_FIFO )
|
||||
#define SMC_GET_RX_STS_FIFO() SMC_inl( ioaddr, RX_STATUS_FIFO )
|
||||
#define SMC_GET_RX_STS_FIFO_PEEK() SMC_inl( ioaddr, RX_STATUS_FIFO_PEEK )
|
||||
#define SMC_GET_PN() (SMC_inl( ioaddr, ID_REV ) >> 16)
|
||||
#define SMC_GET_REV() (SMC_inl( ioaddr, ID_REV ) & 0xFFFF)
|
||||
#define SMC_GET_IRQ_CFG() SMC_inl( ioaddr, INT_CFG )
|
||||
#define SMC_SET_IRQ_CFG(x) SMC_outl( x, ioaddr, INT_CFG )
|
||||
#define SMC_GET_INT() SMC_inl( ioaddr, INT_STS )
|
||||
#define SMC_ACK_INT(x) SMC_outl( x, ioaddr, INT_STS )
|
||||
#define SMC_GET_INT_EN() SMC_inl( ioaddr, INT_EN )
|
||||
#define SMC_SET_INT_EN(x) SMC_outl( x, ioaddr, INT_EN )
|
||||
#define SMC_GET_BYTE_TEST() SMC_inl( ioaddr, BYTE_TEST )
|
||||
#define SMC_SET_BYTE_TEST(x) SMC_outl( x, ioaddr, BYTE_TEST )
|
||||
#define SMC_GET_FIFO_INT() SMC_inl( ioaddr, FIFO_INT )
|
||||
#define SMC_SET_FIFO_INT(x) SMC_outl( x, ioaddr, FIFO_INT )
|
||||
#define SMC_SET_FIFO_TDA(x) \
|
||||
do { \
|
||||
unsigned long __flags; \
|
||||
int __mask; \
|
||||
local_irq_save(__flags); \
|
||||
__mask = SMC_GET_FIFO_INT() & ~(0xFF<<24); \
|
||||
SMC_SET_FIFO_INT( __mask | (x)<<24 ); \
|
||||
local_irq_restore(__flags); \
|
||||
} while (0)
|
||||
#define SMC_SET_FIFO_TSL(x) \
|
||||
do { \
|
||||
unsigned long __flags; \
|
||||
int __mask; \
|
||||
local_irq_save(__flags); \
|
||||
__mask = SMC_GET_FIFO_INT() & ~(0xFF<<16); \
|
||||
SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<16)); \
|
||||
local_irq_restore(__flags); \
|
||||
} while (0)
|
||||
#define SMC_SET_FIFO_RSA(x) \
|
||||
do { \
|
||||
unsigned long __flags; \
|
||||
int __mask; \
|
||||
local_irq_save(__flags); \
|
||||
__mask = SMC_GET_FIFO_INT() & ~(0xFF<<8); \
|
||||
SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<8)); \
|
||||
local_irq_restore(__flags); \
|
||||
} while (0)
|
||||
#define SMC_SET_FIFO_RSL(x) \
|
||||
do { \
|
||||
unsigned long __flags; \
|
||||
int __mask; \
|
||||
local_irq_save(__flags); \
|
||||
__mask = SMC_GET_FIFO_INT() & ~0xFF; \
|
||||
SMC_SET_FIFO_INT( __mask | ((x) & 0xFF)); \
|
||||
local_irq_restore(__flags); \
|
||||
} while (0)
|
||||
#define SMC_GET_RX_CFG() SMC_inl( ioaddr, RX_CFG )
|
||||
#define SMC_SET_RX_CFG(x) SMC_outl( x, ioaddr, RX_CFG )
|
||||
#define SMC_GET_TX_CFG() SMC_inl( ioaddr, TX_CFG )
|
||||
#define SMC_SET_TX_CFG(x) SMC_outl( x, ioaddr, TX_CFG )
|
||||
#define SMC_GET_HW_CFG() SMC_inl( ioaddr, HW_CFG )
|
||||
#define SMC_SET_HW_CFG(x) SMC_outl( x, ioaddr, HW_CFG )
|
||||
#define SMC_GET_RX_DP_CTRL() SMC_inl( ioaddr, RX_DP_CTRL )
|
||||
#define SMC_SET_RX_DP_CTRL(x) SMC_outl( x, ioaddr, RX_DP_CTRL )
|
||||
#define SMC_GET_PMT_CTRL() SMC_inl( ioaddr, PMT_CTRL )
|
||||
#define SMC_SET_PMT_CTRL(x) SMC_outl( x, ioaddr, PMT_CTRL )
|
||||
#define SMC_GET_GPIO_CFG() SMC_inl( ioaddr, GPIO_CFG )
|
||||
#define SMC_SET_GPIO_CFG(x) SMC_outl( x, ioaddr, GPIO_CFG )
|
||||
#define SMC_GET_RX_FIFO_INF() SMC_inl( ioaddr, RX_FIFO_INF )
|
||||
#define SMC_SET_RX_FIFO_INF(x) SMC_outl( x, ioaddr, RX_FIFO_INF )
|
||||
#define SMC_GET_TX_FIFO_INF() SMC_inl( ioaddr, TX_FIFO_INF )
|
||||
#define SMC_SET_TX_FIFO_INF(x) SMC_outl( x, ioaddr, TX_FIFO_INF )
|
||||
#define SMC_GET_GPT_CFG() SMC_inl( ioaddr, GPT_CFG )
|
||||
#define SMC_SET_GPT_CFG(x) SMC_outl( x, ioaddr, GPT_CFG )
|
||||
#define SMC_GET_RX_DROP() SMC_inl( ioaddr, RX_DROP )
|
||||
#define SMC_SET_RX_DROP(x) SMC_outl( x, ioaddr, RX_DROP )
|
||||
#define SMC_GET_MAC_CMD() SMC_inl( ioaddr, MAC_CSR_CMD )
|
||||
#define SMC_SET_MAC_CMD(x) SMC_outl( x, ioaddr, MAC_CSR_CMD )
|
||||
#define SMC_GET_MAC_DATA() SMC_inl( ioaddr, MAC_CSR_DATA )
|
||||
#define SMC_SET_MAC_DATA(x) SMC_outl( x, ioaddr, MAC_CSR_DATA )
|
||||
#define SMC_GET_AFC_CFG() SMC_inl( ioaddr, AFC_CFG )
|
||||
#define SMC_SET_AFC_CFG(x) SMC_outl( x, ioaddr, AFC_CFG )
|
||||
#define SMC_GET_E2P_CMD() SMC_inl( ioaddr, E2P_CMD )
|
||||
#define SMC_SET_E2P_CMD(x) SMC_outl( x, ioaddr, E2P_CMD )
|
||||
#define SMC_GET_E2P_DATA() SMC_inl( ioaddr, E2P_DATA )
|
||||
#define SMC_SET_E2P_DATA(x) SMC_outl( x, ioaddr, E2P_DATA )
|
||||
|
||||
/* MAC register read/write macros */
|
||||
#define SMC_GET_MAC_CSR(a,v) \
|
||||
do { \
|
||||
while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
|
||||
SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | \
|
||||
MAC_CSR_CMD_R_NOT_W_ | (a) ); \
|
||||
while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
|
||||
v = SMC_GET_MAC_DATA(); \
|
||||
} while (0)
|
||||
#define SMC_SET_MAC_CSR(a,v) \
|
||||
do { \
|
||||
while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
|
||||
SMC_SET_MAC_DATA(v); \
|
||||
SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | (a) ); \
|
||||
while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
|
||||
} while (0)
|
||||
#define SMC_GET_MAC_CR(x) SMC_GET_MAC_CSR( MAC_CR, x )
|
||||
#define SMC_SET_MAC_CR(x) SMC_SET_MAC_CSR( MAC_CR, x )
|
||||
#define SMC_GET_ADDRH(x) SMC_GET_MAC_CSR( ADDRH, x )
|
||||
#define SMC_SET_ADDRH(x) SMC_SET_MAC_CSR( ADDRH, x )
|
||||
#define SMC_GET_ADDRL(x) SMC_GET_MAC_CSR( ADDRL, x )
|
||||
#define SMC_SET_ADDRL(x) SMC_SET_MAC_CSR( ADDRL, x )
|
||||
#define SMC_GET_HASHH(x) SMC_GET_MAC_CSR( HASHH, x )
|
||||
#define SMC_SET_HASHH(x) SMC_SET_MAC_CSR( HASHH, x )
|
||||
#define SMC_GET_HASHL(x) SMC_GET_MAC_CSR( HASHL, x )
|
||||
#define SMC_SET_HASHL(x) SMC_SET_MAC_CSR( HASHL, x )
|
||||
#define SMC_GET_MII_ACC(x) SMC_GET_MAC_CSR( MII_ACC, x )
|
||||
#define SMC_SET_MII_ACC(x) SMC_SET_MAC_CSR( MII_ACC, x )
|
||||
#define SMC_GET_MII_DATA(x) SMC_GET_MAC_CSR( MII_DATA, x )
|
||||
#define SMC_SET_MII_DATA(x) SMC_SET_MAC_CSR( MII_DATA, x )
|
||||
#define SMC_GET_FLOW(x) SMC_GET_MAC_CSR( FLOW, x )
|
||||
#define SMC_SET_FLOW(x) SMC_SET_MAC_CSR( FLOW, x )
|
||||
#define SMC_GET_VLAN1(x) SMC_GET_MAC_CSR( VLAN1, x )
|
||||
#define SMC_SET_VLAN1(x) SMC_SET_MAC_CSR( VLAN1, x )
|
||||
#define SMC_GET_VLAN2(x) SMC_GET_MAC_CSR( VLAN2, x )
|
||||
#define SMC_SET_VLAN2(x) SMC_SET_MAC_CSR( VLAN2, x )
|
||||
#define SMC_SET_WUFF(x) SMC_SET_MAC_CSR( WUFF, x )
|
||||
#define SMC_GET_WUCSR(x) SMC_GET_MAC_CSR( WUCSR, x )
|
||||
#define SMC_SET_WUCSR(x) SMC_SET_MAC_CSR( WUCSR, x )
|
||||
|
||||
/* PHY register read/write macros */
|
||||
#define SMC_GET_MII(a,phy,v) \
|
||||
do { \
|
||||
u32 __v; \
|
||||
do { \
|
||||
SMC_GET_MII_ACC(__v); \
|
||||
} while ( __v & MII_ACC_MII_BUSY_ ); \
|
||||
SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \
|
||||
MII_ACC_MII_BUSY_); \
|
||||
do { \
|
||||
SMC_GET_MII_ACC(__v); \
|
||||
} while ( __v & MII_ACC_MII_BUSY_ ); \
|
||||
SMC_GET_MII_DATA(v); \
|
||||
} while (0)
|
||||
#define SMC_SET_MII(a,phy,v) \
|
||||
do { \
|
||||
u32 __v; \
|
||||
do { \
|
||||
SMC_GET_MII_ACC(__v); \
|
||||
} while ( __v & MII_ACC_MII_BUSY_ ); \
|
||||
SMC_SET_MII_DATA(v); \
|
||||
SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \
|
||||
MII_ACC_MII_BUSY_ | \
|
||||
MII_ACC_MII_WRITE_ ); \
|
||||
do { \
|
||||
SMC_GET_MII_ACC(__v); \
|
||||
} while ( __v & MII_ACC_MII_BUSY_ ); \
|
||||
} while (0)
|
||||
#define SMC_GET_PHY_BMCR(phy,x) SMC_GET_MII( MII_BMCR, phy, x )
|
||||
#define SMC_SET_PHY_BMCR(phy,x) SMC_SET_MII( MII_BMCR, phy, x )
|
||||
#define SMC_GET_PHY_BMSR(phy,x) SMC_GET_MII( MII_BMSR, phy, x )
|
||||
#define SMC_GET_PHY_ID1(phy,x) SMC_GET_MII( MII_PHYSID1, phy, x )
|
||||
#define SMC_GET_PHY_ID2(phy,x) SMC_GET_MII( MII_PHYSID2, phy, x )
|
||||
#define SMC_GET_PHY_MII_ADV(phy,x) SMC_GET_MII( MII_ADVERTISE, phy, x )
|
||||
#define SMC_SET_PHY_MII_ADV(phy,x) SMC_SET_MII( MII_ADVERTISE, phy, x )
|
||||
#define SMC_GET_PHY_MII_LPA(phy,x) SMC_GET_MII( MII_LPA, phy, x )
|
||||
#define SMC_SET_PHY_MII_LPA(phy,x) SMC_SET_MII( MII_LPA, phy, x )
|
||||
#define SMC_GET_PHY_CTRL_STS(phy,x) SMC_GET_MII( PHY_MODE_CTRL_STS, phy, x )
|
||||
#define SMC_SET_PHY_CTRL_STS(phy,x) SMC_SET_MII( PHY_MODE_CTRL_STS, phy, x )
|
||||
#define SMC_GET_PHY_INT_SRC(phy,x) SMC_GET_MII( PHY_INT_SRC, phy, x )
|
||||
#define SMC_SET_PHY_INT_SRC(phy,x) SMC_SET_MII( PHY_INT_SRC, phy, x )
|
||||
#define SMC_GET_PHY_INT_MASK(phy,x) SMC_GET_MII( PHY_INT_MASK, phy, x )
|
||||
#define SMC_SET_PHY_INT_MASK(phy,x) SMC_SET_MII( PHY_INT_MASK, phy, x )
|
||||
#define SMC_GET_PHY_SPECIAL(phy,x) SMC_GET_MII( PHY_SPECIAL, phy, x )
|
||||
|
||||
|
||||
|
||||
/* Misc read/write macros */
|
||||
|
||||
#ifndef SMC_GET_MAC_ADDR
|
||||
#define SMC_GET_MAC_ADDR(addr) \
|
||||
do { \
|
||||
unsigned int __v; \
|
||||
\
|
||||
SMC_GET_MAC_CSR(ADDRL, __v); \
|
||||
addr[0] = __v; addr[1] = __v >> 8; \
|
||||
addr[2] = __v >> 16; addr[3] = __v >> 24; \
|
||||
SMC_GET_MAC_CSR(ADDRH, __v); \
|
||||
addr[4] = __v; addr[5] = __v >> 8; \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#define SMC_SET_MAC_ADDR(addr) \
|
||||
do { \
|
||||
SMC_SET_MAC_CSR(ADDRL, \
|
||||
addr[0] | \
|
||||
(addr[1] << 8) | \
|
||||
(addr[2] << 16) | \
|
||||
(addr[3] << 24)); \
|
||||
SMC_SET_MAC_CSR(ADDRH, addr[4]|(addr[5] << 8));\
|
||||
} while (0)
|
||||
|
||||
|
||||
#define SMC_WRITE_EEPROM_CMD(cmd, addr) \
|
||||
do { \
|
||||
while (SMC_GET_E2P_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
|
||||
SMC_SET_MAC_CMD(MAC_CSR_CMD_R_NOT_W_ | a ); \
|
||||
while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
|
||||
} while (0)
|
||||
|
||||
#endif /* _SMC911X_H_ */
|
@ -732,12 +732,9 @@ static int ifport;
|
||||
struct net_device * __init smc_init(int unit)
|
||||
{
|
||||
struct net_device *dev = alloc_etherdev(sizeof(struct smc_local));
|
||||
static struct devlist *smcdev = smc_devlist;
|
||||
struct devlist *smcdev = smc_devlist;
|
||||
int err = 0;
|
||||
|
||||
#ifndef NO_AUTOPROBE
|
||||
smcdev = smc_devlist;
|
||||
#endif
|
||||
if (!dev)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
@ -1607,7 +1604,7 @@ MODULE_PARM_DESC(io, "SMC 99194 I/O base address");
|
||||
MODULE_PARM_DESC(irq, "SMC 99194 IRQ number");
|
||||
MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)");
|
||||
|
||||
int init_module(void)
|
||||
int __init init_module(void)
|
||||
{
|
||||
if (io == 0)
|
||||
printk(KERN_WARNING
|
||||
|
@ -129,6 +129,24 @@
|
||||
#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
|
||||
#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
|
||||
|
||||
#elif defined(CONFIG_MACH_LOGICPD_PXA270)
|
||||
|
||||
#define SMC_CAN_USE_8BIT 0
|
||||
#define SMC_CAN_USE_16BIT 1
|
||||
#define SMC_CAN_USE_32BIT 0
|
||||
#define SMC_IO_SHIFT 0
|
||||
#define SMC_NOWAIT 1
|
||||
#define SMC_USE_PXA_DMA 1
|
||||
|
||||
#define SMC_inb(a, r) readb((a) + (r))
|
||||
#define SMC_inw(a, r) readw((a) + (r))
|
||||
#define SMC_inl(a, r) readl((a) + (r))
|
||||
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
|
||||
#define SMC_outw(v, a, r) writew(v, (a) + (r))
|
||||
#define SMC_outl(v, a, r) writel(v, (a) + (r))
|
||||
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
|
||||
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
|
||||
|
||||
#elif defined(CONFIG_ARCH_INNOKOM) || \
|
||||
defined(CONFIG_MACH_MAINSTONE) || \
|
||||
defined(CONFIG_ARCH_PXA_IDP) || \
|
||||
|
@ -345,9 +345,9 @@ static int bcm5421_enable_fiber(struct mii_phy* phy)
|
||||
|
||||
static int bcm5461_enable_fiber(struct mii_phy* phy)
|
||||
{
|
||||
phy_write(phy, MII_NCONFIG, 0xfc0c);
|
||||
phy_write(phy, MII_BMCR, 0x4140);
|
||||
phy_write(phy, MII_NCONFIG, 0xfc0b);
|
||||
phy_write(phy, MII_NCONFIG, 0xfc0c);
|
||||
phy_write(phy, MII_BMCR, 0x4140);
|
||||
phy_write(phy, MII_NCONFIG, 0xfc0b);
|
||||
phy_write(phy, MII_BMCR, 0x0140);
|
||||
|
||||
return 0;
|
||||
|
@ -227,12 +227,12 @@ enum {
|
||||
SROMC0InfoLeaf = 27,
|
||||
MediaBlockMask = 0x3f,
|
||||
MediaCustomCSRs = (1 << 6),
|
||||
|
||||
|
||||
/* PCIPM bits */
|
||||
PM_Sleep = (1 << 31),
|
||||
PM_Snooze = (1 << 30),
|
||||
PM_Mask = PM_Sleep | PM_Snooze,
|
||||
|
||||
|
||||
/* SIAStatus bits */
|
||||
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
|
||||
NWayRestart = (1 << 12),
|
||||
@ -858,7 +858,7 @@ static void de_stop_rxtx (struct de_private *de)
|
||||
return;
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
|
||||
printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
|
||||
}
|
||||
|
||||
@ -931,7 +931,7 @@ static void de_set_media (struct de_private *de)
|
||||
macmode |= FullDuplex;
|
||||
else
|
||||
macmode &= ~FullDuplex;
|
||||
|
||||
|
||||
if (netif_msg_link(de)) {
|
||||
printk(KERN_INFO "%s: set link %s\n"
|
||||
KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
|
||||
@ -966,9 +966,9 @@ static void de21040_media_timer (unsigned long data)
|
||||
u32 status = dr32(SIAStatus);
|
||||
unsigned int carrier;
|
||||
unsigned long flags;
|
||||
|
||||
|
||||
carrier = (status & NetCxnErr) ? 0 : 1;
|
||||
|
||||
|
||||
if (carrier) {
|
||||
if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
|
||||
goto no_link_yet;
|
||||
@ -985,7 +985,7 @@ static void de21040_media_timer (unsigned long data)
|
||||
return;
|
||||
}
|
||||
|
||||
de_link_down(de);
|
||||
de_link_down(de);
|
||||
|
||||
if (de->media_lock)
|
||||
return;
|
||||
@ -1039,7 +1039,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
|
||||
return 0;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -1050,9 +1050,9 @@ static void de21041_media_timer (unsigned long data)
|
||||
u32 status = dr32(SIAStatus);
|
||||
unsigned int carrier;
|
||||
unsigned long flags;
|
||||
|
||||
|
||||
carrier = (status & NetCxnErr) ? 0 : 1;
|
||||
|
||||
|
||||
if (carrier) {
|
||||
if ((de->media_type == DE_MEDIA_TP_AUTO ||
|
||||
de->media_type == DE_MEDIA_TP ||
|
||||
@ -1072,7 +1072,7 @@ static void de21041_media_timer (unsigned long data)
|
||||
return;
|
||||
}
|
||||
|
||||
de_link_down(de);
|
||||
de_link_down(de);
|
||||
|
||||
/* if media type locked, don't switch media */
|
||||
if (de->media_lock)
|
||||
@ -1124,7 +1124,7 @@ static void de21041_media_timer (unsigned long data)
|
||||
u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
|
||||
de_next_media(de, next_states, ARRAY_SIZE(next_states));
|
||||
}
|
||||
|
||||
|
||||
set_media:
|
||||
spin_lock_irqsave(&de->lock, flags);
|
||||
de_stop_rxtx(de);
|
||||
@ -1148,7 +1148,7 @@ static void de_media_interrupt (struct de_private *de, u32 status)
|
||||
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
BUG_ON(!(status & LinkFail));
|
||||
|
||||
if (netif_carrier_ok(de->dev)) {
|
||||
@ -1227,7 +1227,7 @@ static int de_init_hw (struct de_private *de)
|
||||
int rc;
|
||||
|
||||
de_adapter_wake(de);
|
||||
|
||||
|
||||
macmode = dr32(MacMode) & ~MacModeClear;
|
||||
|
||||
rc = de_reset_mac(de);
|
||||
@ -1413,7 +1413,7 @@ static int de_close (struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
netif_carrier_off(dev);
|
||||
spin_unlock_irqrestore(&de->lock, flags);
|
||||
|
||||
|
||||
free_irq(dev->irq, dev);
|
||||
|
||||
de_free_rings(de);
|
||||
@ -1441,7 +1441,7 @@ static void de_tx_timeout (struct net_device *dev)
|
||||
|
||||
spin_unlock_irq(&de->lock);
|
||||
enable_irq(dev->irq);
|
||||
|
||||
|
||||
/* Update the error counts. */
|
||||
__de_get_stats(de);
|
||||
|
||||
@ -1451,7 +1451,7 @@ static void de_tx_timeout (struct net_device *dev)
|
||||
de_init_rings(de);
|
||||
|
||||
de_init_hw(de);
|
||||
|
||||
|
||||
netif_wake_queue(dev);
|
||||
}
|
||||
|
||||
@ -1459,7 +1459,7 @@ static void __de_get_regs(struct de_private *de, u8 *buf)
|
||||
{
|
||||
int i;
|
||||
u32 *rbuf = (u32 *)buf;
|
||||
|
||||
|
||||
/* read all CSRs */
|
||||
for (i = 0; i < DE_NUM_REGS; i++)
|
||||
rbuf[i] = dr32(i * 8);
|
||||
@ -1474,7 +1474,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
|
||||
ecmd->transceiver = XCVR_INTERNAL;
|
||||
ecmd->phy_address = 0;
|
||||
ecmd->advertising = de->media_advertise;
|
||||
|
||||
|
||||
switch (de->media_type) {
|
||||
case DE_MEDIA_AUI:
|
||||
ecmd->port = PORT_AUI;
|
||||
@ -1489,7 +1489,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
|
||||
ecmd->speed = SPEED_10;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
if (dr32(MacMode) & FullDuplex)
|
||||
ecmd->duplex = DUPLEX_FULL;
|
||||
else
|
||||
@ -1529,7 +1529,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
|
||||
if (ecmd->autoneg == AUTONEG_ENABLE &&
|
||||
(!(ecmd->advertising & ADVERTISED_Autoneg)))
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
switch (ecmd->port) {
|
||||
case PORT_AUI:
|
||||
new_media = DE_MEDIA_AUI;
|
||||
@ -1554,22 +1554,22 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
|
||||
return -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
|
||||
|
||||
|
||||
if ((new_media == de->media_type) &&
|
||||
(media_lock == de->media_lock) &&
|
||||
(ecmd->advertising == de->media_advertise))
|
||||
return 0; /* nothing to change */
|
||||
|
||||
|
||||
de_link_down(de);
|
||||
de_stop_rxtx(de);
|
||||
|
||||
|
||||
de->media_type = new_media;
|
||||
de->media_lock = media_lock;
|
||||
de->media_advertise = ecmd->advertising;
|
||||
de_set_media(de);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1817,7 +1817,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
|
||||
case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
|
||||
default: de->media_type = DE_MEDIA_TP_AUTO; break;
|
||||
}
|
||||
|
||||
|
||||
if (netif_msg_probe(de))
|
||||
printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
|
||||
de->board_idx, ofs,
|
||||
@ -1886,7 +1886,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
|
||||
de->media[idx].csr13,
|
||||
de->media[idx].csr14,
|
||||
de->media[idx].csr15);
|
||||
|
||||
|
||||
} else if (netif_msg_probe(de))
|
||||
printk("\n");
|
||||
|
||||
@ -2118,7 +2118,7 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
|
||||
|
||||
spin_unlock_irq(&de->lock);
|
||||
enable_irq(dev->irq);
|
||||
|
||||
|
||||
/* Update the error counts. */
|
||||
__de_get_stats(de);
|
||||
|
||||
|
File diff suppressed because it is too large
@ -38,11 +38,11 @@
|
||||
/*
|
||||
** EISA Register Address Map
|
||||
*/
|
||||
#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
|
||||
#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
|
||||
#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
|
||||
#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
|
||||
#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
|
||||
#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
|
||||
#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
|
||||
#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
|
||||
#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
|
||||
#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
|
||||
#define EISA_CR iobase+0x0c84 /* EISA Control Register */
|
||||
#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
|
||||
#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
|
||||
@ -1008,8 +1008,8 @@ struct de4x5_ioctl {
|
||||
unsigned char __user *data; /* Pointer to the data buffer */
|
||||
};
|
||||
|
||||
/*
|
||||
** Recognised commands for the driver
|
||||
/*
|
||||
** Recognised commands for the driver
|
||||
*/
|
||||
#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
|
||||
#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
|
||||
|
@ -50,7 +50,7 @@
|
||||
forget to unmap PCI mapped skbs.
|
||||
|
||||
Alan Cox <alan@redhat.com>
|
||||
Added new PCI identifiers provided by Clear Zhang at ALi
|
||||
Added new PCI identifiers provided by Clear Zhang at ALi
|
||||
for their 1563 ethernet device.
|
||||
|
||||
TODO
|
||||
|
@ -96,11 +96,11 @@ static const char *block_name[] __devinitdata = {
|
||||
* tulip_build_fake_mediatable - Build a fake mediatable entry.
|
||||
* @tp: Ptr to the tulip private data.
|
||||
*
|
||||
* Some cards like the 3x5 HSC cards (J3514A) do not have a standard
|
||||
* Some cards like the 3x5 HSC cards (J3514A) do not have a standard
|
||||
* srom and can not be handled under the fixup routine. These cards
|
||||
* still need a valid mediatable entry for correct csr12 setup and
|
||||
* still need a valid mediatable entry for correct csr12 setup and
|
||||
* mii handling.
|
||||
*
|
||||
*
|
||||
* Since this is currently a parisc-linux specific function, the
|
||||
* #ifdef __hppa__ should completely optimize this function away for
|
||||
* non-parisc hardware.
|
||||
@ -140,7 +140,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
|
||||
tp->flags |= HAS_PHY_IRQ;
|
||||
tp->csr12_shadow = -1;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
void __devinit tulip_parse_eeprom(struct net_device *dev)
|
||||
|
@ -139,22 +139,22 @@ int tulip_poll(struct net_device *dev, int *budget)
|
||||
}
|
||||
/* Acknowledge current RX interrupt sources. */
|
||||
iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
|
||||
|
||||
|
||||
|
||||
|
||||
/* If we own the next entry, it is a new packet. Send it up. */
|
||||
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
|
||||
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
|
||||
|
||||
|
||||
|
||||
|
||||
if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
|
||||
break;
|
||||
|
||||
|
||||
if (tulip_debug > 5)
|
||||
printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
|
||||
dev->name, entry, status);
|
||||
if (--rx_work_limit < 0)
|
||||
goto not_done;
|
||||
|
||||
|
||||
if ((status & 0x38008300) != 0x0300) {
|
||||
if ((status & 0x38000300) != 0x0300) {
|
||||
/* Ingore earlier buffers. */
|
||||
@ -180,7 +180,7 @@ int tulip_poll(struct net_device *dev, int *budget)
|
||||
/* Omit the four octet CRC from the length. */
|
||||
short pkt_len = ((status >> 16) & 0x7ff) - 4;
|
||||
struct sk_buff *skb;
|
||||
|
||||
|
||||
#ifndef final_version
|
||||
if (pkt_len > 1518) {
|
||||
printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
|
||||
@ -213,7 +213,7 @@ int tulip_poll(struct net_device *dev, int *budget)
|
||||
} else { /* Pass up the skb already on the Rx ring. */
|
||||
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
|
||||
pkt_len);
|
||||
|
||||
|
||||
#ifndef final_version
|
||||
if (tp->rx_buffers[entry].mapping !=
|
||||
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
|
||||
@ -225,17 +225,17 @@ int tulip_poll(struct net_device *dev, int *budget)
|
||||
skb->head, temp);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
|
||||
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||
|
||||
|
||||
tp->rx_buffers[entry].skb = NULL;
|
||||
tp->rx_buffers[entry].mapping = 0;
|
||||
}
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
|
||||
|
||||
netif_receive_skb(skb);
|
||||
|
||||
|
||||
dev->last_rx = jiffies;
|
||||
tp->stats.rx_packets++;
|
||||
tp->stats.rx_bytes += pkt_len;
|
||||
@ -245,12 +245,12 @@ int tulip_poll(struct net_device *dev, int *budget)
|
||||
entry = (++tp->cur_rx) % RX_RING_SIZE;
|
||||
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
|
||||
tulip_refill_rx(dev);
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
/* New ack strategy... irq does not ack Rx any longer
|
||||
hopefully this helps */
|
||||
|
||||
|
||||
/* Really bad things can happen here... If new packet arrives
|
||||
* and an irq arrives (tx or just due to occasionally unset
|
||||
* mask), it will be acked by irq handler, but new thread
|
||||
@ -259,28 +259,28 @@ int tulip_poll(struct net_device *dev, int *budget)
|
||||
* tomorrow (night 011029). If it will not fail, we won
|
||||
* finally: amount of IO did not increase at all. */
|
||||
} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
|
||||
|
||||
|
||||
done:
|
||||
|
||||
|
||||
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
|
||||
|
||||
|
||||
/* We use this simplistic scheme for IM. It's proven by
|
||||
real life installations. We can have IM enabled
|
||||
continuesly but this would cause unnecessary latency.
|
||||
Unfortunely we can't use all the NET_RX_* feedback here.
|
||||
This would turn on IM for devices that is not contributing
|
||||
to backlog congestion with unnecessary latency.
|
||||
|
||||
continuesly but this would cause unnecessary latency.
|
||||
Unfortunely we can't use all the NET_RX_* feedback here.
|
||||
This would turn on IM for devices that is not contributing
|
||||
to backlog congestion with unnecessary latency.
|
||||
|
||||
We monitor the the device RX-ring and have:
|
||||
|
||||
|
||||
HW Interrupt Mitigation either ON or OFF.
|
||||
|
||||
ON: More then 1 pkt received (per intr.) OR we are dropping
|
||||
|
||||
ON: More then 1 pkt received (per intr.) OR we are dropping
|
||||
OFF: Only 1 pkt received
|
||||
|
||||
|
||||
Note. We only use min and max (0, 15) settings from mit_table */
|
||||
|
||||
|
||||
|
||||
|
||||
if( tp->flags & HAS_INTR_MITIGATION) {
|
||||
if( received > 1 ) {
|
||||
if( ! tp->mit_on ) {
|
||||
@ -297,20 +297,20 @@ done:
|
||||
}
|
||||
|
||||
#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
|
||||
|
||||
|
||||
dev->quota -= received;
|
||||
*budget -= received;
|
||||
|
||||
|
||||
tulip_refill_rx(dev);
|
||||
|
||||
|
||||
/* If RX ring is not full we are out of memory. */
|
||||
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
|
||||
|
||||
|
||||
/* Remove us from polling list and enable RX intr. */
|
||||
|
||||
|
||||
netif_rx_complete(dev);
|
||||
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
|
||||
|
||||
|
||||
/* The last op happens after poll completion. Which means the following:
|
||||
* 1. it can race with disabling irqs in irq handler
|
||||
* 2. it can race with dise/enabling irqs in other poll threads
|
||||
@ -321,9 +321,9 @@ done:
|
||||
* due to races in masking and due to too late acking of already
|
||||
* processed irqs. But it must not result in losing events.
|
||||
*/
|
||||
|
||||
|
||||
return 0;
|
||||
|
||||
|
||||
not_done:
|
||||
if (!received) {
|
||||
|
||||
@ -331,29 +331,29 @@ done:
|
||||
}
|
||||
dev->quota -= received;
|
||||
*budget -= received;
|
||||
|
||||
|
||||
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
|
||||
tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
|
||||
tulip_refill_rx(dev);
|
||||
|
||||
|
||||
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
|
||||
|
||||
|
||||
return 1;
|
||||
|
||||
|
||||
|
||||
|
||||
oom: /* Executed with RX ints disabled */
|
||||
|
||||
|
||||
|
||||
|
||||
/* Start timer, stop polling, but do not enable rx interrupts. */
|
||||
mod_timer(&tp->oom_timer, jiffies+1);
|
||||
|
||||
|
||||
/* Think: timer_pending() was an explicit signature of bug.
|
||||
* Timer can be pending now but fired and completed
|
||||
* before we did netif_rx_complete(). See? We would lose it. */
|
||||
|
||||
|
||||
/* remove ourselves from the polling list */
|
||||
netif_rx_complete(dev);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -521,9 +521,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
|
||||
/* Let's see whether the interrupt really is for us */
|
||||
csr5 = ioread32(ioaddr + CSR5);
|
||||
|
||||
if (tp->flags & HAS_PHY_IRQ)
|
||||
|
||||
handled = phy_interrupt (dev);
|
||||
|
||||
|
||||
if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
|
||||
return IRQ_RETVAL(handled);
|
||||
|
||||
@ -538,17 +538,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
|
||||
/* Mask RX intrs and add the device to poll list. */
|
||||
iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
|
||||
netif_rx_schedule(dev);
|
||||
|
||||
|
||||
if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
/* Acknowledge the interrupt sources we handle here ASAP
|
||||
the poll function does Rx and RxNoBuf acking */
|
||||
|
||||
|
||||
iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
|
||||
|
||||
#else
|
||||
|
||||
/* Acknowledge all of the current interrupt sources ASAP. */
|
||||
iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
|
||||
|
||||
@ -559,11 +559,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
|
||||
}
|
||||
|
||||
#endif /* CONFIG_TULIP_NAPI */
|
||||
|
||||
|
||||
if (tulip_debug > 4)
|
||||
printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
|
||||
dev->name, csr5, ioread32(ioaddr + CSR5));
|
||||
|
||||
|
||||
|
||||
if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
|
||||
unsigned int dirty_tx;
|
||||
@ -737,17 +737,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
|
||||
#ifdef CONFIG_TULIP_NAPI
|
||||
if (rxd)
|
||||
csr5 &= ~RxPollInt;
|
||||
} while ((csr5 & (TxNoBuf |
TxDied |
TxIntr |
TimerInt |
/* Abnormal intr. */
RxDied |
TxFIFOUnderflow |
TxJabber |
TPLnkFail |
SytemError )) != 0);
#else
|
||||
} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
|
||||
|
||||
tulip_refill_rx(dev);
|
||||
|
@ -140,7 +140,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
|
||||
spin_unlock_irqrestore(&tp->mii_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
/* Establish sync by sending 32 logic ones. */
|
||||
for (i = 32; i >= 0; i--) {
|
||||
iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
|
||||
|
@ -259,7 +259,7 @@ enum t21143_csr6_bits {
|
||||
There are no ill effects from too-large receive rings. */
|
||||
|
||||
#define TX_RING_SIZE 32
|
||||
#define RX_RING_SIZE 128
|
||||
|
||||
#define MEDIA_MASK 31
|
||||
|
||||
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
|
||||
|
@ -1224,7 +1224,7 @@ out:
|
||||
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
|
||||
* is the DM910X and the on chip ULi devices
|
||||
*/
|
||||
|
||||
|
||||
static int tulip_uli_dm_quirk(struct pci_dev *pdev)
|
||||
{
|
||||
if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
|
||||
@ -1297,7 +1297,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
|
||||
*/
|
||||
|
||||
/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
|
||||
aligned. Aries might need this too. The Saturn errata are not
|
||||
|
||||
pretty reading but thankfully it's an old 486 chipset.
|
||||
|
||||
2. The dreaded SiS496 486 chipset. Same workaround as Intel
|
||||
@ -1500,7 +1500,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_MIPS_COBALT
|
||||
if ((pdev->bus->number == 0) &&
|
||||
|
||||
((PCI_SLOT(pdev->devfn) == 7) ||
|
||||
(PCI_SLOT(pdev->devfn) == 12))) {
|
||||
/* Cobalt MAC address in first EEPROM locations. */
|
||||
|
@ -9,7 +9,7 @@
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
|
||||
|
||||
*/
|
||||
|
||||
#define DRV_NAME "uli526x"
|
||||
@ -185,7 +185,7 @@ struct uli526x_board_info {
|
||||
|
||||
/* NIC SROM data */
|
||||
unsigned char srom[128];
|
||||
u8 init;
|
||||
|
||||
};
|
||||
|
||||
enum uli526x_offsets {
|
||||
@ -258,7 +258,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
|
||||
struct uli526x_board_info *db; /* board information structure */
|
||||
struct net_device *dev;
|
||||
int i, err;
|
||||
|
||||
|
||||
ULI526X_DBUG(0, "uli526x_init_one()", 0);
|
||||
|
||||
if (!printed_version++)
|
||||
@ -316,7 +316,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
|
||||
err = -ENOMEM;
|
||||
goto err_out_nomem;
|
||||
}
|
||||
|
||||
|
||||
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
|
||||
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
|
||||
db->buf_pool_start = db->buf_pool_ptr;
|
||||
@ -324,14 +324,14 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
|
||||
|
||||
db->chip_id = ent->driver_data;
|
||||
db->ioaddr = pci_resource_start(pdev, 0);
|
||||
|
||||
|
||||
db->pdev = pdev;
|
||||
db->init = 1;
|
||||
|
||||
|
||||
dev->base_addr = db->ioaddr;
|
||||
dev->irq = pdev->irq;
|
||||
pci_set_drvdata(pdev, dev);
|
||||
|
||||
|
||||
/* Register some necessary functions */
|
||||
dev->open = &uli526x_open;
|
||||
dev->hard_start_xmit = &uli526x_start_xmit;
|
||||
@ -341,7 +341,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
|
||||
dev->ethtool_ops = &netdev_ethtool_ops;
|
||||
spin_lock_init(&db->lock);
|
||||
|
||||
|
||||
|
||||
/* read 64 word srom data */
|
||||
for (i = 0; i < 64; i++)
|
||||
((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
|
||||
@ -374,7 +374,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
|
||||
goto err_out_res;
|
||||
|
||||
printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
|
||||
|
||||
|
||||
for (i = 0; i < 6; i++)
|
||||
printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
|
||||
printk(", irq %d.\n", dev->irq);
|
||||
@ -389,7 +389,7 @@ err_out_nomem:
|
||||
if(db->desc_pool_ptr)
|
||||
pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
|
||||
db->desc_pool_ptr, db->desc_pool_dma_ptr);
|
||||
|
||||
|
||||
if(db->buf_pool_ptr != NULL)
|
||||
pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
|
||||
db->buf_pool_ptr, db->buf_pool_dma_ptr);
|
||||
@ -433,7 +433,7 @@ static int uli526x_open(struct net_device *dev)
|
||||
{
|
||||
int ret;
|
||||
struct uli526x_board_info *db = netdev_priv(dev);
|
||||
|
||||
|
||||
ULI526X_DBUG(0, "uli526x_open", 0);
|
||||
|
||||
ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
|
||||
@ -454,7 +454,7 @@ static int uli526x_open(struct net_device *dev)
|
||||
/* CR6 operation mode decision */
|
||||
db->cr6_data |= ULI526X_TXTH_256;
|
||||
db->cr0_data = CR0_DEFAULT;
|
||||
|
||||
|
||||
/* Initialize ULI526X board */
|
||||
uli526x_init(dev);
|
||||
|
||||
@ -604,7 +604,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
/* Restore CR7 to enable interrupt */
|
||||
spin_unlock_irqrestore(&db->lock, flags);
|
||||
outl(db->cr7_data, dev->base_addr + DCR7);
|
||||
|
||||
|
||||
/* free this SKB */
|
||||
dev_kfree_skb(skb);
|
||||
|
||||
@ -782,7 +782,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
|
||||
struct sk_buff *skb;
|
||||
int rxlen;
|
||||
u32 rdes0;
|
||||
|
||||
|
||||
rxptr = db->rx_ready_ptr;
|
||||
|
||||
while(db->rx_avail_cnt) {
|
||||
@ -821,7 +821,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
|
||||
if ( !(rdes0 & 0x8000) ||
|
||||
((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
|
||||
skb = rxptr->rx_skb_ptr;
|
||||
|
||||
|
||||
/* Good packet, send to upper layer */
|
||||
/* Short packets use a new SKB */
|
||||
if ( (rxlen < RX_COPY_SIZE) &&
|
||||
@ -841,7 +841,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
|
||||
dev->last_rx = jiffies;
|
||||
db->stats.rx_packets++;
|
||||
db->stats.rx_bytes += rxlen;
|
||||
|
||||
|
||||
} else {
|
||||
/* Reuse SKB buffer when the packet is error */
|
||||
ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
|
||||
@ -911,7 +911,7 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
|
||||
SUPPORTED_100baseT_Full |
|
||||
SUPPORTED_Autoneg |
|
||||
SUPPORTED_MII);
|
||||
|
||||
|
||||
ecmd->advertising = (ADVERTISED_10baseT_Half |
|
||||
ADVERTISED_10baseT_Full |
|
||||
ADVERTISED_100baseT_Half |
|
||||
@ -924,13 +924,13 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
|
||||
ecmd->phy_address = db->phy_addr;
|
||||
|
||||
ecmd->transceiver = XCVR_EXTERNAL;
|
||||
|
||||
|
||||
ecmd->speed = 10;
|
||||
ecmd->duplex = DUPLEX_HALF;
|
||||
|
||||
|
||||
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
|
||||
{
|
||||
ecmd->speed = 100;
|
||||
|
||||
}
|
||||
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
|
||||
{
|
||||
@ -939,11 +939,11 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
|
||||
if(db->link_failed)
|
||||
{
|
||||
ecmd->speed = -1;
|
||||
ecmd->duplex = -1;
|
||||
|
||||
}
|
||||
|
||||
|
||||
if (db->media_mode & ULI526X_AUTO)
|
||||
{
|
||||
|
||||
ecmd->autoneg = AUTONEG_ENABLE;
|
||||
}
|
||||
|
||||
@ -964,15 +964,15 @@ static void netdev_get_drvinfo(struct net_device *dev,
|
||||
|
||||
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
|
||||
struct uli526x_board_info *np = netdev_priv(dev);
|
||||
|
||||
|
||||
ULi_ethtool_gset(np, cmd);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 netdev_get_link(struct net_device *dev) {
|
||||
struct uli526x_board_info *np = netdev_priv(dev);
|
||||
|
||||
|
||||
if(np->link_failed)
|
||||
return 0;
|
||||
else
|
||||
@ -1005,11 +1005,11 @@ static void uli526x_timer(unsigned long data)
|
||||
struct uli526x_board_info *db = netdev_priv(dev);
|
||||
unsigned long flags;
|
||||
u8 TmpSpeed=10;
|
||||
|
||||
|
||||
//ULI526X_DBUG(0, "uli526x_timer()", 0);
|
||||
spin_lock_irqsave(&db->lock, flags);
|
||||
|
||||
|
||||
|
||||
/* Dynamic reset ULI526X : system error or transmit time-out */
|
||||
tmp_cr8 = inl(db->ioaddr + DCR8);
|
||||
if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
|
||||
@ -1021,9 +1021,9 @@ static void uli526x_timer(unsigned long data)
|
||||
/* TX polling kick monitor */
|
||||
if ( db->tx_packet_cnt &&
|
||||
time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
|
||||
outl(0x1, dev->base_addr + DCR1); // Tx polling again
|
||||
outl(0x1, dev->base_addr + DCR1); // Tx polling again
|
||||
|
||||
// TX Timeout
|
||||
// TX Timeout
|
||||
if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
|
||||
db->reset_TXtimeout++;
|
||||
db->wait_reset = 1;
|
||||
@ -1073,7 +1073,7 @@ static void uli526x_timer(unsigned long data)
|
||||
uli526x_sense_speed(db) )
|
||||
db->link_failed = 1;
|
||||
uli526x_process_mode(db);
|
||||
|
||||
|
||||
if(db->link_failed==0)
|
||||
{
|
||||
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
|
||||
@ -1404,7 +1404,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
|
||||
phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
|
||||
|
||||
if ( (phy_mode & 0x24) == 0x24 ) {
|
||||
|
||||
|
||||
phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
|
||||
if(phy_mode&0x8000)
|
||||
phy_mode = 0x8000;
|
||||
@ -1414,7 +1414,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
|
||||
phy_mode = 0x2000;
|
||||
else
|
||||
phy_mode = 0x1000;
|
||||
|
||||
|
||||
/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
|
||||
switch (phy_mode) {
|
||||
case 0x1000: db->op_mode = ULI526X_10MHF; break;
|
||||
@ -1442,7 +1442,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
|
||||
static void uli526x_set_phyxcer(struct uli526x_board_info *db)
|
||||
{
|
||||
u16 phy_reg;
|
||||
|
||||
|
||||
/* Phyxcer capability setting */
|
||||
phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
|
||||
|
||||
@ -1457,7 +1457,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
|
||||
case ULI526X_100MHF: phy_reg |= 0x80; break;
|
||||
case ULI526X_100MFD: phy_reg |= 0x100; break;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
/* Write new capability to Phyxcer Reg4 */
|
||||
@ -1556,7 +1556,7 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
|
||||
/* Write a word data to PHY controller */
|
||||
for ( i = 0x8000; i > 0; i >>= 1)
|
||||
phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
@ -1574,7 +1574,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
|
||||
return phy_readby_cr10(iobase, phy_addr, offset);
|
||||
/* M5261/M5263 Chip */
|
||||
ioaddr = iobase + DCR9;
|
||||
|
||||
|
||||
/* Send 33 synchronization clock to Phy controller */
|
||||
for (i = 0; i < 35; i++)
|
||||
phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
|
||||
@ -1610,7 +1610,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
|
||||
static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
|
||||
{
|
||||
unsigned long ioaddr,cr10_value;
|
||||
|
||||
|
||||
ioaddr = iobase + DCR10;
|
||||
cr10_value = phy_addr;
|
||||
cr10_value = (cr10_value<<5) + offset;
|
||||
@ -1629,7 +1629,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
|
||||
static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
|
||||
{
|
||||
unsigned long ioaddr,cr10_value;
|
||||
|
||||
|
||||
ioaddr = iobase + DCR10;
|
||||
cr10_value = phy_addr;
|
||||
cr10_value = (cr10_value<<5) + offset;
|
||||
@ -1659,7 +1659,7 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
|
||||
static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
|
||||
{
|
||||
u16 phy_data;
|
||||
|
||||
|
||||
outl(0x50000 , ioaddr);
|
||||
udelay(1);
|
||||
phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
|
||||
|
@ -38,12 +38,12 @@
|
||||
Copyright (C) 2001 Manfred Spraul
|
||||
* ethtool support (jgarzik)
|
||||
* Replace some MII-related magic numbers with constants (jgarzik)
|
||||
|
||||
|
||||
TODO:
|
||||
* enable pci_power_off
|
||||
* Wake-On-LAN
|
||||
*/
|
||||
|
||||
|
||||
#define DRV_NAME "winbond-840"
|
||||
#define DRV_VERSION "1.01-d"
|
||||
#define DRV_RELDATE "Nov-17-2001"
|
||||
@ -57,7 +57,7 @@ c-help-name: Winbond W89c840 PCI Ethernet support
|
||||
c-help-symbol: CONFIG_WINBOND_840
|
||||
c-help: This driver is for the Winbond W89c840 chip. It also works with
|
||||
c-help: the TX9882 chip on the Compex RL100-ATX board.
|
||||
c-help: More specific information and updates are available from
|
||||
|
||||
c-help: http://www.scyld.com/network/drivers.html
|
||||
*/
|
||||
|
||||
@ -207,7 +207,7 @@ Test with 'ping -s 10000' on a fast computer.
|
||||
|
||||
*/
|
||||
|
||||
|
||||
|
||||
|
||||
/*
|
||||
PCI probe table.
|
||||
@ -374,7 +374,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
|
||||
static struct ethtool_ops netdev_ethtool_ops;
|
||||
static int netdev_close(struct net_device *dev);
|
||||
|
||||
|
||||
|
||||
|
||||
static int __devinit w840_probe1 (struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent)
|
||||
@ -434,7 +434,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
|
||||
np->mii_if.mdio_read = mdio_read;
|
||||
np->mii_if.mdio_write = mdio_write;
|
||||
np->base_addr = ioaddr;
|
||||
|
||||
|
||||
pci_set_drvdata(pdev, dev);
|
||||
|
||||
if (dev->mem_start)
|
||||
@ -510,7 +510,7 @@ err_out_netdev:
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
|
||||
often serial bit streams generated by the host processor.
|
||||
The example below is for the common 93c46 EEPROM, 64 16 bit words. */
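The bit-banged example that comment refers to lies outside this hunk. As a hedged illustration of the pattern it describes (present one command bit, pulse the shift clock, repeat), clocking a 93c46 READ command out could look roughly like this; the EE_* constants and the delay helper are placeholder names for the sketch, not identifiers taken from this driver.

/* Illustrative only: shift out a 93c46 READ opcode (start bit, opcode "10",
 * then six address bits), one bit per clock pulse. */
#define EE_CS		0x01			/* chip select */
#define EE_SHIFT_CLK	0x02			/* shift clock */
#define EE_DATA_IN	0x04			/* data bit driven to the EEPROM */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)	/* flush the posted write */

static void eeprom_send_read_cmd(void __iomem *ee_addr, int location)
{
	int read_cmd = location | (6 << 6);	/* "110" start/opcode + word address */
	int i;

	for (i = 10; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? EE_DATA_IN : 0;

		iowrite32(EE_CS | dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(EE_CS | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay(ee_addr);
	}
}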
|
||||
@ -660,7 +660,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static int netdev_open(struct net_device *dev)
|
||||
{
|
||||
struct netdev_private *np = netdev_priv(dev);
|
||||
@ -731,7 +731,7 @@ static int update_link(struct net_device *dev)
|
||||
dev->name, np->phys[0]);
|
||||
netif_carrier_on(dev);
|
||||
}
|
||||
|
||||
|
||||
if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
|
||||
/* If the link partner doesn't support autonegotiation
|
||||
* the MII detects it's abilities with the "parallel detection".
|
||||
@ -761,7 +761,7 @@ static int update_link(struct net_device *dev)
|
||||
result |= 0x20000000;
|
||||
if (result != np->csr6 && debug)
|
||||
printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
|
||||
dev->name, fasteth ? 100 : 10,
|
||||
|
||||
duplex ? "full" : "half", np->phys[0]);
|
||||
return result;
|
||||
}
|
||||
@ -947,7 +947,7 @@ static void init_registers(struct net_device *dev)
|
||||
iowrite32(i, ioaddr + PCIBusCfg);
|
||||
|
||||
np->csr6 = 0;
|
||||
/* 128 byte Tx threshold;
|
||||
|
||||
Transmit on; Receive on; */
|
||||
update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
|
||||
|
||||
@ -1584,7 +1584,7 @@ static int netdev_close(struct net_device *dev)
|
||||
static void __devexit w840_remove1 (struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *dev = pci_get_drvdata(pdev);
|
||||
|
||||
|
||||
if (dev) {
|
||||
struct netdev_private *np = netdev_priv(dev);
|
||||
unregister_netdev(dev);
|
||||
@ -1640,7 +1640,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
|
||||
|
||||
spin_unlock_wait(&dev->xmit_lock);
|
||||
synchronize_irq(dev->irq);
|
||||
|
||||
|
||||
np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
|
||||
|
||||
/* no more hardware accesses behind this line. */
|
||||
|
@ -1,11 +1,11 @@
|
||||
/*
|
||||
* xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
|
||||
|
||||
*
|
||||
* This software is (C) by the respective authors, and licensed under the GPL
|
||||
* License.
|
||||
*
|
||||
* Written by Arjan van de Ven for Red Hat, Inc.
|
||||
* Based on work by Jeff Garzik, Doug Ledford and Donald Becker
|
||||
|
||||
*
|
||||
* This software may be used and distributed according to the terms
|
||||
* of the GNU General Public License, incorporated herein by reference.
|
||||
@ -93,7 +93,7 @@ struct xircom_private {
|
||||
|
||||
unsigned long io_port;
|
||||
int open;
|
||||
|
||||
|
||||
/* transmit_used is the rotating counter that indicates which transmit
|
||||
descriptor has to be used next */
|
||||
int transmit_used;
|
||||
@ -153,10 +153,10 @@ static struct pci_device_id xircom_pci_table[] = {
|
||||
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
|
||||
|
||||
static struct pci_driver xircom_ops = {
|
||||
.name = "xircom_cb",
|
||||
.id_table = xircom_pci_table,
|
||||
.probe = xircom_probe,
|
||||
.remove = xircom_remove,
|
||||
.name = "xircom_cb",
|
||||
.id_table = xircom_pci_table,
|
||||
.probe = xircom_probe,
|
||||
.remove = xircom_remove,
|
||||
.suspend =NULL,
|
||||
.resume =NULL
|
||||
};
|
||||
@ -174,7 +174,7 @@ static void print_binary(unsigned int number)
|
||||
buffer[i2++]='1';
|
||||
else
|
||||
buffer[i2++]='0';
|
||||
if ((i&3)==0)
|
||||
|
||||
buffer[i2++]=' ';
|
||||
}
|
||||
printk("%s\n",buffer);
|
||||
@ -196,10 +196,10 @@ static struct ethtool_ops netdev_ethtool_ops = {
|
||||
|
||||
/* xircom_probe is the code that gets called on device insertion.
|
||||
it sets up the hardware and registers the device with the network layer.
|
||||
|
||||
|
||||
TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
|
||||
first two packets that get sent, and pump hates that.
|
||||
|
||||
|
||||
*/
|
||||
static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
{
|
||||
@ -209,7 +209,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
|
||||
unsigned long flags;
|
||||
unsigned short tmp16;
|
||||
enter("xircom_probe");
|
||||
|
||||
|
||||
/* First do the PCI initialisation */
|
||||
|
||||
if (pci_enable_device(pdev))
|
||||
@ -217,24 +217,24 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
|
||||
|
||||
/* disable all powermanagement */
|
||||
pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
|
||||
|
||||
|
||||
pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
|
||||
|
||||
/* clear PCI status, if any */
|
||||
pci_read_config_word (pdev,PCI_STATUS, &tmp16);
|
||||
|
||||
|
||||
pci_write_config_word (pdev, PCI_STATUS,tmp16);
|
||||
|
||||
|
||||
pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
|
||||
|
||||
|
||||
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
|
||||
printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
Before changing the hardware, allocate the memory.
|
||||
This way, we can fail gracefully if not enough memory
|
||||
is available.
|
||||
|
||||
*/
|
||||
dev = alloc_etherdev(sizeof(struct xircom_private));
|
||||
if (!dev) {
|
||||
@ -242,13 +242,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
|
||||
goto device_fail;
|
||||
}
|
||||
private = netdev_priv(dev);
|
||||
|
||||
|
||||
/* Allocate the send/receive buffers */
|
||||
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
|
||||
if (private->rx_buffer == NULL) {
|
||||
printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
|
||||
goto rx_buf_fail;
|
||||
}
|
||||
}
|
||||
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
|
||||
if (private->tx_buffer == NULL) {
|
||||
printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
|
||||
@ -265,11 +265,11 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
|
||||
spin_lock_init(&private->lock);
|
||||
dev->irq = pdev->irq;
|
||||
dev->base_addr = private->io_port;
|
||||
|
||||
|
||||
initialize_card(private);
|
||||
read_mac_address(private);
|
||||
setup_descriptors(private);
|
||||
|
||||
|
||||
dev->open = &xircom_open;
|
||||
dev->hard_start_xmit = &xircom_start_xmit;
|
||||
dev->stop = &xircom_close;
|
||||
@ -285,19 +285,19 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
|
||||
printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
|
||||
goto reg_fail;
|
||||
}
|
||||
|
||||
|
||||
printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq);
|
||||
/* start the transmitter to get a heartbeat */
|
||||
/* TODO: send 2 dummy packets here */
|
||||
transceiver_voodoo(private);
|
||||
|
||||
|
||||
spin_lock_irqsave(&private->lock,flags);
|
||||
activate_transmitter(private);
|
||||
activate_receiver(private);
|
||||
spin_unlock_irqrestore(&private->lock,flags);
|
||||
|
||||
|
||||
trigger_receive(private);
|
||||
|
||||
|
||||
leave("xircom_probe");
|
||||
return 0;
|
||||
|
||||
@ -332,7 +332,7 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
|
||||
free_netdev(dev);
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
leave("xircom_remove");
|
||||
}
|
||||
}
|
||||
|
||||
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
|
||||
{
|
||||
@ -346,11 +346,11 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
|
||||
spin_lock(&card->lock);
|
||||
status = inl(card->io_port+CSR5);
|
||||
|
||||
#ifdef DEBUG
|
||||
|
||||
print_binary(status);
|
||||
printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
|
||||
printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
|
||||
#endif
|
||||
|
||||
/* Handle shared irq and hotplug */
|
||||
if (status == 0 || status == 0xffffffff) {
|
||||
spin_unlock(&card->lock);
|
||||
@ -366,21 +366,21 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
|
||||
netif_carrier_on(dev);
|
||||
else
|
||||
netif_carrier_off(dev);
|
||||
|
||||
|
||||
}
|
||||
|
||||
/* Clear all remaining interrupts */
|
||||
/* Clear all remaining interrupts */
|
||||
status |= 0xffffffff; /* FIXME: make this clear only the
|
||||
real existing bits */
|
||||
outl(status,card->io_port+CSR5);
|
||||
|
||||
|
||||
for (i=0;i<NUMDESCRIPTORS;i++)
|
||||
|
||||
for (i=0;i<NUMDESCRIPTORS;i++)
|
||||
investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
|
||||
for (i=0;i<NUMDESCRIPTORS;i++)
|
||||
for (i=0;i<NUMDESCRIPTORS;i++)
|
||||
investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
|
||||
|
||||
|
||||
|
||||
spin_unlock(&card->lock);
|
||||
leave("xircom_interrupt");
|
||||
return IRQ_HANDLED;
|
||||
@ -393,38 +393,38 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
int nextdescriptor;
|
||||
int desc;
|
||||
enter("xircom_start_xmit");
|
||||
|
||||
|
||||
card = netdev_priv(dev);
|
||||
spin_lock_irqsave(&card->lock,flags);
|
||||
|
||||
|
||||
/* First see if we can free some descriptors */
|
||||
for (desc=0;desc<NUMDESCRIPTORS;desc++)
|
||||
for (desc=0;desc<NUMDESCRIPTORS;desc++)
|
||||
investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
|
||||
|
||||
|
||||
|
||||
|
||||
nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
|
||||
desc = card->transmit_used;
|
||||
|
||||
|
||||
/* only send the packet if the descriptor is free */
|
||||
if (card->tx_buffer[4*desc]==0) {
|
||||
/* Copy the packet data; zero the memory first as the card
|
||||
sometimes sends more than you ask it to. */
|
||||
|
||||
|
||||
memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
|
||||
memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
|
||||
|
||||
|
||||
|
||||
|
||||
/* FIXME: The specification tells us that the length we send HAS to be a multiple of
|
||||
4 bytes. */
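As a worked example of that FIXME, rounding the length up to the next multiple of 4 before it is written into the descriptor could look like the fragment below; this is an illustration of the suggested fix, not what the driver currently does (it writes skb->len unchanged).

/* Illustrative only: pad the transmit length to a 4-byte multiple. The
 * memset above already zeroes the buffer, so the padding bytes are 0. */
unsigned int padded_len = (skb->len + 3) & ~3U;
card->tx_buffer[4*desc+1] = padded_len;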
|
||||
|
||||
|
||||
card->tx_buffer[4*desc+1] = skb->len;
|
||||
if (desc == NUMDESCRIPTORS-1)
|
||||
card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */
|
||||
|
||||
card->tx_buffer[4*desc+1] |= 0xF0000000;
|
||||
/* 0xF0... means want interrupts*/
|
||||
/* 0xF0... means want interrupts*/
|
||||
card->tx_skb[desc] = skb;
|
||||
|
||||
|
||||
wmb();
|
||||
/* This gives the descriptor to the card */
|
||||
card->tx_buffer[4*desc] = 0x80000000;
|
||||
@ -433,18 +433,18 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
}
|
||||
card->transmit_used = nextdescriptor;
|
||||
leave("xircom-start_xmit - sent");
|
||||
leave("xircom-start_xmit - sent");
|
||||
spin_unlock_irqrestore(&card->lock,flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/* Uh oh... no free descriptor... drop the packet */
|
||||
netif_stop_queue(dev);
|
||||
spin_unlock_irqrestore(&card->lock,flags);
|
||||
trigger_transmit(card);
|
||||
|
||||
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
|
||||
leave("xircom_open - No IRQ");
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
||||
xircom_up(xp);
|
||||
xp->open = 1;
|
||||
leave("xircom_open");
|
||||
@ -473,31 +473,31 @@ static int xircom_close(struct net_device *dev)
|
||||
{
|
||||
struct xircom_private *card;
|
||||
unsigned long flags;
|
||||
|
||||
|
||||
enter("xircom_close");
|
||||
card = netdev_priv(dev);
|
||||
netif_stop_queue(dev); /* we don't want new packets */
|
||||
|
||||
|
||||
|
||||
spin_lock_irqsave(&card->lock,flags);
|
||||
|
||||
|
||||
disable_all_interrupts(card);
|
||||
#if 0
|
||||
#if 0
|
||||
/* We can enable this again once we send dummy packets on ifconfig ethX up */
|
||||
deactivate_receiver(card);
|
||||
deactivate_transmitter(card);
|
||||
#endif
|
||||
#endif
|
||||
remove_descriptors(card);
|
||||
|
||||
|
||||
spin_unlock_irqrestore(&card->lock,flags);
|
||||
|
||||
|
||||
card->open = 0;
|
||||
free_irq(dev->irq,dev);
|
||||
|
||||
|
||||
leave("xircom_close");
|
||||
|
||||
|
||||
return 0;
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
@ -506,8 +506,8 @@ static struct net_device_stats *xircom_get_stats(struct net_device *dev)
|
||||
{
|
||||
struct xircom_private *card = netdev_priv(dev);
|
||||
return &card->stats;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
static void xircom_poll_controller(struct net_device *dev)
|
||||
@ -540,7 +540,7 @@ static void initialize_card(struct xircom_private *card)
|
||||
outl(val, card->io_port + CSR0);
|
||||
|
||||
|
||||
val = 0; /* Value 0x00 is a safe and conservative value
|
||||
val = 0; /* Value 0x00 is a safe and conservative value
|
||||
for the PCI configuration settings */
|
||||
outl(val, card->io_port + CSR0);
|
||||
|
||||
@ -617,23 +617,23 @@ static void setup_descriptors(struct xircom_private *card)
|
||||
|
||||
/* Rx Descr2: address of the buffer
|
||||
we store the buffer at the 2nd half of the page */
|
||||
|
||||
|
||||
address = (unsigned long) card->rx_dma_handle;
|
||||
card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
|
||||
/* Rx Desc3: address of 2nd buffer -> 0 */
|
||||
card->rx_buffer[i*4 + 3] = 0;
|
||||
}
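For readers following the i*4 indexing used throughout this function, one descriptor can be pictured as the struct below. The struct name is purely illustrative; the driver itself keeps the ring as a flat u32 array exactly as indexed above.

/* Hypothetical overlay of one 16-byte descriptor as addressed via
 * card->rx_buffer[i*4 + 0..3] (and likewise for the transmit ring). */
struct xircom_desc {
	u32 status;	/* [i*4+0]: 0x80000000 = descriptor owned by the card */
	u32 control;	/* [i*4+1]: buffer length; bit 25 = last in the ring */
	u32 buffer1;	/* [i*4+2]: DMA address of the data buffer */
	u32 buffer2;	/* [i*4+3]: second buffer address, unused here -> 0 */
};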
|
||||
|
||||
|
||||
wmb();
|
||||
/* Write the receive descriptor ring address to the card */
|
||||
address = (unsigned long) card->rx_dma_handle;
|
||||
val = cpu_to_le32(address);
|
||||
val = cpu_to_le32(address);
|
||||
outl(val, card->io_port + CSR3); /* Receive descr list address */
|
||||
|
||||
|
||||
/* transmit descriptors */
|
||||
memset(card->tx_buffer, 0, 128); /* clear the descriptors */
|
||||
|
||||
|
||||
for (i=0;i<NUMDESCRIPTORS;i++ ) {
|
||||
/* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
|
||||
card->tx_buffer[i*4 + 0] = 0x00000000;
|
||||
@ -641,7 +641,7 @@ static void setup_descriptors(struct xircom_private *card)
|
||||
card->tx_buffer[i*4 + 1] = 1536;
|
||||
if (i==NUMDESCRIPTORS-1)
|
||||
card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */
|
||||
|
||||
|
||||
/* Tx Descr2: address of the buffer
|
||||
we store the buffer at the 2nd half of the page */
|
||||
address = (unsigned long) card->tx_dma_handle;
|
||||
@ -748,7 +748,7 @@ static int receive_active(struct xircom_private *card)
|
||||
activate_receiver enables the receiver on the card.
|
||||
Before being allowed to active the receiver, the receiver
|
||||
must be completely de-activated. To achieve this,
|
||||
this code actually disables the receiver first; then it waits for the
|
||||
this code actually disables the receiver first; then it waits for the
|
||||
receiver to become inactive, then it activates the receiver and then
|
||||
it waits for the receiver to be active.
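Restated compactly, the sequence that comment describes is sketched below; the bounded busy-waits are purely illustrative (the hunks that follow do not show how the driver actually waits), while the CSR6 enable bit (0x2) and receive_active() are taken from the code further down.

/* Illustrative only: disable -> wait idle -> enable -> wait active. */
unsigned int val = inl(card->io_port + CSR6);	/* operation mode */
int i;

outl(val & ~2, card->io_port + CSR6);		/* stop the receiver */
for (i = 0; i < 1000 && receive_active(card); i++)
	udelay(10);				/* wait until it is idle */
outl(val | 2, card->io_port + CSR6);		/* restart the receiver */
for (i = 0; i < 1000 && !receive_active(card); i++)
	udelay(10);				/* wait until it reports active */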
|
||||
|
||||
@ -762,13 +762,13 @@ static void activate_receiver(struct xircom_private *card)
|
||||
|
||||
|
||||
val = inl(card->io_port + CSR6); /* Operation mode */
|
||||
|
||||
|
||||
/* If the "active" bit is set and the receiver is already
|
||||
active, no need to do the expensive thing */
|
||||
if ((val&2) && (receive_active(card)))
|
||||
return;
|
||||
|
||||
|
||||
|
||||
|
||||
val = val & ~2; /* disable the receiver */
|
||||
outl(val, card->io_port + CSR6);
|
||||
|
||||
@ -805,7 +805,7 @@ static void activate_receiver(struct xircom_private *card)
|
||||
|
||||
/*
|
||||
deactivate_receiver disables the receiver on the card.
|
||||
To achieve this this code disables the receiver first;
|
||||
To achieve this this code disables the receiver first;
|
||||
then it waits for the receiver to become inactive.
|
||||
|
||||
must be called with the lock held and interrupts disabled.
|
||||
@ -840,7 +840,7 @@ static void deactivate_receiver(struct xircom_private *card)
|
||||
activate_transmitter enables the transmitter on the card.
|
||||
Before being allowed to active the transmitter, the transmitter
|
||||
must be completely de-activated. To achieve this,
|
||||
this code actually disables the transmitter first; then it waits for the
|
||||
this code actually disables the transmitter first; then it waits for the
|
||||
transmitter to become inactive, then it activates the transmitter and then
|
||||
it waits for the transmitter to be active again.
|
||||
|
||||
@ -856,7 +856,7 @@ static void activate_transmitter(struct xircom_private *card)
|
||||
val = inl(card->io_port + CSR6); /* Operation mode */
|
||||
|
||||
/* If the "active" bit is set and the receiver is already
|
||||
active, no need to do the expensive thing */
|
||||
active, no need to do the expensive thing */
|
||||
if ((val&(1<<13)) && (transmit_active(card)))
|
||||
return;
|
||||
|
||||
@ -896,7 +896,7 @@ static void activate_transmitter(struct xircom_private *card)
|
||||
|
||||
/*
|
||||
deactivate_transmitter disables the transmitter on the card.
|
||||
To achieve this this code disables the transmitter first;
|
||||
To achieve this this code disables the transmitter first;
|
||||
then it waits for the transmitter to become inactive.
|
||||
|
||||
must be called with the lock held and interrupts disabled.
|
||||
@ -990,7 +990,7 @@ static void disable_all_interrupts(struct xircom_private *card)
|
||||
{
|
||||
unsigned int val;
|
||||
enter("enable_all_interrupts");
|
||||
|
||||
|
||||
val = 0; /* disable all interrupts */
|
||||
outl(val, card->io_port + CSR7);
|
||||
|
||||
@ -1031,8 +1031,8 @@ static int enable_promisc(struct xircom_private *card)
|
||||
unsigned int val;
|
||||
enter("enable_promisc");
|
||||
|
||||
val = inl(card->io_port + CSR6);
|
||||
val = val | (1 << 6);
|
||||
val = inl(card->io_port + CSR6);
|
||||
val = val | (1 << 6);
|
||||
outl(val, card->io_port + CSR6);
|
||||
|
||||
leave("enable_promisc");
|
||||
@ -1042,7 +1042,7 @@ static int enable_promisc(struct xircom_private *card)
|
||||
|
||||
|
||||
|
||||
/*
|
||||
/*
|
||||
link_status() checks the link status and will return 0 for no link, 10 for a 10mbit link and 100 for... guess what.
|
||||
|
||||
Must be called in locked state with interrupts disabled
|
||||
@ -1051,15 +1051,15 @@ static int link_status(struct xircom_private *card)
|
||||
{
|
||||
unsigned int val;
|
||||
enter("link_status");
|
||||
|
||||
|
||||
val = inb(card->io_port + CSR12);
|
||||
|
||||
|
||||
if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
|
||||
return 10;
|
||||
if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
|
||||
return 100;
|
||||
|
||||
/* If we get here -> no link at all */
|
||||
|
||||
/* If we get here -> no link at all */
|
||||
|
||||
leave("link_status");
|
||||
return 0;
|
||||
@ -1071,7 +1071,7 @@ static int link_status(struct xircom_private *card)
|
||||
|
||||
/*
|
||||
read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
|
||||
|
||||
|
||||
This function will take the spinlock itself and can, as a result, not be called with the lock held.
|
||||
*/
|
||||
static void read_mac_address(struct xircom_private *card)
|
||||
@ -1081,7 +1081,7 @@ static void read_mac_address(struct xircom_private *card)
|
||||
int i;
|
||||
|
||||
enter("read_mac_address");
|
||||
|
||||
|
||||
spin_lock_irqsave(&card->lock, flags);
|
||||
|
||||
outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
|
||||
@ -1095,7 +1095,7 @@ static void read_mac_address(struct xircom_private *card)
|
||||
outl(i + 3, card->io_port + CSR10);
|
||||
data_count = inl(card->io_port + CSR9) & 0xff;
|
||||
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
|
||||
/*
|
||||
/*
|
||||
* This is it. We have the data we want.
|
||||
*/
|
||||
for (j = 0; j < 6; j++) {
|
||||
@ -1136,12 +1136,12 @@ static void transceiver_voodoo(struct xircom_private *card)
|
||||
spin_lock_irqsave(&card->lock, flags);
|
||||
|
||||
outl(0x0008, card->io_port + CSR15);
|
||||
udelay(25);
|
||||
udelay(25);
|
||||
outl(0xa8050000, card->io_port + CSR15);
|
||||
udelay(25);
|
||||
outl(0xa00f0000, card->io_port + CSR15);
|
||||
udelay(25);
|
||||
|
||||
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
|
||||
netif_start_queue(card->dev);
|
||||
@ -1163,15 +1163,15 @@ static void xircom_up(struct xircom_private *card)
|
||||
|
||||
spin_lock_irqsave(&card->lock, flags);
|
||||
|
||||
|
||||
|
||||
enable_link_interrupt(card);
|
||||
enable_transmit_interrupt(card);
|
||||
enable_receive_interrupt(card);
|
||||
enable_common_interrupts(card);
|
||||
enable_promisc(card);
|
||||
|
||||
|
||||
/* The card can have received packets already, read them away now */
|
||||
for (i=0;i<NUMDESCRIPTORS;i++)
|
||||
for (i=0;i<NUMDESCRIPTORS;i++)
|
||||
investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
|
||||
|
||||
|
||||
@ -1185,15 +1185,15 @@ static void xircom_up(struct xircom_private *card)
|
||||
/* Bufferoffset is in BYTES */
|
||||
static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset)
|
||||
{
|
||||
int status;
|
||||
|
||||
int status;
|
||||
|
||||
enter("investigate_read_descriptor");
|
||||
status = card->rx_buffer[4*descnr];
|
||||
|
||||
|
||||
if ((status > 0)) { /* packet received */
|
||||
|
||||
|
||||
/* TODO: discard error packets */
|
||||
|
||||
|
||||
short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */
|
||||
struct sk_buff *skb;
|
||||
|
||||
@ -1216,7 +1216,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
|
||||
dev->last_rx = jiffies;
|
||||
card->stats.rx_packets++;
|
||||
card->stats.rx_bytes += pkt_len;
|
||||
|
||||
|
||||
out:
|
||||
/* give the buffer back to the card */
|
||||
card->rx_buffer[4*descnr] = 0x80000000;
|
||||
@ -1234,9 +1234,9 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
|
||||
int status;
|
||||
|
||||
enter("investigate_write_descriptor");
|
||||
|
||||
|
||||
status = card->tx_buffer[4*descnr];
|
||||
#if 0
|
||||
#if 0
|
||||
if (status & 0x8000) { /* Major error */
|
||||
printk(KERN_ERR "Major transmit error status %x \n", status);
|
||||
card->tx_buffer[4*descnr] = 0;
|
||||
@ -1258,7 +1258,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
|
||||
}
|
||||
|
||||
leave("investigate_write_descriptor");
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
@ -1271,8 +1271,8 @@ static int __init xircom_init(void)
|
||||
static void __exit xircom_exit(void)
|
||||
{
|
||||
pci_unregister_driver(&xircom_ops);
|
||||
}
|
||||
}
|
||||
|
||||
module_init(xircom_init)
|
||||
module_init(xircom_init)
|
||||
module_exit(xircom_exit)
|
||||
|
||||
|
@ -307,7 +307,7 @@ enum velocity_owner {
|
||||
#define TX_QUEUE_NO 4
|
||||
|
||||
#define MAX_HW_MIB_COUNTER 32
|
||||
#define VELOCITY_MIN_MTU (1514-14)
|
||||
#define VELOCITY_MIN_MTU (64)
|
||||
#define VELOCITY_MAX_MTU (9000)
|
||||
|
||||
/*
|
||||
|
@ -50,10 +50,6 @@ static const char* devname = "PCI200SYN";
|
||||
static int pci_clock_freq = 33000000;
|
||||
#define CLOCK_BASE pci_clock_freq
|
||||
|
||||
#define PCI_VENDOR_ID_GORAMO 0x10B5 /* uses PLX:9050 ID - this card */
|
||||
#define PCI_DEVICE_ID_PCI200SYN 0x9050 /* doesn't have its own ID */
|
||||
|
||||
|
||||
/*
|
||||
* PLX PCI9052 local configuration and shared runtime registers.
|
||||
* This structure can be used to access 9052 registers (memory mapped).
|
||||
@ -262,7 +258,7 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
|
||||
int i;
|
||||
card_t *card = pci_get_drvdata(pdev);
|
||||
|
||||
for(i = 0; i < 2; i++)
|
||||
for (i = 0; i < 2; i++)
|
||||
if (card->ports[i].card) {
|
||||
struct net_device *dev = port_to_dev(&card->ports[i]);
|
||||
unregister_hdlc_device(dev);
|
||||
@ -385,6 +381,15 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
|
||||
" %u RX packets rings\n", ramsize / 1024, ramphys,
|
||||
pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
|
||||
|
||||
if (pdev->subsystem_device == PCI_DEVICE_ID_PLX_9050) {
|
||||
printk(KERN_ERR "Detected PCI200SYN card with old "
|
||||
"configuration data.\n");
|
||||
printk(KERN_ERR "See <http://www.kernel.org/pub/"
|
||||
"linux/utils/net/hdlc/pci200syn/> for update.\n");
|
||||
printk(KERN_ERR "The card will stop working with"
|
||||
" future versions of Linux if not updated.\n");
|
||||
}
|
||||
|
||||
if (card->tx_ring_buffers < 1) {
|
||||
printk(KERN_ERR "pci200syn: RAM test failed\n");
|
||||
pci200_pci_remove_one(pdev);
|
||||
@ -396,7 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
|
||||
writew(readw(p) | 0x0040, p);
|
||||
|
||||
/* Allocate IRQ */
|
||||
if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
|
||||
if (request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
|
||||
printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
|
||||
pdev->irq);
|
||||
pci200_pci_remove_one(pdev);
|
||||
@ -406,7 +411,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
|
||||
|
||||
sca_init(card, 0);
|
||||
|
||||
for(i = 0; i < 2; i++) {
|
||||
for (i = 0; i < 2; i++) {
|
||||
port_t *port = &card->ports[i];
|
||||
struct net_device *dev = port_to_dev(port);
|
||||
hdlc_device *hdlc = dev_to_hdlc(dev);
|
||||
@ -425,7 +430,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
|
||||
hdlc->xmit = sca_xmit;
|
||||
port->settings.clock_type = CLOCK_EXT;
|
||||
port->card = card;
|
||||
if(register_hdlc_device(dev)) {
|
||||
if (register_hdlc_device(dev)) {
|
||||
printk(KERN_ERR "pci200syn: unable to register hdlc "
|
||||
"device\n");
|
||||
port->card = NULL;
|
||||
@ -445,8 +450,10 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
|
||||
|
||||
|
||||
static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
|
||||
{ PCI_VENDOR_ID_GORAMO, PCI_DEVICE_ID_PCI200SYN, PCI_ANY_ID,
|
||||
PCI_ANY_ID, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
|
||||
PCI_DEVICE_ID_PLX_9050, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
|
||||
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
|
||||
{ 0, }
|
||||
};
|
||||
|
||||
|
@ -235,7 +235,35 @@ config IPW2200_MONITOR
|
||||
promiscuous mode via the Wireless Tool's Monitor mode. While in this
|
||||
mode, no packets can be sent.
|
||||
|
||||
config IPW_QOS
|
||||
config IPW2200_RADIOTAP
|
||||
bool "Enable radiotap format 802.11 raw packet support"
|
||||
depends on IPW2200_MONITOR
|
||||
|
||||
config IPW2200_PROMISCUOUS
|
||||
bool "Enable creation of a RF radiotap promiscuous interface"
|
||||
depends on IPW2200_MONITOR
|
||||
select IPW2200_RADIOTAP
|
||||
---help---
|
||||
Enables the creation of a second interface prefixed 'rtap'.
|
||||
This second interface will provide every received frame in radiotap
|
||||
format.
|
||||
|
||||
This is useful for performing wireless network analysis while
|
||||
maintaining an active association.
|
||||
|
||||
Example usage:
|
||||
|
||||
% modprobe ipw2200 rtap_iface=1
|
||||
% ifconfig rtap0 up
|
||||
% tethereal -i rtap0
|
||||
|
||||
If you do not specify 'rtap_iface=1' as a module parameter then
|
||||
the rtap interface will not be created and you will need to turn
|
||||
it on via sysfs:
|
||||
|
||||
% echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
|
||||
|
||||
config IPW2200_QOS
|
||||
bool "Enable QoS support"
|
||||
depends on IPW2200 && EXPERIMENTAL
|
||||
|
||||
@ -503,6 +531,23 @@ config PRISM54
|
||||
say M here and read <file:Documentation/modules.txt>. The module
|
||||
will be called prism54.ko.
|
||||
|
||||
config USB_ZD1201
|
||||
tristate "USB ZD1201 based Wireless device support"
|
||||
depends on USB && NET_RADIO
|
||||
select FW_LOADER
|
||||
---help---
|
||||
Say Y if you want to use wireless LAN adapters based on the ZyDAS
|
||||
ZD1201 chip.
|
||||
|
||||
This driver makes the adapter appear as a normal Ethernet interface,
|
||||
typically on wlan0.
|
||||
|
||||
The zd1201 device requires external firmware to be loaded.
|
||||
This can be found at http://linux-lc100020.sourceforge.net/
|
||||
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called zd1201.
|
||||
|
||||
source "drivers/net/wireless/hostap/Kconfig"
|
||||
source "drivers/net/wireless/bcm43xx/Kconfig"
|
||||
|
||||
|
@ -40,3 +40,5 @@ obj-$(CONFIG_BCM43XX) += bcm43xx/
|
||||
# 16-bit wireless PCMCIA client drivers
|
||||
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
|
||||
obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
|
||||
|
||||
obj-$(CONFIG_USB_ZD1201) += zd1201.o
|
||||
|
@ -47,6 +47,7 @@
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/pci.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <net/ieee80211.h>
|
||||
|
||||
#include "airo.h"
|
||||
|
||||
@ -467,6 +468,8 @@ static int do8bitIO = 0;
|
||||
#define RID_ECHOTEST_RESULTS 0xFF71
|
||||
#define RID_BSSLISTFIRST 0xFF72
|
||||
#define RID_BSSLISTNEXT 0xFF73
|
||||
#define RID_WPA_BSSLISTFIRST 0xFF74
|
||||
#define RID_WPA_BSSLISTNEXT 0xFF75
|
||||
|
||||
typedef struct {
|
||||
u16 cmd;
|
||||
@ -739,6 +742,14 @@ typedef struct {
|
||||
u16 extSoftCap;
|
||||
} CapabilityRid;
|
||||
|
||||
|
||||
/* Only present on firmware >= 5.30.17 */
|
||||
typedef struct {
|
||||
u16 unknown[4];
|
||||
u8 fixed[12]; /* WLAN management frame */
|
||||
u8 iep[624];
|
||||
} BSSListRidExtra;
|
||||
|
||||
typedef struct {
|
||||
u16 len;
|
||||
u16 index; /* First is 0 and 0xffff means end of list */
|
||||
@ -767,6 +778,9 @@ typedef struct {
|
||||
} fh;
|
||||
u16 dsChannel;
|
||||
u16 atimWindow;
|
||||
|
||||
/* Only present on firmware >= 5.30.17 */
|
||||
BSSListRidExtra extra;
|
||||
} BSSListRid;
|
||||
|
||||
typedef struct {
|
||||
@ -1140,8 +1154,6 @@ struct airo_info {
|
||||
char defindex; // Used with auto wep
|
||||
struct proc_dir_entry *proc_entry;
|
||||
spinlock_t aux_lock;
|
||||
unsigned long flags;
|
||||
#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
|
||||
#define FLAG_RADIO_OFF 0 /* User disabling of MAC */
|
||||
#define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */
|
||||
#define FLAG_RADIO_MASK 0x03
|
||||
@ -1151,6 +1163,7 @@ struct airo_info {
|
||||
#define FLAG_UPDATE_MULTI 5
|
||||
#define FLAG_UPDATE_UNI 6
|
||||
#define FLAG_802_11 7
|
||||
#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
|
||||
#define FLAG_PENDING_XMIT 9
|
||||
#define FLAG_PENDING_XMIT11 10
|
||||
#define FLAG_MPI 11
|
||||
@ -1158,17 +1171,19 @@ struct airo_info {
|
||||
#define FLAG_COMMIT 13
|
||||
#define FLAG_RESET 14
|
||||
#define FLAG_FLASHING 15
|
||||
#define JOB_MASK 0x2ff0000
|
||||
#define JOB_DIE 16
|
||||
#define JOB_XMIT 17
|
||||
#define JOB_XMIT11 18
|
||||
#define JOB_STATS 19
|
||||
#define JOB_PROMISC 20
|
||||
#define JOB_MIC 21
|
||||
#define JOB_EVENT 22
|
||||
#define JOB_AUTOWEP 23
|
||||
#define JOB_WSTATS 24
|
||||
#define JOB_SCAN_RESULTS 25
|
||||
#define FLAG_WPA_CAPABLE 16
|
||||
unsigned long flags;
|
||||
#define JOB_DIE 0
|
||||
#define JOB_XMIT 1
|
||||
#define JOB_XMIT11 2
|
||||
#define JOB_STATS 3
|
||||
#define JOB_PROMISC 4
|
||||
#define JOB_MIC 5
|
||||
#define JOB_EVENT 6
|
||||
#define JOB_AUTOWEP 7
|
||||
#define JOB_WSTATS 8
|
||||
#define JOB_SCAN_RESULTS 9
|
||||
unsigned long jobs;
|
||||
int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen,
|
||||
int whichbap);
|
||||
unsigned short *flash;
|
||||
@ -1208,6 +1223,11 @@ struct airo_info {
|
||||
#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
|
||||
char proc_name[IFNAMSIZ];
|
||||
|
||||
/* WPA-related stuff */
|
||||
unsigned int bssListFirst;
|
||||
unsigned int bssListNext;
|
||||
unsigned int bssListRidLen;
|
||||
|
||||
struct list_head network_list;
|
||||
struct list_head network_free_list;
|
||||
BSSListElement *networks;
|
||||
@ -1264,7 +1284,7 @@ static void micinit(struct airo_info *ai)
|
||||
{
|
||||
MICRid mic_rid;
|
||||
|
||||
clear_bit(JOB_MIC, &ai->flags);
|
||||
clear_bit(JOB_MIC, &ai->jobs);
|
||||
PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
|
||||
up(&ai->sem);
|
||||
|
||||
@ -1705,24 +1725,24 @@ static void emmh32_final(emmh32_context *context, u8 digest[4])
|
||||
static int readBSSListRid(struct airo_info *ai, int first,
|
||||
BSSListRid *list) {
|
||||
int rc;
|
||||
Cmd cmd;
|
||||
Resp rsp;
|
||||
Cmd cmd;
|
||||
Resp rsp;
|
||||
|
||||
if (first == 1) {
|
||||
if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.cmd=CMD_LISTBSS;
|
||||
if (down_interruptible(&ai->sem))
|
||||
return -ERESTARTSYS;
|
||||
issuecommand(ai, &cmd, &rsp);
|
||||
up(&ai->sem);
|
||||
/* Let the command take effect */
|
||||
ai->task = current;
|
||||
ssleep(3);
|
||||
ai->task = NULL;
|
||||
}
|
||||
rc = PC4500_readrid(ai, first ? RID_BSSLISTFIRST : RID_BSSLISTNEXT,
|
||||
list, sizeof(*list), 1);
|
||||
if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.cmd=CMD_LISTBSS;
|
||||
if (down_interruptible(&ai->sem))
|
||||
return -ERESTARTSYS;
|
||||
issuecommand(ai, &cmd, &rsp);
|
||||
up(&ai->sem);
|
||||
/* Let the command take effect */
|
||||
ai->task = current;
|
||||
ssleep(3);
|
||||
ai->task = NULL;
|
||||
}
|
||||
rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
|
||||
list, ai->bssListRidLen, 1);
|
||||
|
||||
list->len = le16_to_cpu(list->len);
|
||||
list->index = le16_to_cpu(list->index);
|
||||
@ -2112,7 +2132,7 @@ static void airo_end_xmit(struct net_device *dev) {
|
||||
int fid = priv->xmit.fid;
|
||||
u32 *fids = priv->fids;
|
||||
|
||||
clear_bit(JOB_XMIT, &priv->flags);
|
||||
clear_bit(JOB_XMIT, &priv->jobs);
|
||||
clear_bit(FLAG_PENDING_XMIT, &priv->flags);
|
||||
status = transmit_802_3_packet (priv, fids[fid], skb->data);
|
||||
up(&priv->sem);
|
||||
@ -2162,7 +2182,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
|
||||
if (down_trylock(&priv->sem) != 0) {
|
||||
set_bit(FLAG_PENDING_XMIT, &priv->flags);
|
||||
netif_stop_queue(dev);
|
||||
set_bit(JOB_XMIT, &priv->flags);
|
||||
set_bit(JOB_XMIT, &priv->jobs);
|
||||
wake_up_interruptible(&priv->thr_wait);
|
||||
} else
|
||||
airo_end_xmit(dev);
|
||||
@ -2177,7 +2197,7 @@ static void airo_end_xmit11(struct net_device *dev) {
|
||||
int fid = priv->xmit11.fid;
|
||||
u32 *fids = priv->fids;
|
||||
|
||||
clear_bit(JOB_XMIT11, &priv->flags);
|
||||
clear_bit(JOB_XMIT11, &priv->jobs);
|
||||
clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
|
||||
status = transmit_802_11_packet (priv, fids[fid], skb->data);
|
||||
up(&priv->sem);
|
||||
@ -2233,7 +2253,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
|
||||
if (down_trylock(&priv->sem) != 0) {
|
||||
set_bit(FLAG_PENDING_XMIT11, &priv->flags);
|
||||
netif_stop_queue(dev);
|
||||
set_bit(JOB_XMIT11, &priv->flags);
|
||||
set_bit(JOB_XMIT11, &priv->jobs);
|
||||
wake_up_interruptible(&priv->thr_wait);
|
||||
} else
|
||||
airo_end_xmit11(dev);
|
||||
@ -2244,7 +2264,7 @@ static void airo_read_stats(struct airo_info *ai) {
|
||||
StatsRid stats_rid;
|
||||
u32 *vals = stats_rid.vals;
|
||||
|
||||
clear_bit(JOB_STATS, &ai->flags);
|
||||
clear_bit(JOB_STATS, &ai->jobs);
|
||||
if (ai->power.event) {
|
||||
up(&ai->sem);
|
||||
return;
|
||||
@ -2272,10 +2292,10 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
|
||||
{
|
||||
struct airo_info *local = dev->priv;
|
||||
|
||||
if (!test_bit(JOB_STATS, &local->flags)) {
|
||||
if (!test_bit(JOB_STATS, &local->jobs)) {
|
||||
/* Get stats out of the card if available */
|
||||
if (down_trylock(&local->sem) != 0) {
|
||||
set_bit(JOB_STATS, &local->flags);
|
||||
set_bit(JOB_STATS, &local->jobs);
|
||||
wake_up_interruptible(&local->thr_wait);
|
||||
} else
|
||||
airo_read_stats(local);
|
||||
@ -2290,7 +2310,7 @@ static void airo_set_promisc(struct airo_info *ai) {
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.cmd=CMD_SETMODE;
|
||||
clear_bit(JOB_PROMISC, &ai->flags);
|
||||
clear_bit(JOB_PROMISC, &ai->jobs);
|
||||
cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
|
||||
issuecommand(ai, &cmd, &rsp);
|
||||
up(&ai->sem);
|
||||
@ -2302,7 +2322,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
|
||||
if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
|
||||
change_bit(FLAG_PROMISC, &ai->flags);
|
||||
if (down_trylock(&ai->sem) != 0) {
|
||||
set_bit(JOB_PROMISC, &ai->flags);
|
||||
set_bit(JOB_PROMISC, &ai->jobs);
|
||||
wake_up_interruptible(&ai->thr_wait);
|
||||
} else
|
||||
airo_set_promisc(ai);
|
||||
@ -2380,7 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
|
||||
}
|
||||
clear_bit(FLAG_REGISTERED, &ai->flags);
|
||||
}
|
||||
set_bit(JOB_DIE, &ai->flags);
|
||||
set_bit(JOB_DIE, &ai->jobs);
|
||||
kill_proc(ai->thr_pid, SIGTERM, 1);
|
||||
wait_for_completion(&ai->thr_exited);
|
||||
|
||||
@ -2701,14 +2721,14 @@ static int reset_card( struct net_device *dev , int lock) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define MAX_NETWORK_COUNT 64
|
||||
#define AIRO_MAX_NETWORK_COUNT 64
|
||||
static int airo_networks_allocate(struct airo_info *ai)
|
||||
{
|
||||
if (ai->networks)
|
||||
return 0;
|
||||
|
||||
ai->networks =
|
||||
kzalloc(MAX_NETWORK_COUNT * sizeof(BSSListElement),
|
||||
kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
|
||||
GFP_KERNEL);
|
||||
if (!ai->networks) {
|
||||
airo_print_warn(ai->dev->name, "Out of memory allocating beacons");
|
||||
@ -2732,11 +2752,33 @@ static void airo_networks_initialize(struct airo_info *ai)
|
||||
|
||||
INIT_LIST_HEAD(&ai->network_free_list);
|
||||
INIT_LIST_HEAD(&ai->network_list);
|
||||
for (i = 0; i < MAX_NETWORK_COUNT; i++)
|
||||
for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++)
|
||||
list_add_tail(&ai->networks[i].list,
|
||||
&ai->network_free_list);
|
||||
}
|
||||
|
||||
static int airo_test_wpa_capable(struct airo_info *ai)
|
||||
{
|
||||
int status;
|
||||
CapabilityRid cap_rid;
|
||||
const char *name = ai->dev->name;
|
||||
|
||||
status = readCapabilityRid(ai, &cap_rid, 1);
|
||||
if (status != SUCCESS) return 0;
|
||||
|
||||
/* Only firmware versions 5.30.17 or better can do WPA */
|
||||
if ((cap_rid.softVer > 0x530)
|
||||
|| ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) {
|
||||
airo_print_info(name, "WPA is supported.");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* No WPA support */
|
||||
airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17"
|
||||
" and greater support WPA. Detected %s)", cap_rid.prodVer);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct net_device *_init_airo_card( unsigned short irq, int port,
|
||||
int is_pcmcia, struct pci_dev *pci,
|
||||
struct device *dmdev )
|
||||
@ -2759,6 +2801,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
|
||||
ai = dev->priv;
|
||||
ai->wifidev = NULL;
|
||||
ai->flags = 0;
|
||||
ai->jobs = 0;
|
||||
ai->dev = dev;
|
||||
if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
|
||||
airo_print_dbg(dev->name, "Found an MPI350 card");
|
||||
@ -2838,6 +2881,18 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
|
||||
set_bit(FLAG_FLASHING, &ai->flags);
|
||||
}
|
||||
|
||||
/* Test for WPA support */
|
||||
if (airo_test_wpa_capable(ai)) {
|
||||
set_bit(FLAG_WPA_CAPABLE, &ai->flags);
|
||||
ai->bssListFirst = RID_WPA_BSSLISTFIRST;
|
||||
ai->bssListNext = RID_WPA_BSSLISTNEXT;
|
||||
ai->bssListRidLen = sizeof(BSSListRid);
|
||||
} else {
|
||||
ai->bssListFirst = RID_BSSLISTFIRST;
|
||||
ai->bssListNext = RID_BSSLISTNEXT;
|
||||
ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
|
||||
}
|
||||
|
||||
rc = register_netdev(dev);
|
||||
if (rc) {
|
||||
airo_print_err(dev->name, "Couldn't register_netdev");
|
||||
@ -2875,7 +2930,7 @@ err_out_irq:
|
||||
err_out_unlink:
|
||||
del_airo_dev(dev);
|
||||
err_out_thr:
|
||||
set_bit(JOB_DIE, &ai->flags);
|
||||
set_bit(JOB_DIE, &ai->jobs);
|
||||
kill_proc(ai->thr_pid, SIGTERM, 1);
|
||||
wait_for_completion(&ai->thr_exited);
|
||||
err_out_free:
|
||||
@ -2933,7 +2988,7 @@ static void airo_send_event(struct net_device *dev) {
|
||||
union iwreq_data wrqu;
|
||||
StatusRid status_rid;
|
||||
|
||||
clear_bit(JOB_EVENT, &ai->flags);
|
||||
clear_bit(JOB_EVENT, &ai->jobs);
|
||||
PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
|
||||
up(&ai->sem);
|
||||
wrqu.data.length = 0;
|
||||
@ -2947,7 +3002,7 @@ static void airo_send_event(struct net_device *dev) {
|
||||
|
||||
static void airo_process_scan_results (struct airo_info *ai) {
|
||||
union iwreq_data wrqu;
|
||||
BSSListRid BSSList;
|
||||
BSSListRid bss;
|
||||
int rc;
|
||||
BSSListElement * loop_net;
|
||||
BSSListElement * tmp_net;
|
||||
@ -2960,15 +3015,15 @@ static void airo_process_scan_results (struct airo_info *ai) {
|
||||
}
|
||||
|
||||
/* Try to read the first entry of the scan result */
|
||||
rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 0);
|
||||
if((rc) || (BSSList.index == 0xffff)) {
|
||||
rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
|
||||
if((rc) || (bss.index == 0xffff)) {
|
||||
/* No scan results */
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Read and parse all entries */
|
||||
tmp_net = NULL;
|
||||
while((!rc) && (BSSList.index != 0xffff)) {
|
||||
while((!rc) && (bss.index != 0xffff)) {
|
||||
/* Grab a network off the free list */
|
||||
if (!list_empty(&ai->network_free_list)) {
|
||||
tmp_net = list_entry(ai->network_free_list.next,
|
||||
@ -2977,19 +3032,19 @@ static void airo_process_scan_results (struct airo_info *ai) {
|
||||
}
|
||||
|
||||
if (tmp_net != NULL) {
|
||||
memcpy(tmp_net, &BSSList, sizeof(tmp_net->bss));
|
||||
memcpy(tmp_net, &bss, sizeof(tmp_net->bss));
|
||||
list_add_tail(&tmp_net->list, &ai->network_list);
|
||||
tmp_net = NULL;
|
||||
}
|
||||
|
||||
/* Read next entry */
|
||||
rc = PC4500_readrid(ai, RID_BSSLISTNEXT,
|
||||
&BSSList, sizeof(BSSList), 0);
|
||||
rc = PC4500_readrid(ai, ai->bssListNext,
|
||||
&bss, ai->bssListRidLen, 0);
|
||||
}
|
||||
|
||||
out:
|
||||
ai->scan_timeout = 0;
|
||||
clear_bit(JOB_SCAN_RESULTS, &ai->flags);
|
||||
clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
|
||||
up(&ai->sem);
|
||||
|
||||
/* Send an empty event to user space.
|
||||
@ -3019,10 +3074,10 @@ static int airo_thread(void *data) {
|
||||
/* make swsusp happy with our thread */
|
||||
try_to_freeze();
|
||||
|
||||
if (test_bit(JOB_DIE, &ai->flags))
|
||||
if (test_bit(JOB_DIE, &ai->jobs))
|
||||
break;
|
||||
|
||||
if (ai->flags & JOB_MASK) {
|
||||
if (ai->jobs) {
|
||||
locked = down_interruptible(&ai->sem);
|
||||
} else {
|
||||
wait_queue_t wait;
|
||||
@ -3031,16 +3086,16 @@ static int airo_thread(void *data) {
|
||||
add_wait_queue(&ai->thr_wait, &wait);
|
||||
for (;;) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (ai->flags & JOB_MASK)
|
||||
if (ai->jobs)
|
||||
break;
|
||||
if (ai->expires || ai->scan_timeout) {
|
||||
if (ai->scan_timeout &&
|
||||
time_after_eq(jiffies,ai->scan_timeout)){
|
||||
set_bit(JOB_SCAN_RESULTS,&ai->flags);
|
||||
set_bit(JOB_SCAN_RESULTS, &ai->jobs);
|
||||
break;
|
||||
} else if (ai->expires &&
|
||||
time_after_eq(jiffies,ai->expires)){
|
||||
set_bit(JOB_AUTOWEP,&ai->flags);
|
||||
set_bit(JOB_AUTOWEP, &ai->jobs);
|
||||
break;
|
||||
}
|
||||
if (!signal_pending(current)) {
|
||||
@ -3069,7 +3124,7 @@ static int airo_thread(void *data) {
|
||||
if (locked)
|
||||
continue;
|
||||
|
||||
if (test_bit(JOB_DIE, &ai->flags)) {
|
||||
if (test_bit(JOB_DIE, &ai->jobs)) {
|
||||
up(&ai->sem);
|
||||
break;
|
||||
}
|
||||
@ -3079,23 +3134,23 @@ static int airo_thread(void *data) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (test_bit(JOB_XMIT, &ai->flags))
|
||||
if (test_bit(JOB_XMIT, &ai->jobs))
|
||||
airo_end_xmit(dev);
|
||||
else if (test_bit(JOB_XMIT11, &ai->flags))
|
||||
else if (test_bit(JOB_XMIT11, &ai->jobs))
|
||||
airo_end_xmit11(dev);
|
||||
else if (test_bit(JOB_STATS, &ai->flags))
|
||||
else if (test_bit(JOB_STATS, &ai->jobs))
|
||||
airo_read_stats(ai);
|
||||
else if (test_bit(JOB_WSTATS, &ai->flags))
|
||||
else if (test_bit(JOB_WSTATS, &ai->jobs))
|
||||
airo_read_wireless_stats(ai);
|
||||
else if (test_bit(JOB_PROMISC, &ai->flags))
|
||||
else if (test_bit(JOB_PROMISC, &ai->jobs))
|
||||
airo_set_promisc(ai);
|
||||
else if (test_bit(JOB_MIC, &ai->flags))
|
||||
else if (test_bit(JOB_MIC, &ai->jobs))
|
||||
micinit(ai);
|
||||
else if (test_bit(JOB_EVENT, &ai->flags))
|
||||
else if (test_bit(JOB_EVENT, &ai->jobs))
|
||||
airo_send_event(dev);
|
||||
else if (test_bit(JOB_AUTOWEP, &ai->flags))
|
||||
else if (test_bit(JOB_AUTOWEP, &ai->jobs))
|
||||
timer_func(dev);
|
||||
else if (test_bit(JOB_SCAN_RESULTS, &ai->flags))
|
||||
else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs))
|
||||
airo_process_scan_results(ai);
|
||||
else /* Shouldn't get here, but we make sure to unlock */
|
||||
up(&ai->sem);
|
||||
@ -3133,7 +3188,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
|
||||
if ( status & EV_MIC ) {
|
||||
OUT4500( apriv, EVACK, EV_MIC );
|
||||
if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
|
||||
set_bit(JOB_MIC, &apriv->flags);
|
||||
set_bit(JOB_MIC, &apriv->jobs);
|
||||
wake_up_interruptible(&apriv->thr_wait);
|
||||
}
|
||||
}
|
||||
@ -3187,7 +3242,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
|
||||
set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
|
||||
|
||||
if (down_trylock(&apriv->sem) != 0) {
|
||||
set_bit(JOB_EVENT, &apriv->flags);
|
||||
set_bit(JOB_EVENT, &apriv->jobs);
|
||||
wake_up_interruptible(&apriv->thr_wait);
|
||||
} else
|
||||
airo_send_event(dev);
|
||||
@ -5485,7 +5540,7 @@ static void timer_func( struct net_device *dev ) {
|
||||
up(&apriv->sem);
|
||||
|
||||
/* Schedule check to see if the change worked */
|
||||
clear_bit(JOB_AUTOWEP, &apriv->flags);
|
||||
clear_bit(JOB_AUTOWEP, &apriv->jobs);
|
||||
apriv->expires = RUN_AT(HZ*3);
|
||||
}
|
||||
|
||||
@ -6876,7 +6931,7 @@ static int airo_get_range(struct net_device *dev,
|
||||
}
|
||||
range->num_txpower = i;
|
||||
range->txpower_capa = IW_TXPOW_MWATT;
|
||||
range->we_version_source = 12;
|
||||
range->we_version_source = 19;
|
||||
range->we_version_compiled = WIRELESS_EXT;
|
||||
range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
|
||||
range->retry_flags = IW_RETRY_LIMIT;
|
||||
@ -7152,6 +7207,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
|
||||
u16 capabilities;
|
||||
char * current_val; /* For rates */
|
||||
int i;
|
||||
char * buf;
|
||||
|
||||
/* First entry *MUST* be the AP MAC address */
|
||||
iwe.cmd = SIOCGIWAP;
|
||||
@ -7238,8 +7294,69 @@ static inline char *airo_translate_scan(struct net_device *dev,
|
||||
if((current_val - current_ev) > IW_EV_LCP_LEN)
|
||||
current_ev = current_val;
|
||||
|
||||
/* The other data in the scan result are not really
|
||||
* interesting, so for now drop it - Jean II */
|
||||
/* Beacon interval */
|
||||
buf = kmalloc(30, GFP_KERNEL);
|
||||
if (buf) {
|
||||
iwe.cmd = IWEVCUSTOM;
|
||||
sprintf(buf, "bcn_int=%d", bss->beaconInterval);
|
||||
iwe.u.data.length = strlen(buf);
|
||||
current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, buf);
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
/* Put WPA/RSN Information Elements into the event stream */
|
||||
if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) {
|
||||
unsigned int num_null_ies = 0;
|
||||
u16 length = sizeof (bss->extra.iep);
|
||||
struct ieee80211_info_element *info_element =
|
||||
(struct ieee80211_info_element *) &bss->extra.iep;
|
||||
|
||||
while ((length >= sizeof(*info_element)) && (num_null_ies < 2)) {
|
||||
if (sizeof(*info_element) + info_element->len > length) {
|
||||
/* Invalid element, don't continue parsing IE */
|
||||
break;
|
||||
}
|
||||
|
||||
switch (info_element->id) {
|
||||
case MFIE_TYPE_SSID:
|
||||
/* Two zero-length SSID elements
|
||||
* mean we're done parsing elements */
|
||||
if (!info_element->len)
|
||||
num_null_ies++;
|
||||
break;
|
||||
|
||||
case MFIE_TYPE_GENERIC:
|
||||
if (info_element->len >= 4 &&
|
||||
info_element->data[0] == 0x00 &&
|
||||
info_element->data[1] == 0x50 &&
|
||||
info_element->data[2] == 0xf2 &&
|
||||
info_element->data[3] == 0x01) {
|
||||
iwe.cmd = IWEVGENIE;
|
||||
iwe.u.data.length = min(info_element->len + 2,
|
||||
MAX_WPA_IE_LEN);
|
||||
current_ev = iwe_stream_add_point(current_ev, end_buf,
|
||||
&iwe, (char *) info_element);
|
||||
}
|
||||
break;
|
||||
|
||||
case MFIE_TYPE_RSN:
|
||||
iwe.cmd = IWEVGENIE;
|
||||
iwe.u.data.length = min(info_element->len + 2,
|
||||
MAX_WPA_IE_LEN);
|
||||
current_ev = iwe_stream_add_point(current_ev, end_buf,
|
||||
&iwe, (char *) info_element);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
length -= sizeof(*info_element) + info_element->len;
|
||||
info_element =
|
||||
(struct ieee80211_info_element *)&info_element->
|
||||
data[info_element->len];
|
||||
}
|
||||
}
|
||||
return current_ev;
|
||||
}
|
||||
|
||||
@ -7521,7 +7638,7 @@ static void airo_read_wireless_stats(struct airo_info *local)
|
||||
u32 *vals = stats_rid.vals;
|
||||
|
||||
/* Get stats out of the card */
|
||||
clear_bit(JOB_WSTATS, &local->flags);
|
||||
clear_bit(JOB_WSTATS, &local->jobs);
|
||||
if (local->power.event) {
|
||||
up(&local->sem);
|
||||
return;
|
||||
@ -7565,10 +7682,10 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
|
||||
{
|
||||
struct airo_info *local = dev->priv;
|
||||
|
||||
if (!test_bit(JOB_WSTATS, &local->flags)) {
|
||||
if (!test_bit(JOB_WSTATS, &local->jobs)) {
|
||||
/* Get stats out of the card if available */
|
||||
if (down_trylock(&local->sem) != 0) {
|
||||
set_bit(JOB_WSTATS, &local->flags);
|
||||
set_bit(JOB_WSTATS, &local->jobs);
|
||||
wake_up_interruptible(&local->thr_wait);
|
||||
} else
|
||||
airo_read_wireless_stats(local);
|
||||
|
@@ -645,7 +645,6 @@ struct bcm43xx_private {
unsigned int irq;

void __iomem *mmio_addr;
unsigned int mmio_len;

/* Do not use the lock directly. Use the bcm43xx_lock* helper
* functions, to be MMIO-safe. */
@@ -92,7 +92,7 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
fappend("subsystem_vendor: 0x%04x subsystem_device: 0x%04x\n",
pci_dev->subsystem_vendor, pci_dev->subsystem_device);
fappend("IRQ: %d\n", bcm->irq);
fappend("mmio_addr: 0x%p mmio_len: %u\n", bcm->mmio_addr, bcm->mmio_len);
fappend("mmio_addr: 0x%p\n", bcm->mmio_addr);
fappend("chip_id: 0x%04x chip_rev: 0x%02x\n", bcm->chip_id, bcm->chip_rev);
if ((bcm->core_80211[0].rev >= 3) && (bcm43xx_read32(bcm, 0x0158) & (1 << 16)))
fappend("Radio disabled by hardware!\n");
@ -128,13 +128,15 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for debugging.");
|
||||
static struct pci_device_id bcm43xx_pci_tbl[] = {
|
||||
/* Broadcom 4303 802.11b */
|
||||
{ PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
|
||||
/* Broadcom 4307 802.11b */
|
||||
/* Broadcom 4307 802.11b */
|
||||
{ PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
|
||||
/* Broadcom 4318 802.11b/g */
|
||||
/* Broadcom 4318 802.11b/g */
|
||||
{ PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
|
||||
/* Broadcom 4319 802.11a/b/g */
|
||||
{ PCI_VENDOR_ID_BROADCOM, 0x4319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
|
||||
/* Broadcom 4306 802.11b/g */
|
||||
{ PCI_VENDOR_ID_BROADCOM, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
|
||||
/* Broadcom 4306 802.11a */
|
||||
/* Broadcom 4306 802.11a */
|
||||
// { PCI_VENDOR_ID_BROADCOM, 0x4321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
|
||||
/* Broadcom 4309 802.11a/b/g */
|
||||
{ PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
|
||||
@ -3299,8 +3301,7 @@ static void bcm43xx_detach_board(struct bcm43xx_private *bcm)
|
||||
|
||||
bcm43xx_chipset_detach(bcm);
|
||||
/* Do _not_ access the chip, after it is detached. */
|
||||
iounmap(bcm->mmio_addr);
|
||||
|
||||
pci_iounmap(pci_dev, bcm->mmio_addr);
|
||||
pci_release_regions(pci_dev);
|
||||
pci_disable_device(pci_dev);
|
||||
|
||||
@ -3390,40 +3391,26 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
|
||||
struct net_device *net_dev = bcm->net_dev;
|
||||
int err;
|
||||
int i;
|
||||
unsigned long mmio_start, mmio_flags, mmio_len;
|
||||
u32 coremask;
|
||||
|
||||
err = pci_enable_device(pci_dev);
|
||||
if (err) {
|
||||
printk(KERN_ERR PFX "unable to wake up pci device (%i)\n", err);
|
||||
printk(KERN_ERR PFX "pci_enable_device() failed\n");
|
||||
goto out;
|
||||
}
|
||||
mmio_start = pci_resource_start(pci_dev, 0);
|
||||
mmio_flags = pci_resource_flags(pci_dev, 0);
|
||||
mmio_len = pci_resource_len(pci_dev, 0);
|
||||
if (!(mmio_flags & IORESOURCE_MEM)) {
|
||||
printk(KERN_ERR PFX
|
||||
"%s, region #0 not an MMIO resource, aborting\n",
|
||||
pci_name(pci_dev));
|
||||
err = -ENODEV;
|
||||
goto err_pci_disable;
|
||||
}
|
||||
err = pci_request_regions(pci_dev, KBUILD_MODNAME);
|
||||
if (err) {
|
||||
printk(KERN_ERR PFX
|
||||
"could not access PCI resources (%i)\n", err);
|
||||
printk(KERN_ERR PFX "pci_request_regions() failed\n");
|
||||
goto err_pci_disable;
|
||||
}
|
||||
/* enable PCI bus-mastering */
|
||||
pci_set_master(pci_dev);
|
||||
bcm->mmio_addr = ioremap(mmio_start, mmio_len);
|
||||
bcm->mmio_addr = pci_iomap(pci_dev, 0, ~0UL);
|
||||
if (!bcm->mmio_addr) {
|
||||
printk(KERN_ERR PFX "%s: cannot remap MMIO, aborting\n",
|
||||
pci_name(pci_dev));
|
||||
printk(KERN_ERR PFX "pci_iomap() failed\n");
|
||||
err = -EIO;
|
||||
goto err_pci_release;
|
||||
}
|
||||
bcm->mmio_len = mmio_len;
|
||||
net_dev->base_addr = (unsigned long)bcm->mmio_addr;
|
||||
|
||||
bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID,
|
||||
@ -3517,7 +3504,7 @@ err_80211_unwind:
|
||||
err_chipset_detach:
|
||||
bcm43xx_chipset_detach(bcm);
|
||||
err_iounmap:
|
||||
iounmap(bcm->mmio_addr);
|
||||
pci_iounmap(pci_dev, bcm->mmio_addr);
|
||||
err_pci_release:
|
||||
pci_release_regions(pci_dev);
|
||||
err_pci_disable:
|
||||
@ -3568,7 +3555,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
|
||||
unsigned long flags;
|
||||
int keyidx;
|
||||
|
||||
dprintk(KERN_INFO PFX "set security called\n");
|
||||
dprintk(KERN_INFO PFX "set security called");
|
||||
|
||||
bcm43xx_lock_mmio(bcm, flags);
|
||||
|
||||
@ -3581,24 +3568,25 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
|
||||
|
||||
if (sec->flags & SEC_ACTIVE_KEY) {
|
||||
secinfo->active_key = sec->active_key;
|
||||
dprintk(KERN_INFO PFX " .active_key = %d\n", sec->active_key);
|
||||
dprintk(", .active_key = %d", sec->active_key);
|
||||
}
|
||||
if (sec->flags & SEC_UNICAST_GROUP) {
|
||||
secinfo->unicast_uses_group = sec->unicast_uses_group;
|
||||
dprintk(KERN_INFO PFX " .unicast_uses_group = %d\n", sec->unicast_uses_group);
|
||||
dprintk(", .unicast_uses_group = %d", sec->unicast_uses_group);
|
||||
}
|
||||
if (sec->flags & SEC_LEVEL) {
|
||||
secinfo->level = sec->level;
|
||||
dprintk(KERN_INFO PFX " .level = %d\n", sec->level);
|
||||
dprintk(", .level = %d", sec->level);
|
||||
}
|
||||
if (sec->flags & SEC_ENABLED) {
|
||||
secinfo->enabled = sec->enabled;
|
||||
dprintk(KERN_INFO PFX " .enabled = %d\n", sec->enabled);
|
||||
dprintk(", .enabled = %d", sec->enabled);
|
||||
}
|
||||
if (sec->flags & SEC_ENCRYPT) {
|
||||
secinfo->encrypt = sec->encrypt;
|
||||
dprintk(KERN_INFO PFX " .encrypt = %d\n", sec->encrypt);
|
||||
dprintk(", .encrypt = %d", sec->encrypt);
|
||||
}
|
||||
dprintk("\n");
|
||||
if (bcm->initialized && !bcm->ieee->host_encrypt) {
|
||||
if (secinfo->enabled) {
|
||||
/* upload WEP keys to hardware */
|
||||
|
@ -121,12 +121,6 @@ void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
|
||||
hw->iobase = address;
|
||||
hw->reg_spacing = reg_spacing;
|
||||
hw->inten = 0x0;
|
||||
|
||||
#ifdef HERMES_DEBUG_BUFFER
|
||||
hw->dbufp = 0;
|
||||
memset(&hw->dbuf, 0xff, sizeof(hw->dbuf));
|
||||
memset(&hw->profile, 0, sizeof(hw->profile));
|
||||
#endif
|
||||
}
|
||||
|
||||
int hermes_init(hermes_t *hw)
|
||||
@ -347,19 +341,6 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
|
||||
reg = hermes_read_reg(hw, oreg);
|
||||
}
|
||||
|
||||
#ifdef HERMES_DEBUG_BUFFER
|
||||
hw->profile[HERMES_BAP_BUSY_TIMEOUT - k]++;
|
||||
|
||||
if (k < HERMES_BAP_BUSY_TIMEOUT) {
|
||||
struct hermes_debug_entry *e =
|
||||
&hw->dbuf[(hw->dbufp++) % HERMES_DEBUG_BUFSIZE];
|
||||
e->bap = bap;
|
||||
e->id = id;
|
||||
e->offset = offset;
|
||||
e->cycles = HERMES_BAP_BUSY_TIMEOUT - k;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (reg & HERMES_OFFSET_BUSY)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
@ -419,8 +400,7 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
|
||||
}
|
||||
|
||||
/* Write a block of data to the chip's buffer, via the
|
||||
* BAP. Synchronization/serialization is the caller's problem. len
|
||||
* must be even.
|
||||
* BAP. Synchronization/serialization is the caller's problem.
|
||||
*
|
||||
* Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
|
||||
*/
|
||||
@ -430,7 +410,7 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
|
||||
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
|
||||
int err = 0;
|
||||
|
||||
if ( (len < 0) || (len % 2) )
|
||||
if (len < 0)
|
||||
return -EINVAL;
|
||||
|
||||
err = hermes_bap_seek(hw, bap, id, offset);
|
||||
@ -438,49 +418,12 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
|
||||
goto out;
|
||||
|
||||
/* Actually do the transfer */
|
||||
hermes_write_words(hw, dreg, buf, len/2);
|
||||
hermes_write_bytes(hw, dreg, buf, len);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Write a block of data to the chip's buffer with padding if
|
||||
* neccessary, via the BAP. Synchronization/serialization is the
|
||||
* caller's problem. len must be even.
|
||||
*
|
||||
* Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
|
||||
*/
|
||||
int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, unsigned data_len, int len,
|
||||
u16 id, u16 offset)
|
||||
{
|
||||
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
|
||||
int err = 0;
|
||||
|
||||
if (len < 0 || len % 2 || data_len > len)
|
||||
return -EINVAL;
|
||||
|
||||
err = hermes_bap_seek(hw, bap, id, offset);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Transfer all the complete words of data */
|
||||
hermes_write_words(hw, dreg, buf, data_len/2);
|
||||
/* If there is an odd byte left over pad and transfer it */
|
||||
if (data_len & 1) {
|
||||
u8 end[2];
|
||||
end[1] = 0;
|
||||
end[0] = ((unsigned char *)buf)[data_len - 1];
|
||||
hermes_write_words(hw, dreg, end, 1);
|
||||
data_len ++;
|
||||
}
|
||||
/* Now send zeros for the padding */
|
||||
if (data_len < len)
|
||||
hermes_clear_words(hw, dreg, (len - data_len) / 2);
|
||||
/* Complete */
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Read a Length-Type-Value record from the card.
|
||||
*
|
||||
* If length is NULL, we ignore the length read from the card, and
|
||||
@ -553,7 +496,7 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
|
||||
|
||||
count = length - 1;
|
||||
|
||||
hermes_write_words(hw, dreg, value, count);
|
||||
hermes_write_bytes(hw, dreg, value, count << 1);
|
||||
|
||||
err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
|
||||
rid, NULL);
|
||||
@ -568,7 +511,6 @@ EXPORT_SYMBOL(hermes_allocate);
|
||||
|
||||
EXPORT_SYMBOL(hermes_bap_pread);
|
||||
EXPORT_SYMBOL(hermes_bap_pwrite);
|
||||
EXPORT_SYMBOL(hermes_bap_pwrite_pad);
|
||||
EXPORT_SYMBOL(hermes_read_ltv);
|
||||
EXPORT_SYMBOL(hermes_write_ltv);
|
||||
|
||||
|
@ -328,16 +328,6 @@ struct hermes_multicast {
|
||||
u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
// #define HERMES_DEBUG_BUFFER 1
|
||||
#define HERMES_DEBUG_BUFSIZE 4096
|
||||
struct hermes_debug_entry {
|
||||
int bap;
|
||||
u16 id, offset;
|
||||
int cycles;
|
||||
};
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/* Timeouts */
|
||||
#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
|
||||
|
||||
@ -347,14 +337,7 @@ typedef struct hermes {
|
||||
int reg_spacing;
|
||||
#define HERMES_16BIT_REGSPACING 0
|
||||
#define HERMES_32BIT_REGSPACING 1
|
||||
|
||||
u16 inten; /* Which interrupts should be enabled? */
|
||||
|
||||
#ifdef HERMES_DEBUG_BUFFER
|
||||
struct hermes_debug_entry dbuf[HERMES_DEBUG_BUFSIZE];
|
||||
unsigned long dbufp;
|
||||
unsigned long profile[HERMES_BAP_BUSY_TIMEOUT+1];
|
||||
#endif
|
||||
} hermes_t;
|
||||
|
||||
/* Register access convenience macros */
|
||||
@ -376,8 +359,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
|
||||
u16 id, u16 offset);
|
||||
int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
|
||||
u16 id, u16 offset);
|
||||
int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf,
|
||||
unsigned data_len, int len, u16 id, u16 offset);
|
||||
int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
|
||||
u16 *length, void *buf);
|
||||
int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
|
||||
@ -425,10 +406,13 @@ static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsi
|
||||
ioread16_rep(hw->iobase + off, buf, count);
|
||||
}
|
||||
|
||||
static inline void hermes_write_words(struct hermes *hw, int off, const void *buf, unsigned count)
|
||||
static inline void hermes_write_bytes(struct hermes *hw, int off,
|
||||
const char *buf, unsigned count)
|
||||
{
|
||||
off = off << hw->reg_spacing;
|
||||
iowrite16_rep(hw->iobase + off, buf, count);
|
||||
iowrite16_rep(hw->iobase + off, buf, count >> 1);
|
||||
if (unlikely(count & 1))
|
||||
iowrite8(buf[count - 1], hw->iobase + off);
|
||||
}
|
||||
|
||||
static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count)
|
||||
@ -462,21 +446,4 @@ static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
|
||||
return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
|
||||
}
|
||||
|
||||
#else /* ! __KERNEL__ */
|
||||
|
||||
/* These are provided for the benefit of userspace drivers and testing programs
|
||||
which use ioperm() or iopl() */
|
||||
|
||||
#define hermes_read_reg(base, off) (inw((base) + (off)))
|
||||
#define hermes_write_reg(base, off, val) (outw((val), (base) + (off)))
|
||||
|
||||
#define hermes_read_regn(base, name) (hermes_read_reg((base), HERMES_##name))
|
||||
#define hermes_write_regn(base, name, val) (hermes_write_reg((base), HERMES_##name, (val)))
|
||||
|
||||
/* Note that for the next two, the count is in 16-bit words, not bytes */
|
||||
#define hermes_read_data(base, off, buf, count) (insw((base) + (off), (buf), (count)))
|
||||
#define hermes_write_data(base, off, buf, count) (outsw((base) + (off), (buf), (count)))
|
||||
|
||||
#endif /* ! __KERNEL__ */
|
||||
|
||||
#endif /* _HERMES_H */
|
||||
|
@@ -534,5 +534,4 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
}


EXPORT_SYMBOL(hostap_dump_tx_80211);
EXPORT_SYMBOL(hostap_master_start_xmit);
@@ -3276,17 +3276,6 @@ EXPORT_SYMBOL(hostap_init_data);
EXPORT_SYMBOL(hostap_init_ap_proc);
EXPORT_SYMBOL(hostap_free_data);
EXPORT_SYMBOL(hostap_check_sta_fw_version);
EXPORT_SYMBOL(hostap_handle_sta_tx);
EXPORT_SYMBOL(hostap_handle_sta_release);
EXPORT_SYMBOL(hostap_handle_sta_tx_exc);
EXPORT_SYMBOL(hostap_update_sta_ps);
EXPORT_SYMBOL(hostap_handle_sta_rx);
EXPORT_SYMBOL(hostap_is_sta_assoc);
EXPORT_SYMBOL(hostap_is_sta_authorized);
EXPORT_SYMBOL(hostap_add_sta);
EXPORT_SYMBOL(hostap_update_rates);
EXPORT_SYMBOL(hostap_add_wds_links);
EXPORT_SYMBOL(hostap_wds_link_oper);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
EXPORT_SYMBOL(hostap_deauth_all_stas);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
@@ -881,6 +881,12 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12(
"ZoomAir 11Mbps High", "Rate wireless Networking",
0x273fe3db, 0x32a1eaee),
PCMCIA_DEVICE_PROD_ID123(
"Pretec", "CompactWLAN Card 802.11b", "2.5",
0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
PCMCIA_DEVICE_PROD_ID123(
"U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02",
0xc7b8df9d, 0x1700d087, 0x4b74baa0),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
@@ -1125,11 +1125,9 @@ EXPORT_SYMBOL(hostap_set_auth_algs);
EXPORT_SYMBOL(hostap_dump_rx_header);
EXPORT_SYMBOL(hostap_dump_tx_header);
EXPORT_SYMBOL(hostap_80211_header_parse);
EXPORT_SYMBOL(hostap_80211_prism_header_parse);
EXPORT_SYMBOL(hostap_80211_get_hdrlen);
EXPORT_SYMBOL(hostap_get_stats);
EXPORT_SYMBOL(hostap_setup_dev);
EXPORT_SYMBOL(hostap_proc);
EXPORT_SYMBOL(hostap_set_multicast_list_queue);
EXPORT_SYMBOL(hostap_set_hostapd);
EXPORT_SYMBOL(hostap_set_hostapd_sta);
File diff suppressed because it is too large
@ -789,7 +789,7 @@ struct ipw_sys_config {
|
||||
u8 bt_coexist_collision_thr;
|
||||
u8 silence_threshold;
|
||||
u8 accept_all_mgmt_bcpr;
|
||||
u8 accept_all_mgtm_frames;
|
||||
u8 accept_all_mgmt_frames;
|
||||
u8 pass_noise_stats_to_host;
|
||||
u8 reserved3;
|
||||
} __attribute__ ((packed));
|
||||
@ -1122,6 +1122,52 @@ struct ipw_fw_error {
|
||||
u8 payload[0];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
#ifdef CONFIG_IPW2200_PROMISCUOUS
|
||||
|
||||
enum ipw_prom_filter {
|
||||
IPW_PROM_CTL_HEADER_ONLY = (1 << 0),
|
||||
IPW_PROM_MGMT_HEADER_ONLY = (1 << 1),
|
||||
IPW_PROM_DATA_HEADER_ONLY = (1 << 2),
|
||||
IPW_PROM_ALL_HEADER_ONLY = 0xf, /* bits 0..3 */
|
||||
IPW_PROM_NO_TX = (1 << 4),
|
||||
IPW_PROM_NO_RX = (1 << 5),
|
||||
IPW_PROM_NO_CTL = (1 << 6),
|
||||
IPW_PROM_NO_MGMT = (1 << 7),
|
||||
IPW_PROM_NO_DATA = (1 << 8),
|
||||
};
|
||||
|
||||
struct ipw_priv;
|
||||
struct ipw_prom_priv {
|
||||
struct ipw_priv *priv;
|
||||
struct ieee80211_device *ieee;
|
||||
enum ipw_prom_filter filter;
|
||||
int tx_packets;
|
||||
int rx_packets;
|
||||
};
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_IPW2200_RADIOTAP) || defined(CONFIG_IPW2200_PROMISCUOUS)
|
||||
/* Magic struct that slots into the radiotap header -- no reason
|
||||
* to build this manually element by element, we can write it much
|
||||
* more efficiently than we can parse it. ORDER MATTERS HERE
|
||||
*
|
||||
* When sent to us via the simulated Rx interface in sysfs, the entire
|
||||
* structure is provided regardless of any bits unset.
|
||||
*/
|
||||
struct ipw_rt_hdr {
|
||||
struct ieee80211_radiotap_header rt_hdr;
|
||||
u64 rt_tsf; /* TSF */
|
||||
u8 rt_flags; /* radiotap packet flags */
|
||||
u8 rt_rate; /* rate in 500kb/s */
|
||||
u16 rt_channel; /* channel in mhz */
|
||||
u16 rt_chbitmask; /* channel bitfield */
|
||||
s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
|
||||
s8 rt_dbmnoise;
|
||||
u8 rt_antenna; /* antenna number */
|
||||
u8 payload[0]; /* payload... */
|
||||
} __attribute__ ((packed));
|
||||
#endif
|
||||
|
||||
struct ipw_priv {
|
||||
/* ieee device used by generic ieee processing code */
|
||||
struct ieee80211_device *ieee;
|
||||
@ -1133,6 +1179,12 @@ struct ipw_priv {
|
||||
struct pci_dev *pci_dev;
|
||||
struct net_device *net_dev;
|
||||
|
||||
#ifdef CONFIG_IPW2200_PROMISCUOUS
|
||||
/* Promiscuous mode */
|
||||
struct ipw_prom_priv *prom_priv;
|
||||
struct net_device *prom_net_dev;
|
||||
#endif
|
||||
|
||||
/* pci hardware address support */
|
||||
void __iomem *hw_base;
|
||||
unsigned long hw_len;
|
||||
@ -1153,11 +1205,9 @@ struct ipw_priv {
|
||||
u32 config;
|
||||
u32 capability;
|
||||
|
||||
u8 last_rx_rssi;
|
||||
u8 last_noise;
|
||||
struct average average_missed_beacons;
|
||||
struct average average_rssi;
|
||||
struct average average_noise;
|
||||
s16 exp_avg_rssi;
|
||||
s16 exp_avg_noise;
|
||||
u32 port_type;
|
||||
int rx_bufs_min; /**< minimum number of bufs in Rx queue */
|
||||
int rx_pend_max; /**< maximum pending buffers for one IRQ */
|
||||
@ -1308,6 +1358,29 @@ struct ipw_priv {
|
||||
|
||||
/* debug macros */
|
||||
|
||||
/* Debug and printf string expansion helpers for printing bitfields */
|
||||
#define BIT_FMT8 "%c%c%c%c-%c%c%c%c"
|
||||
#define BIT_FMT16 BIT_FMT8 ":" BIT_FMT8
|
||||
#define BIT_FMT32 BIT_FMT16 " " BIT_FMT16
|
||||
|
||||
#define BITC(x,y) (((x>>y)&1)?'1':'0')
|
||||
#define BIT_ARG8(x) \
|
||||
BITC(x,7),BITC(x,6),BITC(x,5),BITC(x,4),\
|
||||
BITC(x,3),BITC(x,2),BITC(x,1),BITC(x,0)
|
||||
|
||||
#define BIT_ARG16(x) \
|
||||
BITC(x,15),BITC(x,14),BITC(x,13),BITC(x,12),\
|
||||
BITC(x,11),BITC(x,10),BITC(x,9),BITC(x,8),\
|
||||
BIT_ARG8(x)
|
||||
|
||||
#define BIT_ARG32(x) \
|
||||
BITC(x,31),BITC(x,30),BITC(x,29),BITC(x,28),\
|
||||
BITC(x,27),BITC(x,26),BITC(x,25),BITC(x,24),\
|
||||
BITC(x,23),BITC(x,22),BITC(x,21),BITC(x,20),\
|
||||
BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\
|
||||
BIT_ARG16(x)
|
||||
|
||||
|
||||
#ifdef CONFIG_IPW2200_DEBUG
|
||||
#define IPW_DEBUG(level, fmt, args...) \
|
||||
do { if (ipw_debug_level & (level)) \
|
||||
|
@ -201,41 +201,12 @@ static struct {
|
||||
/* Data types */
|
||||
/********************************************************************/
|
||||
|
||||
/* Used in Event handling.
|
||||
* We avoid nested structures as they break on ARM -- Moustafa */
|
||||
struct hermes_tx_descriptor_802_11 {
|
||||
/* hermes_tx_descriptor */
|
||||
__le16 status;
|
||||
__le16 reserved1;
|
||||
__le16 reserved2;
|
||||
__le32 sw_support;
|
||||
u8 retry_count;
|
||||
u8 tx_rate;
|
||||
__le16 tx_control;
|
||||
|
||||
/* ieee80211_hdr */
|
||||
/* Beginning of the Tx descriptor, used in TxExc handling */
|
||||
struct hermes_txexc_data {
|
||||
struct hermes_tx_descriptor desc;
|
||||
__le16 frame_ctl;
|
||||
__le16 duration_id;
|
||||
u8 addr1[ETH_ALEN];
|
||||
u8 addr2[ETH_ALEN];
|
||||
u8 addr3[ETH_ALEN];
|
||||
__le16 seq_ctl;
|
||||
u8 addr4[ETH_ALEN];
|
||||
|
||||
__le16 data_len;
|
||||
|
||||
/* ethhdr */
|
||||
u8 h_dest[ETH_ALEN]; /* destination eth addr */
|
||||
u8 h_source[ETH_ALEN]; /* source ether addr */
|
||||
__be16 h_proto; /* packet type ID field */
|
||||
|
||||
/* p8022_hdr */
|
||||
u8 dsap;
|
||||
u8 ssap;
|
||||
u8 ctrl;
|
||||
u8 oui[3];
|
||||
|
||||
__be16 ethertype;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/* Rx frame header except compatibility 802.3 header */
|
||||
@ -450,53 +421,39 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
hermes_t *hw = &priv->hw;
|
||||
int err = 0;
|
||||
u16 txfid = priv->txfid;
|
||||
char *p;
|
||||
struct ethhdr *eh;
|
||||
int len, data_len, data_off;
|
||||
int data_off;
|
||||
struct hermes_tx_descriptor desc;
|
||||
unsigned long flags;
|
||||
|
||||
TRACE_ENTER(dev->name);
|
||||
|
||||
if (! netif_running(dev)) {
|
||||
printk(KERN_ERR "%s: Tx on stopped device!\n",
|
||||
dev->name);
|
||||
TRACE_EXIT(dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (netif_queue_stopped(dev)) {
|
||||
printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
|
||||
dev->name);
|
||||
TRACE_EXIT(dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (orinoco_lock(priv, &flags) != 0) {
|
||||
printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
|
||||
dev->name);
|
||||
TRACE_EXIT(dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (! netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) {
|
||||
/* Oops, the firmware hasn't established a connection,
|
||||
silently drop the packet (this seems to be the
|
||||
safest approach). */
|
||||
stats->tx_errors++;
|
||||
orinoco_unlock(priv, &flags);
|
||||
dev_kfree_skb(skb);
|
||||
TRACE_EXIT(dev->name);
|
||||
return 0;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
/* Length of the packet body */
|
||||
/* FIXME: what if the skb is smaller than this? */
|
||||
len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
|
||||
skb = skb_padto(skb, len);
|
||||
if (skb == NULL)
|
||||
goto fail;
|
||||
len -= ETH_HLEN;
|
||||
/* Check packet length */
|
||||
if (skb->len < ETH_HLEN)
|
||||
goto drop;
|
||||
|
||||
eh = (struct ethhdr *)skb->data;
|
||||
|
||||
@ -507,8 +464,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR "%s: Error %d writing Tx descriptor "
|
||||
"to BAP\n", dev->name, err);
|
||||
stats->tx_errors++;
|
||||
goto fail;
|
||||
goto busy;
|
||||
}
|
||||
|
||||
/* Clear the 802.11 header and data length fields - some
|
||||
@ -519,50 +475,38 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
/* Encapsulate Ethernet-II frames */
|
||||
if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
|
||||
struct header_struct hdr;
|
||||
data_len = len;
|
||||
data_off = HERMES_802_3_OFFSET + sizeof(hdr);
|
||||
p = skb->data + ETH_HLEN;
|
||||
struct header_struct {
|
||||
struct ethhdr eth; /* 802.3 header */
|
||||
u8 encap[6]; /* 802.2 header */
|
||||
} __attribute__ ((packed)) hdr;
|
||||
|
||||
/* 802.3 header */
|
||||
memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
|
||||
memcpy(hdr.src, eh->h_source, ETH_ALEN);
|
||||
hdr.len = htons(data_len + ENCAPS_OVERHEAD);
|
||||
|
||||
/* 802.2 header */
|
||||
memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
|
||||
|
||||
hdr.ethertype = eh->h_proto;
|
||||
err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
|
||||
txfid, HERMES_802_3_OFFSET);
|
||||
/* Strip destination and source from the data */
|
||||
skb_pull(skb, 2 * ETH_ALEN);
|
||||
data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr);
|
||||
|
||||
/* And move them to a separate header */
|
||||
memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
|
||||
hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
|
||||
memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
|
||||
|
||||
err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
|
||||
txfid, HERMES_802_3_OFFSET);
|
||||
if (err) {
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR "%s: Error %d writing packet "
|
||||
"header to BAP\n", dev->name, err);
|
||||
stats->tx_errors++;
|
||||
goto fail;
|
||||
goto busy;
|
||||
}
|
||||
/* Actual xfer length - allow for padding */
|
||||
len = ALIGN(data_len, 2);
|
||||
if (len < ETH_ZLEN - ETH_HLEN)
|
||||
len = ETH_ZLEN - ETH_HLEN;
|
||||
} else { /* IEEE 802.3 frame */
|
||||
data_len = len + ETH_HLEN;
|
||||
data_off = HERMES_802_3_OFFSET;
|
||||
p = skb->data;
|
||||
/* Actual xfer length - round up for odd length packets */
|
||||
len = ALIGN(data_len, 2);
|
||||
if (len < ETH_ZLEN)
|
||||
len = ETH_ZLEN;
|
||||
}
|
||||
|
||||
err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len,
|
||||
err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
|
||||
txfid, data_off);
|
||||
if (err) {
|
||||
printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
|
||||
dev->name, err);
|
||||
stats->tx_errors++;
|
||||
goto fail;
|
||||
goto busy;
|
||||
}
|
||||
|
||||
/* Finally, we actually initiate the send */
|
||||
@ -575,25 +519,27 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR "%s: Error %d transmitting packet\n",
|
||||
dev->name, err);
|
||||
stats->tx_errors++;
|
||||
goto fail;
|
||||
goto busy;
|
||||
}
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
stats->tx_bytes += data_off + data_len;
|
||||
stats->tx_bytes += data_off + skb->len;
|
||||
goto ok;
|
||||
|
||||
drop:
|
||||
stats->tx_errors++;
|
||||
stats->tx_dropped++;
|
||||
|
||||
ok:
|
||||
orinoco_unlock(priv, &flags);
|
||||
|
||||
dev_kfree_skb(skb);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
TRACE_EXIT(dev->name);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
TRACE_EXIT(dev->name);
|
||||
|
||||
busy:
|
||||
if (err == -EIO)
|
||||
schedule_work(&priv->reset_work);
|
||||
orinoco_unlock(priv, &flags);
|
||||
return err;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
|
||||
@ -629,7 +575,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
|
||||
struct net_device_stats *stats = &priv->stats;
|
||||
u16 fid = hermes_read_regn(hw, TXCOMPLFID);
|
||||
u16 status;
|
||||
struct hermes_tx_descriptor_802_11 hdr;
|
||||
struct hermes_txexc_data hdr;
|
||||
int err = 0;
|
||||
|
||||
if (fid == DUMMY_FID)
|
||||
@ -637,8 +583,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
|
||||
|
||||
/* Read part of the frame header - we need status and addr1 */
|
||||
err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
|
||||
offsetof(struct hermes_tx_descriptor_802_11,
|
||||
addr2),
|
||||
sizeof(struct hermes_txexc_data),
|
||||
fid, 0);
|
||||
|
||||
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
|
||||
@ -658,7 +603,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
|
||||
* exceeded, because that's the only status that really mean
|
||||
* that this particular node went away.
|
||||
* Other errors means that *we* screwed up. - Jean II */
|
||||
status = le16_to_cpu(hdr.status);
|
||||
status = le16_to_cpu(hdr.desc.status);
|
||||
if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
|
||||
union iwreq_data wrqu;
|
||||
|
||||
@ -1398,16 +1343,12 @@ int __orinoco_down(struct net_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int orinoco_reinit_firmware(struct net_device *dev)
|
||||
static int orinoco_allocate_fid(struct net_device *dev)
|
||||
{
|
||||
struct orinoco_private *priv = netdev_priv(dev);
|
||||
struct hermes *hw = &priv->hw;
|
||||
int err;
|
||||
|
||||
err = hermes_init(hw);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
|
||||
if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
|
||||
/* Try workaround for old Symbol firmware bug */
|
||||
@ -1426,6 +1367,19 @@ int orinoco_reinit_firmware(struct net_device *dev)
|
||||
return err;
|
||||
}
|
||||
|
||||
int orinoco_reinit_firmware(struct net_device *dev)
|
||||
{
|
||||
struct orinoco_private *priv = netdev_priv(dev);
|
||||
struct hermes *hw = &priv->hw;
|
||||
int err;
|
||||
|
||||
err = hermes_init(hw);
|
||||
if (!err)
|
||||
err = orinoco_allocate_fid(dev);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
|
||||
{
|
||||
hermes_t *hw = &priv->hw;
|
||||
@ -2272,14 +2226,12 @@ static int orinoco_init(struct net_device *dev)
|
||||
u16 reclen;
|
||||
int len;
|
||||
|
||||
TRACE_ENTER(dev->name);
|
||||
|
||||
/* No need to lock, the hw_unavailable flag is already set in
|
||||
* alloc_orinocodev() */
|
||||
priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN;
|
||||
|
||||
/* Initialize the firmware */
|
||||
err = orinoco_reinit_firmware(dev);
|
||||
err = hermes_init(hw);
|
||||
if (err != 0) {
|
||||
printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
|
||||
dev->name, err);
|
||||
@ -2337,6 +2289,13 @@ static int orinoco_init(struct net_device *dev)
|
||||
|
||||
printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
|
||||
|
||||
err = orinoco_allocate_fid(dev);
|
||||
if (err) {
|
||||
printk(KERN_ERR "%s: failed to allocate NIC buffer!\n",
|
||||
dev->name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Get allowed channels */
|
||||
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
|
||||
&priv->channel_mask);
|
||||
@ -2427,7 +2386,6 @@ static int orinoco_init(struct net_device *dev)
|
||||
printk(KERN_DEBUG "%s: ready\n", dev->name);
|
||||
|
||||
out:
|
||||
TRACE_EXIT(dev->name);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -2795,8 +2753,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
|
||||
int numrates;
|
||||
int i, k;
|
||||
|
||||
TRACE_ENTER(dev->name);
|
||||
|
||||
rrq->length = sizeof(struct iw_range);
|
||||
memset(range, 0, sizeof(struct iw_range));
|
||||
|
||||
@ -2886,8 +2842,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
|
||||
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
|
||||
IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
|
||||
|
||||
TRACE_EXIT(dev->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3069,8 +3023,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
|
||||
int err = 0;
|
||||
unsigned long flags;
|
||||
|
||||
TRACE_ENTER(dev->name);
|
||||
|
||||
if (netif_running(dev)) {
|
||||
err = orinoco_hw_get_essid(priv, &active, essidbuf);
|
||||
if (err)
|
||||
@ -3085,8 +3037,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
|
||||
erq->flags = 1;
|
||||
erq->length = strlen(essidbuf) + 1;
|
||||
|
||||
TRACE_EXIT(dev->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -4346,69 +4296,6 @@ static struct ethtool_ops orinoco_ethtool_ops = {
|
||||
.get_link = ethtool_op_get_link,
|
||||
};
|
||||
|
||||
/********************************************************************/
|
||||
/* Debugging */
|
||||
/********************************************************************/
|
||||
|
||||
#if 0
|
||||
static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
|
||||
{
|
||||
printk(KERN_DEBUG "RX descriptor:\n");
|
||||
printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status);
|
||||
printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time);
|
||||
printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence);
|
||||
printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal);
|
||||
printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate);
|
||||
printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow);
|
||||
printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved);
|
||||
|
||||
printk(KERN_DEBUG "IEEE 802.11 header:\n");
|
||||
printk(KERN_DEBUG " frame_ctl = 0x%04x\n",
|
||||
frame->p80211.frame_ctl);
|
||||
printk(KERN_DEBUG " duration_id = 0x%04x\n",
|
||||
frame->p80211.duration_id);
|
||||
printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
frame->p80211.addr1[0], frame->p80211.addr1[1],
|
||||
frame->p80211.addr1[2], frame->p80211.addr1[3],
|
||||
frame->p80211.addr1[4], frame->p80211.addr1[5]);
|
||||
printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
frame->p80211.addr2[0], frame->p80211.addr2[1],
|
||||
frame->p80211.addr2[2], frame->p80211.addr2[3],
|
||||
frame->p80211.addr2[4], frame->p80211.addr2[5]);
|
||||
printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
frame->p80211.addr3[0], frame->p80211.addr3[1],
|
||||
frame->p80211.addr3[2], frame->p80211.addr3[3],
|
||||
frame->p80211.addr3[4], frame->p80211.addr3[5]);
|
||||
printk(KERN_DEBUG " seq_ctl = 0x%04x\n",
|
||||
frame->p80211.seq_ctl);
|
||||
printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
frame->p80211.addr4[0], frame->p80211.addr4[1],
|
||||
frame->p80211.addr4[2], frame->p80211.addr4[3],
|
||||
frame->p80211.addr4[4], frame->p80211.addr4[5]);
|
||||
printk(KERN_DEBUG " data_len = 0x%04x\n",
|
||||
frame->p80211.data_len);
|
||||
|
||||
printk(KERN_DEBUG "IEEE 802.3 header:\n");
|
||||
printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
frame->p8023.h_dest[0], frame->p8023.h_dest[1],
|
||||
frame->p8023.h_dest[2], frame->p8023.h_dest[3],
|
||||
frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
|
||||
printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
frame->p8023.h_source[0], frame->p8023.h_source[1],
|
||||
frame->p8023.h_source[2], frame->p8023.h_source[3],
|
||||
frame->p8023.h_source[4], frame->p8023.h_source[5]);
|
||||
printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto);
|
||||
|
||||
printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
|
||||
printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap);
|
||||
printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap);
|
||||
printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl);
|
||||
printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n",
|
||||
frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
|
||||
printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype);
|
||||
}
|
||||
#endif /* 0 */
|
||||
|
||||
/********************************************************************/
|
||||
/* Module initialization */
|
||||
/********************************************************************/
|
||||
|
@@ -7,7 +7,7 @@
#ifndef _ORINOCO_H
#define _ORINOCO_H

#define DRIVER_VERSION "0.15rc3"
#define DRIVER_VERSION "0.15"

#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -30,20 +30,6 @@ struct orinoco_key {
char data[ORINOCO_MAX_KEY_SIZE];
} __attribute__ ((packed));

struct header_struct {
/* 802.3 */
u8 dest[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
/* 802.2 */
u8 dsap;
u8 ssap;
u8 ctrl;
/* SNAP */
u8 oui[3];
unsigned short ethertype;
} __attribute__ ((packed));

typedef enum {
FIRMWARE_TYPE_AGERE,
FIRMWARE_TYPE_INTERSIL,
@@ -132,9 +118,6 @@ extern int orinoco_debug;
#define DEBUG(n, args...) do { } while (0)
#endif /* ORINOCO_DEBUG */

#define TRACE_ENTER(devname) DEBUG(2, "%s: -> %s()\n", devname, __FUNCTION__);
#define TRACE_EXIT(devname) DEBUG(2, "%s: <- %s()\n", devname, __FUNCTION__);

/********************************************************************/
/* Exported prototypes */
/********************************************************************/
@ -147,14 +147,11 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
|
||||
{
|
||||
struct net_device *dev = link->priv;
|
||||
|
||||
if (link->dev_node)
|
||||
unregister_netdev(dev);
|
||||
|
||||
orinoco_cs_release(link);
|
||||
|
||||
DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
|
||||
if (link->dev_node) {
|
||||
DEBUG(0, PFX "About to unregister net device %p\n",
|
||||
dev);
|
||||
unregister_netdev(dev);
|
||||
}
|
||||
free_orinocodev(dev);
|
||||
} /* orinoco_cs_detach */
|
||||
|
||||
@ -178,13 +175,10 @@ orinoco_cs_config(struct pcmcia_device *link)
|
||||
int last_fn, last_ret;
|
||||
u_char buf[64];
|
||||
config_info_t conf;
|
||||
cisinfo_t info;
|
||||
tuple_t tuple;
|
||||
cisparse_t parse;
|
||||
void __iomem *mem;
|
||||
|
||||
CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
|
||||
|
||||
/*
|
||||
* This reads the card's CONFIG tuple to find its
|
||||
* configuration registers.
|
||||
@ -234,12 +228,6 @@ orinoco_cs_config(struct pcmcia_device *link)
|
||||
goto next_entry;
|
||||
link->conf.ConfigIndex = cfg->index;
|
||||
|
||||
/* Does this card need audio output? */
|
||||
if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
|
||||
link->conf.Attributes |= CONF_ENABLE_SPKR;
|
||||
link->conf.Status = CCSR_AUDIO_ENA;
|
||||
}
|
||||
|
||||
/* Use power settings for Vcc and Vpp if present */
|
||||
/* Note that the CIS values need to be rescaled */
|
||||
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
|
||||
@ -355,19 +343,10 @@ orinoco_cs_config(struct pcmcia_device *link)
|
||||
net_device has been registered */
|
||||
|
||||
/* Finally, report what we've done */
|
||||
printk(KERN_DEBUG "%s: index 0x%02x: ",
|
||||
dev->name, link->conf.ConfigIndex);
|
||||
if (link->conf.Vpp)
|
||||
printk(", Vpp %d.%d", link->conf.Vpp / 10,
|
||||
link->conf.Vpp % 10);
|
||||
printk(", irq %d", link->irq.AssignedIRQ);
|
||||
if (link->io.NumPorts1)
|
||||
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
|
||||
link->io.BasePort1 + link->io.NumPorts1 - 1);
|
||||
if (link->io.NumPorts2)
|
||||
printk(" & 0x%04x-0x%04x", link->io.BasePort2,
|
||||
link->io.BasePort2 + link->io.NumPorts2 - 1);
|
||||
printk("\n");
|
||||
printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
|
||||
"0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id,
|
||||
link->irq.AssignedIRQ, link->io.BasePort1,
|
||||
link->io.BasePort1 + link->io.NumPorts1 - 1);
|
||||
|
||||
return 0;
|
||||
|
||||
@ -436,7 +415,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
|
||||
struct orinoco_private *priv = netdev_priv(dev);
|
||||
struct orinoco_pccard *card = priv->card;
|
||||
int err = 0;
|
||||
unsigned long flags;
|
||||
|
||||
if (! test_bit(0, &card->hard_reset_in_progress)) {
|
||||
err = orinoco_reinit_firmware(dev);
|
||||
@ -446,7 +424,7 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock(&priv->lock);
|
||||
|
||||
netif_device_attach(dev);
|
||||
priv->hw_unavailable--;
|
||||
@ -458,10 +436,10 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
|
||||
dev->name, err);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock(&priv->lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff.