Merge branch 'davem-next' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

commit 925068dcdc

Linux Base Driver for 10 Gigabit Intel(R) Network Connection
=============================================================

October 9, 2007


Contents
========

- In This Release
- Identifying Your Adapter
- Building and Installation
- Command Line Parameters
- Improving Performance
- Additional Configurations
- Known Issues/Troubleshooting
- Support

In This Release
===============

This file describes the ixgb Linux Base Driver for the 10 Gigabit Intel(R)
Network Connection.  This driver includes support for Itanium(R)2-based
systems.

For questions related to hardware requirements, refer to the documentation
supplied with your 10 Gigabit adapter.  All hardware requirements listed apply
to use with Linux.

The following features are available in this kernel:
 - Native VLANs
 - Channel Bonding (teaming)
 - SNMP

Channel Bonding documentation can be found in the Linux kernel source:
/Documentation/networking/bonding.txt

The driver information previously displayed in the /proc filesystem is not
supported in this release.  Alternatively, you can use ethtool (version 1.6
or later), lspci, and ifconfig to obtain the same information.

Instructions on updating ethtool can be found in the section "Additional
Configurations" later in this document.

Identifying Your Adapter
========================

The following Intel network adapters are compatible with the drivers in this
release:

Controller  Adapter Name                     Physical Layer
----------  ------------                     --------------
82597EX     Intel(R) PRO/10GbE LR/SR/CX4     10G Base-LR (1310 nm optical fiber)
            Server Adapters                  10G Base-SR (850 nm optical fiber)
                                             10G Base-CX4 (twin-axial copper cabling)

For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:

    http://support.intel.com/support/network/sb/CS-012904.htm

For the latest Intel network drivers for Linux, go to:

    http://downloadfinder.intel.com/scripts-df/support_intel.asp

Building and Installation
=========================

Select m for "Intel(R) PRO/10GbE support" located at:
      Location:
        -> Device Drivers
          -> Network device support (NETDEVICES [=y])
            -> Ethernet (10000 Mbit) (NETDEV_10000 [=y])
1. make modules && make modules_install

2. Load the module:

       modprobe ixgb <parameter>=<value>

   The insmod command can be used if the full
   path to the driver module is specified.  For example:

       insmod /lib/modules/<KERNEL VERSION>/kernel/drivers/net/ixgb/ixgb.ko

   With 2.6 based kernels also make sure that older ixgb drivers are
   removed from the kernel before loading the new module:

       rmmod ixgb; modprobe ixgb

3. Assign an IP address to the interface by entering the following, where
   x is the interface number:

       ifconfig ethx <IP_address>

4. Verify that the interface works.  Enter the following, where <IP_address>
   is the IP address for another machine on the same subnet as the interface
   that is being tested:

       ping <IP_address>

Command Line Parameters
=======================

If the driver is built as a module, the following optional parameters are
used by entering them on the command line with the modprobe command using
this syntax:

     modprobe ixgb [<option>=<VAL1>,<VAL2>,...]

For example, with two 10GbE PCI adapters, entering:

     modprobe ixgb TxDescriptors=80,128

loads the ixgb driver with 80 TX resources for the first adapter and 128 TX
resources for the second adapter.

The default value for each parameter is generally the recommended setting,
unless otherwise noted.  Also, if the driver is statically built into the
kernel, the driver is loaded with the default values for all the parameters.
Ethtool can be used to change some of the parameters at runtime.

FlowControl
Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
Default: Read from the EEPROM
         If EEPROM is not detected, default is 1
This parameter controls the automatic generation (Tx) and response (Rx) to
Ethernet PAUSE frames.  There are hardware bugs associated with enabling
Tx flow control, so beware.

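Flow control can also be inspected and adjusted while the driver is loaded,
assuming your ethtool version supports the pause options and eth1 is the
ixgb interface (both are assumptions; adjust to your setup):

     ethtool -a eth1                # show current pause-frame settings
     ethtool -A eth1 rx on tx off   # respond to PAUSE frames, do not send them
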
RxDescriptors
Valid Range: 64-512
Default Value: 512
This value is the number of receive descriptors allocated by the driver.
Increasing this value allows the driver to buffer more incoming packets.
Each descriptor is 16 bytes.  A receive buffer is also allocated for
each descriptor and can be either 2048, 4056, 8192, or 16384 bytes,
depending on the MTU setting.  When the MTU size is 1500 or less, the
receive buffer size is 2048 bytes.  When the MTU is greater than 1500 the
receive buffer size will be either 4056, 8192, or 16384 bytes.  The
maximum MTU size is 16114.

RxIntDelay
Valid Range: 0-65535 (0=off)
Default Value: 72
This value delays the generation of receive interrupts in units of
0.8192 microseconds.  Receive interrupt reduction can improve CPU
efficiency if properly tuned for specific network traffic.  Increasing
this value adds extra latency to frame reception and can end up
decreasing the throughput of TCP traffic.  If the system is reporting
dropped receives, this value may be set too high, causing the driver to
run out of available receive descriptors.

TxDescriptors
Valid Range: 64-4096
Default Value: 256
This value is the number of transmit descriptors allocated by the driver.
Increasing this value allows the driver to queue more transmits.  Each
descriptor is 16 bytes.

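On kernels and ethtool versions that expose ring-size control, the descriptor
counts above can also be viewed and changed at runtime; a sketch, assuming the
interface is eth1 and the driver supports these operations:

     ethtool -g eth1                # show current RX/TX ring sizes
     ethtool -G eth1 rx 512 tx 256  # request new ring sizes
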
XsumRX
Valid Range: 0-1
Default Value: 1
A value of '1' indicates that the driver should enable IP checksum
offload for received packets (both UDP and TCP) to the adapter hardware.

XsumTX
Valid Range: 0-1
Default Value: 1
A value of '1' indicates that the driver should enable IP checksum
offload for transmitted packets (both UDP and TCP) to the adapter
hardware.

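Checksum offload can likewise be checked and toggled at runtime through
ethtool, assuming your ethtool version supports the offload options (an
assumption; the module parameters above remain the documented interface):

     ethtool -k eth1                # show offload settings
     ethtool -K eth1 rx off tx off  # disable RX/TX checksum offload
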
Improving Performance
=====================

With the 10 Gigabit server adapters, the default Linux configuration will
very likely limit the total available throughput artificially.  There is a set
of configuration changes that, when applied together, will increase the ability
of Linux to transmit and receive data.  The following enhancements were
originally acquired from settings published at http://www.spec.org/web99/ for
various submitted results using Linux.

NOTE: These changes are only suggestions, and serve as a starting point for
      tuning your network performance.

The changes are made in three major ways, listed in order of greatest effect:
- Use ifconfig to modify the mtu (maximum transmission unit) and the txqueuelen
  parameter.
- Use sysctl to modify /proc parameters (essentially kernel tuning)
- Use setpci to modify the MMRBC field in PCI-X configuration space to increase
  transmit burst lengths on the bus.

NOTE: setpci modifies the adapter's configuration registers to allow it to read
up to 4k bytes at a time (for transmits).  However, for some systems the
behavior after modifying this register may be undefined (possibly errors of
some kind).  A power-cycle, hard reset or explicitly setting the e6 register
back to 22 (setpci -d 8086:1a48 e6.b=22) may be required to get back to a
stable configuration.

- COPY these lines and paste them into ixgb_perf.sh:
#!/bin/bash
echo "configuring network performance, edit this file to change the interface
or device ID of 10GbE card"
# set mmrbc to 4k reads, modify only Intel 10GbE device IDs
# replace 1a48 with the appropriate 10GbE device ID installed on the system,
# if needed.
setpci -d 8086:1a48 e6.b=2e
# set the MTU (max transmission unit) - it requires your switch and clients
# to change as well.
# set the txqueuelen
# your ixgb adapter should be loaded as eth1 for this to work, change if needed
ifconfig eth1 mtu 9000 txqueuelen 1000 up
# call the sysctl utility to modify /proc/sys entries
sysctl -p ./sysctl_ixgb.conf
- END ixgb_perf.sh

- COPY these lines and paste them into sysctl_ixgb.conf:
# several network benchmark tests, your mileage may vary

### IPV4 specific settings
# turn TCP timestamp support off, default 1, reduces CPU use
net.ipv4.tcp_timestamps = 0
# turn SACK support off, default on
# on systems with a VERY fast bus -> memory interface this is the big gainer
net.ipv4.tcp_sack = 0
# set min/default/max TCP read buffer, default 4096 87380 174760
net.ipv4.tcp_rmem = 10000000 10000000 10000000
# set min/pressure/max TCP write buffer, default 4096 16384 131072
net.ipv4.tcp_wmem = 10000000 10000000 10000000
# set min/pressure/max TCP buffer space, default 31744 32256 32768
net.ipv4.tcp_mem = 10000000 10000000 10000000

### CORE settings (mostly for socket and UDP effect)
# set maximum receive socket buffer size, default 131071
net.core.rmem_max = 524287
# set maximum send socket buffer size, default 131071
net.core.wmem_max = 524287
# set default receive socket buffer size, default 65535
net.core.rmem_default = 524287
# set default send socket buffer size, default 65535
net.core.wmem_default = 524287
# set maximum amount of option memory buffers, default 10240
net.core.optmem_max = 524287
# set number of unprocessed input packets before kernel starts dropping them; default 300
net.core.netdev_max_backlog = 300000
- END sysctl_ixgb.conf

Edit the ixgb_perf.sh script if necessary to change eth1 to whatever interface
your ixgb driver is using and/or replace '1a48' with the appropriate 10GbE
device ID installed on the system.

NOTE: Unless these scripts are added to the boot process, these changes will
      only last until the next system reboot.

Resolving Slow UDP Traffic
--------------------------
If your server does not seem to be able to receive UDP traffic as fast as it
can receive TCP traffic, it could be because Linux, by default, does not set
the network stack buffers as large as they need to be to support high UDP
transfer rates.  One way to alleviate this problem is to allow more memory to
be used by the IP stack to store incoming data.

For instance, use the commands:

     sysctl -w net.core.rmem_max=262143
and
     sysctl -w net.core.rmem_default=262143

to increase the read buffer memory max and default to 262143 (256k - 1) from
defaults of max=131071 (128k - 1) and default=65535 (64k - 1).  These variables
will increase the amount of memory used by the network stack for receives, and
can be increased significantly more if necessary for your application.

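Values set with sysctl -w are lost at reboot.  One way to make them persistent
(a sketch, assuming your distribution applies /etc/sysctl.conf at boot) is to
add the settings there and reload them:

     echo "net.core.rmem_max = 262143" >> /etc/sysctl.conf
     echo "net.core.rmem_default = 262143" >> /etc/sysctl.conf
     sysctl -p
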
Additional Configurations
=========================

Configuring the Driver on Different Distributions
-------------------------------------------------
Configuring a network driver to load properly when the system is started is
distribution dependent.  Typically, the configuration process involves adding
an alias line to /etc/modprobe.conf as well as editing other system startup
scripts and/or configuration files.  Many popular Linux distributions ship
with tools to make these changes for you.  To learn the proper way to
configure a network device for your system, refer to your distribution
documentation.  If during this process you are asked for the driver or module
name, the name for the Linux Base Driver for the Intel 10GbE Family of
Adapters is ixgb.

Viewing Link Messages
---------------------
Link messages will not be displayed to the console if the distribution is
restricting system messages.  In order to see network driver link messages on
your console, set dmesg to eight by entering the following:

     dmesg -n 8

NOTE: This setting is not saved across reboots.

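If you want a verbose console log level to survive reboots, one option (an
assumption about your setup; the exact mechanism is distribution dependent)
is to set the kernel.printk sysctl from a boot script or /etc/sysctl.conf:

     sysctl -w kernel.printk="8 4 1 7"
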
Jumbo Frames
------------
The driver supports Jumbo Frames for all adapters.  Jumbo Frames support is
enabled by changing the MTU to a value larger than the default of 1500.
The maximum value for the MTU is 16114.  Use the ifconfig command to
increase the MTU size.  For example:

     ifconfig ethx mtu 9000 up

The maximum MTU setting for Jumbo Frames is 16114.  This value coincides
with the maximum Jumbo Frames size of 16128.

Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information.  Ethtool
version 1.6 or later is required for this functionality.

The latest release of ethtool can be found from
http://sourceforge.net/projects/gkernel

NOTE: Ethtool 1.6 only supports a limited set of ethtool options.  Support
for a more complete ethtool feature set can be enabled by upgrading
to the latest version.

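As a starting point, a few commonly useful ethtool invocations are shown
below; eth1 is an assumed interface name, and the statistics option depends
on driver support:

     ethtool -i eth1                # driver name and version
     ethtool -S eth1                # adapter and driver statistics
     ethtool eth1                   # link speed, duplex and link state
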
NAPI
----

NAPI (Rx polling mode) is supported in the ixgb driver.  NAPI is enabled
or disabled based on the configuration of the kernel (see CONFIG_IXGB_NAPI).

See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.

Known Issues/Troubleshooting
============================

NOTE: After installing the driver, if your Intel Network Connection is not
working, verify in the "In This Release" section of the readme that you have
installed the correct driver.

Intel(R) PRO/10GbE CX4 Server Adapter Cable Interoperability Issue with
Fujitsu XENPAK Module in SmartBits Chassis
------------------------------------------------------------------------
Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4
Server adapter is connected to a Fujitsu XENPAK CX4 module in a SmartBits
chassis using 15 m/24AWG cable assemblies manufactured by Fujitsu or Leoni.
The CRC errors may be received either by the Intel(R) PRO/10GbE CX4
Server adapter or the SmartBits.  If this situation occurs, using a different
cable assembly may resolve the issue.

CX4 Server Adapter Cable Interoperability Issues with HP Procurve 3400cl
Switch Port
------------------------------------------------------------------------
Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4 Server
adapter is connected to an HP Procurve 3400cl switch port using short cables
(1 m or shorter).  If this situation occurs, using a longer cable may resolve
the issue.

Excessive CRC errors may be observed when using Fujitsu 24AWG cable assemblies
that are 10 m or longer, or when using a Leoni 15 m/24AWG cable assembly.  The
CRC errors may be received either by the CX4 Server adapter or at the switch.
If this situation occurs, using a different cable assembly may resolve the
issue.

Jumbo Frames System Requirement
-------------------------------
Memory allocation failures have been observed on Linux systems with 64 MB
of RAM or less that are running Jumbo Frames.  If you are using Jumbo
Frames, your system may require more than the advertised minimum
requirement of 64 MB of system memory.

Performance Degradation with Jumbo Frames
-----------------------------------------
Degradation in throughput performance may be observed in some Jumbo frames
environments.  If this is observed, increasing the application's socket buffer
size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
See the specific application manual and /usr/src/linux*/Documentation/
networking/ip-sysctl.txt for more details.

Allocating Rx Buffers when Using Jumbo Frames
---------------------------------------------
Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
the available memory is heavily fragmented.  This issue may be seen with PCI-X
adapters or with packet split disabled.  This can be reduced or eliminated
by changing the amount of available memory for receive buffer allocation, by
increasing /proc/sys/vm/min_free_kbytes.

Multiple Interfaces on Same Ethernet Broadcast Network
------------------------------------------------------
Due to the default ARP behavior on Linux, it is not possible to have
one system on two IP networks in the same Ethernet broadcast domain
(non-partitioned switch) behave as expected.  All Ethernet interfaces
will respond to IP traffic for any IP address assigned to the system.
This results in unbalanced receive traffic.

If you have multiple interfaces in a server, do either of the following:

  - Turn on ARP filtering by entering (see the note after this list on
    making the setting persistent):
      echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter

  - Install the interfaces in separate broadcast domains - either in
    different switches or in a switch partitioned to VLANs.

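As with the other /proc settings in this document, the ARP filter value is
lost at reboot.  A sketch of making it persistent, assuming your distribution
applies /etc/sysctl.conf at boot:

      echo "net.ipv4.conf.all.arp_filter = 1" >> /etc/sysctl.conf
      sysctl -p
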
UDP Stress Test Dropped Packet Issue
------------------------------------
Under a small-packet UDP stress test with the 10GbE driver, the Linux system
may drop UDP packets because the socket buffers are full.  You may want
to change the driver's Flow Control variables to the minimum value for
controlling packet reception.

Tx Hangs Possible Under Stress
------------------------------
Under stress conditions, if TX hangs occur, turning off TSO
"ethtool -K eth0 tso off" may resolve the problem.

Support
=======

For general information, go to the Intel support website at:

    http://support.intel.com

or the Intel Wired Networking project hosted by Sourceforge at:

    http://sourceforge.net/projects/e1000

If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related
to the issue to e1000-devel@lists.sf.net.
@ -1694,26 +1694,6 @@ config VIA_RHINE_MMIO
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config VIA_RHINE_NAPI
|
||||
bool "Use Rx Polling (NAPI)"
|
||||
depends on VIA_RHINE
|
||||
help
|
||||
NAPI is a new driver API designed to reduce CPU and interrupt load
|
||||
when the driver is receiving lots of packets from the card.
|
||||
|
||||
If your estimated Rx load is 10kpps or more, or if the card will be
|
||||
deployed on potentially unfriendly networks (e.g. in a firewall),
|
||||
then say Y here.
|
||||
|
||||
config LAN_SAA9730
|
||||
bool "Philips SAA9730 Ethernet support"
|
||||
depends on NET_PCI && PCI && MIPS_ATLAS
|
||||
help
|
||||
The SAA9730 is a combined multimedia and peripheral controller used
|
||||
in thin clients, Internet access terminals, and diskless
|
||||
workstations.
|
||||
See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
|
||||
|
||||
config SC92031
|
||||
tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
|
||||
depends on NET_PCI && PCI && EXPERIMENTAL
|
||||
@ -2029,6 +2009,15 @@ config IGB
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called igb.
|
||||
|
||||
config IGB_LRO
|
||||
bool "Use software LRO"
|
||||
depends on IGB && INET
|
||||
select INET_LRO
|
||||
---help---
|
||||
Say Y here if you want to use large receive offload.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
source "drivers/net/ixp2000/Kconfig"
|
||||
|
||||
config MYRI_SBUS
|
||||
@ -2273,10 +2262,6 @@ config GIANFAR
|
||||
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
|
||||
and MPC86xx family of chips, and the FEC on the 8540.
|
||||
|
||||
config GFAR_NAPI
|
||||
bool "Use Rx Polling (NAPI)"
|
||||
depends on GIANFAR
|
||||
|
||||
config UCC_GETH
|
||||
tristate "Freescale QE Gigabit Ethernet"
|
||||
depends on QUICC_ENGINE
|
||||
@ -2285,10 +2270,6 @@ config UCC_GETH
|
||||
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
|
||||
which is available on some Freescale SOCs.
|
||||
|
||||
config UGETH_NAPI
|
||||
bool "Use Rx Polling (NAPI)"
|
||||
depends on UCC_GETH
|
||||
|
||||
config UGETH_MAGIC_PACKET
|
||||
bool "Magic Packet detection support"
|
||||
depends on UCC_GETH
|
||||
@ -2378,14 +2359,6 @@ config CHELSIO_T1_1G
|
||||
Enables support for Chelsio's gigabit Ethernet PCI cards. If you
|
||||
are using only 10G cards say 'N' here.
|
||||
|
||||
config CHELSIO_T1_NAPI
|
||||
bool "Use Rx Polling (NAPI)"
|
||||
depends on CHELSIO_T1
|
||||
default y
|
||||
help
|
||||
NAPI is a driver API designed to reduce CPU and interrupt load
|
||||
when the driver is receiving lots of packets from the card.
|
||||
|
||||
config CHELSIO_T3
|
||||
tristate "Chelsio Communications T3 10Gb Ethernet support"
|
||||
depends on PCI && INET
|
||||
@ -2457,20 +2430,6 @@ config IXGB
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called ixgb.
|
||||
|
||||
config IXGB_NAPI
|
||||
bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
|
||||
depends on IXGB && EXPERIMENTAL
|
||||
help
|
||||
NAPI is a new driver API designed to reduce CPU and interrupt load
|
||||
when the driver is receiving lots of packets from the card. It is
|
||||
still somewhat experimental and thus not yet enabled by default.
|
||||
|
||||
If your estimated Rx load is 10kpps or more, or if the card will be
|
||||
deployed on potentially unfriendly networks (e.g. in a firewall),
|
||||
then say Y here.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config S2IO
|
||||
tristate "S2IO 10Gbe XFrame NIC"
|
||||
depends on PCI
|
||||
|
@ -166,7 +166,6 @@ obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
|
||||
obj-$(CONFIG_8139CP) += 8139cp.o
|
||||
obj-$(CONFIG_8139TOO) += 8139too.o
|
||||
obj-$(CONFIG_ZNET) += znet.o
|
||||
obj-$(CONFIG_LAN_SAA9730) += saa9730.o
|
||||
obj-$(CONFIG_CPMAC) += cpmac.o
|
||||
obj-$(CONFIG_DEPCA) += depca.o
|
||||
obj-$(CONFIG_EWRK3) += ewrk3.o
|
||||
|
@ -1153,9 +1153,7 @@ static int __devinit init_one(struct pci_dev *pdev,
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
netdev->poll_controller = t1_netpoll;
|
||||
#endif
|
||||
#ifdef CONFIG_CHELSIO_T1_NAPI
|
||||
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
|
||||
#endif
|
||||
|
||||
SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
|
||||
}
|
||||
|
@ -1396,20 +1396,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
|
||||
|
||||
if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
|
||||
st->vlan_xtract++;
|
||||
#ifdef CONFIG_CHELSIO_T1_NAPI
|
||||
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
|
||||
ntohs(p->vlan));
|
||||
#else
|
||||
vlan_hwaccel_rx(skb, adapter->vlan_grp,
|
||||
ntohs(p->vlan));
|
||||
#endif
|
||||
} else {
|
||||
#ifdef CONFIG_CHELSIO_T1_NAPI
|
||||
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
|
||||
ntohs(p->vlan));
|
||||
} else
|
||||
netif_receive_skb(skb);
|
||||
#else
|
||||
netif_rx(skb);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1568,7 +1558,6 @@ static inline int responses_pending(const struct adapter *adapter)
|
||||
return (e->GenerationBit == Q->genbit);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CHELSIO_T1_NAPI
|
||||
/*
|
||||
* A simpler version of process_responses() that handles only pure (i.e.,
|
||||
* non data-carrying) responses. Such responses are too light-weight to justify
|
||||
@ -1636,9 +1625,6 @@ int t1_poll(struct napi_struct *napi, int budget)
|
||||
return work_done;
|
||||
}
|
||||
|
||||
/*
|
||||
* NAPI version of the main interrupt handler.
|
||||
*/
|
||||
irqreturn_t t1_interrupt(int irq, void *data)
|
||||
{
|
||||
struct adapter *adapter = data;
|
||||
@ -1656,7 +1642,8 @@ irqreturn_t t1_interrupt(int irq, void *data)
|
||||
else {
|
||||
/* no data, no NAPI needed */
|
||||
writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
|
||||
napi_enable(&adapter->napi); /* undo schedule_prep */
|
||||
/* undo schedule_prep */
|
||||
napi_enable(&adapter->napi);
|
||||
}
|
||||
}
|
||||
return IRQ_HANDLED;
|
||||
@ -1672,53 +1659,6 @@ irqreturn_t t1_interrupt(int irq, void *data)
|
||||
return IRQ_RETVAL(handled != 0);
|
||||
}
|
||||
|
||||
#else
|
||||
/*
|
||||
* Main interrupt handler, optimized assuming that we took a 'DATA'
|
||||
* interrupt.
|
||||
*
|
||||
* 1. Clear the interrupt
|
||||
* 2. Loop while we find valid descriptors and process them; accumulate
|
||||
* information that can be processed after the loop
|
||||
* 3. Tell the SGE at which index we stopped processing descriptors
|
||||
* 4. Bookkeeping; free TX buffers, ring doorbell if there are any
|
||||
* outstanding TX buffers waiting, replenish RX buffers, potentially
|
||||
* reenable upper layers if they were turned off due to lack of TX
|
||||
* resources which are available again.
|
||||
* 5. If we took an interrupt, but no valid respQ descriptors was found we
|
||||
* let the slow_intr_handler run and do error handling.
|
||||
*/
|
||||
irqreturn_t t1_interrupt(int irq, void *cookie)
|
||||
{
|
||||
int work_done;
|
||||
struct adapter *adapter = cookie;
|
||||
struct respQ *Q = &adapter->sge->respQ;
|
||||
|
||||
spin_lock(&adapter->async_lock);
|
||||
|
||||
writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
|
||||
|
||||
if (likely(responses_pending(adapter)))
|
||||
work_done = process_responses(adapter, -1);
|
||||
else
|
||||
work_done = t1_slow_intr_handler(adapter);
|
||||
|
||||
/*
|
||||
* The unconditional clearing of the PL_CAUSE above may have raced
|
||||
* with DMA completion and the corresponding generation of a response
|
||||
* to cause us to miss the resulting data interrupt. The next write
|
||||
* is also unconditional to recover the missed interrupt and render
|
||||
* this race harmless.
|
||||
*/
|
||||
writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
|
||||
|
||||
if (!work_done)
|
||||
adapter->sge->stats.unhandled_irqs++;
|
||||
spin_unlock(&adapter->async_lock);
|
||||
return IRQ_RETVAL(work_done != 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
|
||||
*
|
||||
|
@ -110,10 +110,7 @@ struct ulp_iscsi_info {
|
||||
unsigned int llimit;
|
||||
unsigned int ulimit;
|
||||
unsigned int tagmask;
|
||||
unsigned int pgsz3;
|
||||
unsigned int pgsz2;
|
||||
unsigned int pgsz1;
|
||||
unsigned int pgsz0;
|
||||
u8 pgsz_factor[4];
|
||||
unsigned int max_rxsz;
|
||||
unsigned int max_txsz;
|
||||
struct pci_dev *pdev;
|
||||
|
@ -207,6 +207,17 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
|
||||
break;
|
||||
case ULP_ISCSI_SET_PARAMS:
|
||||
t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
|
||||
/* set MaxRxData and MaxCoalesceSize to 16224 */
|
||||
t3_write_reg(adapter, A_TP_PARA_REG2, 0x3f603f60);
|
||||
/* program the ddp page sizes */
|
||||
{
|
||||
int i;
|
||||
unsigned int val = 0;
|
||||
for (i = 0; i < 4; i++)
|
||||
val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
|
||||
if (val)
|
||||
t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ret = -EOPNOTSUPP;
|
||||
|
@ -1517,16 +1517,18 @@
|
||||
|
||||
#define A_ULPRX_ISCSI_TAGMASK 0x514
|
||||
|
||||
#define S_HPZ0 0
|
||||
#define M_HPZ0 0xf
|
||||
#define V_HPZ0(x) ((x) << S_HPZ0)
|
||||
#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
|
||||
#define A_ULPRX_ISCSI_PSZ 0x518
|
||||
|
||||
#define A_ULPRX_TDDP_LLIMIT 0x51c
|
||||
|
||||
#define A_ULPRX_TDDP_ULIMIT 0x520
|
||||
#define A_ULPRX_TDDP_PSZ 0x528
|
||||
|
||||
#define S_HPZ0 0
|
||||
#define M_HPZ0 0xf
|
||||
#define V_HPZ0(x) ((x) << S_HPZ0)
|
||||
#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
|
||||
|
||||
#define A_ULPRX_STAG_LLIMIT 0x52c
|
||||
|
||||
#define A_ULPRX_STAG_ULIMIT 0x530
|
||||
|
@ -191,6 +191,9 @@ union opcode_tid {
|
||||
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
|
||||
#define G_TID(x) ((x) & 0xFFFFFF)
|
||||
|
||||
#define S_QNUM 0
|
||||
#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
|
||||
|
||||
#define S_HASHTYPE 22
|
||||
#define M_HASHTYPE 0x3
|
||||
#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
|
||||
@ -779,6 +782,12 @@ struct tx_data_wr {
|
||||
__be32 param;
|
||||
};
|
||||
|
||||
/* tx_data_wr.flags fields */
|
||||
#define S_TX_ACK_PAGES 21
|
||||
#define M_TX_ACK_PAGES 0x7
|
||||
#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
|
||||
#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
|
||||
|
||||
/* tx_data_wr.param fields */
|
||||
#define S_TX_PORT 0
|
||||
#define M_TX_PORT 0x7
|
||||
@ -1452,4 +1461,35 @@ struct cpl_rdma_terminate {
|
||||
#define M_TERM_TID 0xFFFFF
|
||||
#define V_TERM_TID(x) ((x) << S_TERM_TID)
|
||||
#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
|
||||
|
||||
/* ULP_TX opcodes */
|
||||
enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
|
||||
|
||||
#define S_ULPTX_CMD 28
|
||||
#define M_ULPTX_CMD 0xF
|
||||
#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
|
||||
|
||||
#define S_ULPTX_NFLITS 0
|
||||
#define M_ULPTX_NFLITS 0xFF
|
||||
#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
|
||||
|
||||
struct ulp_mem_io {
|
||||
WR_HDR;
|
||||
__be32 cmd_lock_addr;
|
||||
__be32 len;
|
||||
};
|
||||
|
||||
/* ulp_mem_io.cmd_lock_addr fields */
|
||||
#define S_ULP_MEMIO_ADDR 0
|
||||
#define M_ULP_MEMIO_ADDR 0x7FFFFFF
|
||||
#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
|
||||
#define S_ULP_MEMIO_LOCK 27
|
||||
#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
|
||||
#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
|
||||
|
||||
/* ulp_mem_io.len fields */
|
||||
#define S_ULP_MEMIO_DATA_LEN 28
|
||||
#define M_ULP_MEMIO_DATA_LEN 0xF
|
||||
#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
|
||||
|
||||
#endif /* T3_CPL_H */
|
||||
|
@ -64,6 +64,7 @@ struct t3cdev {
|
||||
void *l3opt; /* optional layer 3 data */
|
||||
void *l4opt; /* optional layer 4 data */
|
||||
void *ulp; /* ulp stuff */
|
||||
void *ulp_iscsi; /* ulp iscsi */
|
||||
};
|
||||
|
||||
#endif /* _T3CDEV_H_ */
|
||||
|
@ -44,8 +44,7 @@
|
||||
* happen immediately, but will wait until either a set number
|
||||
* of frames or amount of time have passed). In NAPI, the
|
||||
* interrupt handler will signal there is work to be done, and
|
||||
* exit. Without NAPI, the packet(s) will be handled
|
||||
* immediately. Both methods will start at the last known empty
|
||||
* exit. This method will start at the last known empty
|
||||
* descriptor, and process every subsequent descriptor until there
|
||||
* are none left with data (NAPI will stop after a set number of
|
||||
* packets to give time to other tasks, but will eventually
|
||||
@ -101,12 +100,6 @@
|
||||
#undef BRIEF_GFAR_ERRORS
|
||||
#undef VERBOSE_GFAR_ERRORS
|
||||
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
#define RECEIVE(x) netif_receive_skb(x)
|
||||
#else
|
||||
#define RECEIVE(x) netif_rx(x)
|
||||
#endif
|
||||
|
||||
const char gfar_driver_name[] = "Gianfar Ethernet";
|
||||
const char gfar_driver_version[] = "1.3";
|
||||
|
||||
@ -131,9 +124,7 @@ static void free_skb_resources(struct gfar_private *priv);
|
||||
static void gfar_set_multi(struct net_device *dev);
|
||||
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
|
||||
static void gfar_configure_serdes(struct net_device *dev);
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
static int gfar_poll(struct napi_struct *napi, int budget);
|
||||
#endif
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
static void gfar_netpoll(struct net_device *dev);
|
||||
#endif
|
||||
@ -260,9 +251,7 @@ static int gfar_probe(struct platform_device *pdev)
|
||||
dev->hard_start_xmit = gfar_start_xmit;
|
||||
dev->tx_timeout = gfar_timeout;
|
||||
dev->watchdog_timeo = TX_TIMEOUT;
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
|
||||
#endif
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
dev->poll_controller = gfar_netpoll;
|
||||
#endif
|
||||
@ -363,11 +352,7 @@ static int gfar_probe(struct platform_device *pdev)
|
||||
|
||||
/* Even more device info helps when determining which kernel */
|
||||
/* provided which set of benchmarks. */
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
|
||||
#else
|
||||
printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
|
||||
#endif
|
||||
printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
|
||||
dev->name, priv->rx_ring_size, priv->tx_ring_size);
|
||||
|
||||
@ -945,14 +930,10 @@ tx_skb_fail:
|
||||
/* Returns 0 for success. */
|
||||
static int gfar_enet_open(struct net_device *dev)
|
||||
{
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
struct gfar_private *priv = netdev_priv(dev);
|
||||
#endif
|
||||
int err;
|
||||
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
napi_enable(&priv->napi);
|
||||
#endif
|
||||
|
||||
/* Initialize a bunch of registers */
|
||||
init_registers(dev);
|
||||
@ -962,17 +943,13 @@ static int gfar_enet_open(struct net_device *dev)
|
||||
err = init_phy(dev);
|
||||
|
||||
if(err) {
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
napi_disable(&priv->napi);
|
||||
#endif
|
||||
return err;
|
||||
}
|
||||
|
||||
err = startup_gfar(dev);
|
||||
if (err) {
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
napi_disable(&priv->napi);
|
||||
#endif
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1128,9 +1105,7 @@ static int gfar_close(struct net_device *dev)
|
||||
{
|
||||
struct gfar_private *priv = netdev_priv(dev);
|
||||
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
napi_disable(&priv->napi);
|
||||
#endif
|
||||
|
||||
stop_gfar(dev);
|
||||
|
||||
@ -1427,14 +1402,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
|
||||
{
|
||||
struct net_device *dev = (struct net_device *) dev_id;
|
||||
struct gfar_private *priv = netdev_priv(dev);
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
u32 tempval;
|
||||
#else
|
||||
unsigned long flags;
|
||||
#endif
|
||||
|
||||
/* support NAPI */
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
/* Clear IEVENT, so interrupts aren't called again
|
||||
* because of the packets that have already arrived */
|
||||
gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
|
||||
@ -1451,38 +1421,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
|
||||
dev->name, gfar_read(&priv->regs->ievent),
|
||||
gfar_read(&priv->regs->imask));
|
||||
}
|
||||
#else
|
||||
/* Clear IEVENT, so rx interrupt isn't called again
|
||||
* because of this interrupt */
|
||||
gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
|
||||
|
||||
spin_lock_irqsave(&priv->rxlock, flags);
|
||||
gfar_clean_rx_ring(dev, priv->rx_ring_size);
|
||||
|
||||
/* If we are coalescing interrupts, update the timer */
|
||||
/* Otherwise, clear it */
|
||||
if (likely(priv->rxcoalescing)) {
|
||||
gfar_write(&priv->regs->rxic, 0);
|
||||
gfar_write(&priv->regs->rxic,
|
||||
mk_ic_value(priv->rxcount, priv->rxtime));
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->rxlock, flags);
|
||||
#endif
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static inline int gfar_rx_vlan(struct sk_buff *skb,
|
||||
struct vlan_group *vlgrp, unsigned short vlctl)
|
||||
{
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
|
||||
#else
|
||||
return vlan_hwaccel_rx(skb, vlgrp, vlctl);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
|
||||
{
|
||||
/* If valid headers were found, and valid sums
|
||||
@ -1539,10 +1481,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
|
||||
/* Send the packet up the stack */
|
||||
if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
|
||||
ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
|
||||
else
|
||||
ret = RECEIVE(skb);
|
||||
if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
|
||||
ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
|
||||
fcb->vlctl);
|
||||
} else
|
||||
ret = netif_receive_skb(skb);
|
||||
|
||||
if (NET_RX_DROP == ret)
|
||||
priv->extra_stats.kernel_dropped++;
|
||||
@ -1629,7 +1572,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
|
||||
return howmany;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
static int gfar_poll(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
|
||||
@ -1664,7 +1606,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
return howmany;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
/*
|
||||
@ -2003,11 +1944,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
|
||||
|
||||
gfar_receive(irq, dev_id);
|
||||
|
||||
#ifndef CONFIG_GFAR_NAPI
|
||||
/* Clear the halt bit in RSTAT */
|
||||
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
|
||||
#endif
|
||||
|
||||
if (netif_msg_rx_err(priv))
|
||||
printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
|
||||
dev->name, gfar_read(&priv->regs->rstat));
|
||||
|
@ -77,13 +77,8 @@ extern const char gfar_driver_name[];
|
||||
extern const char gfar_driver_version[];
|
||||
|
||||
/* These need to be powers of 2 for this driver */
|
||||
#ifdef CONFIG_GFAR_NAPI
|
||||
#define DEFAULT_TX_RING_SIZE 256
|
||||
#define DEFAULT_RX_RING_SIZE 256
|
||||
#else
|
||||
#define DEFAULT_TX_RING_SIZE 64
|
||||
#define DEFAULT_RX_RING_SIZE 64
|
||||
#endif
|
||||
|
||||
#define GFAR_RX_MAX_RING_SIZE 256
|
||||
#define GFAR_TX_MAX_RING_SIZE 256
|
||||
@ -128,14 +123,8 @@ extern const char gfar_driver_version[];
|
||||
|
||||
#define DEFAULT_RXTIME 21
|
||||
|
||||
/* Non NAPI Case */
|
||||
#ifndef CONFIG_GFAR_NAPI
|
||||
#define DEFAULT_RX_COALESCE 1
|
||||
#define DEFAULT_RXCOUNT 16
|
||||
#else
|
||||
#define DEFAULT_RX_COALESCE 0
|
||||
#define DEFAULT_RXCOUNT 0
|
||||
#endif /* CONFIG_GFAR_NAPI */
|
||||
|
||||
#define MIIMCFG_INIT_VALUE 0x00000007
|
||||
#define MIIMCFG_RESET 0x80000000
|
||||
|
@ -31,6 +31,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/if_ether.h>
|
||||
|
||||
#include "e1000_mac.h"
|
||||
#include "e1000_82575.h"
|
||||
@ -45,7 +46,6 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *);
|
||||
static s32 igb_init_hw_82575(struct e1000_hw *);
|
||||
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
|
||||
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
|
||||
static void igb_rar_set_82575(struct e1000_hw *, u8 *, u32);
|
||||
static s32 igb_reset_hw_82575(struct e1000_hw *);
|
||||
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
|
||||
static s32 igb_setup_copper_link_82575(struct e1000_hw *);
|
||||
@ -84,6 +84,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
|
||||
case E1000_DEV_ID_82575GB_QUAD_COPPER:
|
||||
mac->type = e1000_82575;
|
||||
break;
|
||||
case E1000_DEV_ID_82576:
|
||||
case E1000_DEV_ID_82576_FIBER:
|
||||
case E1000_DEV_ID_82576_SERDES:
|
||||
case E1000_DEV_ID_82576_QUAD_COPPER:
|
||||
mac->type = e1000_82576;
|
||||
break;
|
||||
default:
|
||||
return -E1000_ERR_MAC_INIT;
|
||||
break;
|
||||
@ -128,6 +134,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
|
||||
mac->mta_reg_count = 128;
|
||||
/* Set rar entry count */
|
||||
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
|
||||
if (mac->type == e1000_82576)
|
||||
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
|
||||
/* Set if part includes ASF firmware */
|
||||
mac->asf_firmware_present = true;
|
||||
/* Set if manageability features are enabled. */
|
||||
@ -694,13 +702,12 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
|
||||
if ((hw->phy.media_type != e1000_media_type_copper) ||
|
||||
(igb_sgmii_active_82575(hw)))
|
||||
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
|
||||
&duplex);
|
||||
&duplex);
|
||||
else
|
||||
ret_val = igb_check_for_copper_link(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
|
||||
* @hw: pointer to the HW structure
|
||||
@ -757,18 +764,129 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_rar_set_82575 - Set receive address register
|
||||
* igb_init_rx_addrs_82575 - Initialize receive addresses
|
||||
* @hw: pointer to the HW structure
|
||||
* @addr: pointer to the receive address
|
||||
* @index: receive address array register
|
||||
* @rar_count: receive address registers
|
||||
*
|
||||
* Sets the receive address array register at index to the address passed
|
||||
* in by addr.
|
||||
* Sets up the receive address registers by setting the base receive address
* register to the device's MAC address and clearing all the other receive
* address registers to 0.
|
||||
**/
|
||||
static void igb_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
|
||||
static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
|
||||
{
|
||||
if (index < E1000_RAR_ENTRIES_82575)
|
||||
igb_rar_set(hw, addr, index);
|
||||
u32 i;
|
||||
u8 addr[6] = {0,0,0,0,0,0};
|
||||
/*
|
||||
* This function is essentially the same as that of
|
||||
* e1000_init_rx_addrs_generic. However it also takes care
|
||||
* of the special case where the register offset of the
|
||||
* second set of RARs begins elsewhere. This is implicitly taken care by
|
||||
* function e1000_rar_set_generic.
|
||||
*/
|
||||
|
||||
hw_dbg("e1000_init_rx_addrs_82575");
|
||||
|
||||
/* Setup the receive address */
|
||||
hw_dbg("Programming MAC Address into RAR[0]\n");
|
||||
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
|
||||
|
||||
/* Zero out the other (rar_entry_count - 1) receive addresses */
|
||||
hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
|
||||
for (i = 1; i < rar_count; i++)
|
||||
hw->mac.ops.rar_set(hw, addr, i);
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_update_mc_addr_list_82575 - Update Multicast addresses
|
||||
* @hw: pointer to the HW structure
|
||||
* @mc_addr_list: array of multicast addresses to program
|
||||
* @mc_addr_count: number of multicast addresses to program
|
||||
* @rar_used_count: the first RAR register free to program
|
||||
* @rar_count: total number of supported Receive Address Registers
|
||||
*
|
||||
* Updates the Receive Address Registers and Multicast Table Array.
|
||||
* The caller must have a packed mc_addr_list of multicast addresses.
|
||||
* The parameter rar_count will usually be hw->mac.rar_entry_count
|
||||
* unless there are workarounds that change this.
|
||||
**/
|
||||
void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
|
||||
u8 *mc_addr_list, u32 mc_addr_count,
|
||||
u32 rar_used_count, u32 rar_count)
|
||||
{
|
||||
u32 hash_value;
|
||||
u32 i;
|
||||
u8 addr[6] = {0,0,0,0,0,0};
|
||||
/*
|
||||
* This function is essentially the same as that of
|
||||
* igb_update_mc_addr_list_generic. However it also takes care
|
||||
* of the special case where the register offset of the
|
||||
* second set of RARs begins elsewhere. This is implicitly taken care by
|
||||
* function e1000_rar_set_generic.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Load the first set of multicast addresses into the exact
|
||||
* filters (RAR). If there are not enough to fill the RAR
|
||||
* array, clear the filters.
|
||||
*/
|
||||
for (i = rar_used_count; i < rar_count; i++) {
|
||||
if (mc_addr_count) {
|
||||
igb_rar_set(hw, mc_addr_list, i);
|
||||
mc_addr_count--;
|
||||
mc_addr_list += ETH_ALEN;
|
||||
} else {
|
||||
igb_rar_set(hw, addr, i);
|
||||
}
|
||||
}
|
||||
|
||||
/* Clear the old settings from the MTA */
|
||||
hw_dbg("Clearing MTA\n");
|
||||
for (i = 0; i < hw->mac.mta_reg_count; i++) {
|
||||
array_wr32(E1000_MTA, i, 0);
|
||||
wrfl();
|
||||
}
|
||||
|
||||
/* Load any remaining multicast addresses into the hash table. */
|
||||
for (; mc_addr_count > 0; mc_addr_count--) {
|
||||
hash_value = igb_hash_mc_addr(hw, mc_addr_list);
|
||||
hw_dbg("Hash value = 0x%03X\n", hash_value);
|
||||
hw->mac.ops.mta_set(hw, hash_value);
|
||||
mc_addr_list += ETH_ALEN;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_shutdown_fiber_serdes_link_82575 - Remove link during power down
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* In the case of fiber serdes, shut down optics and PCS on driver unload
|
||||
* when management pass thru is not enabled.
|
||||
**/
|
||||
void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
if (hw->mac.type != e1000_82576 ||
|
||||
(hw->phy.media_type != e1000_media_type_fiber &&
|
||||
hw->phy.media_type != e1000_media_type_internal_serdes))
|
||||
return;
|
||||
|
||||
/* if the management interface is not enabled, then power down */
|
||||
if (!igb_enable_mng_pass_thru(hw)) {
|
||||
/* Disable PCS to turn off link */
|
||||
reg = rd32(E1000_PCS_CFG0);
|
||||
reg &= ~E1000_PCS_CFG_PCS_EN;
|
||||
wr32(E1000_PCS_CFG0, reg);
|
||||
|
||||
/* shutdown the laser */
|
||||
reg = rd32(E1000_CTRL_EXT);
|
||||
reg |= E1000_CTRL_EXT_SDP7_DATA;
|
||||
wr32(E1000_CTRL_EXT, reg);
|
||||
|
||||
/* flush the write to verify completion */
|
||||
wrfl();
|
||||
msleep(1);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
@ -854,7 +972,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
|
||||
igb_clear_vfta(hw);
|
||||
|
||||
/* Setup the receive address */
|
||||
igb_init_rx_addrs(hw, rar_count);
|
||||
igb_init_rx_addrs_82575(hw, rar_count);
|
||||
/* Zero out the Multicast HASH table */
|
||||
hw_dbg("Zeroing the MTA\n");
|
||||
for (i = 0; i < mac->mta_reg_count; i++)
|
||||
@ -1113,6 +1231,70 @@ out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_translate_register_82576 - Translate the proper register offset
|
||||
* @reg: e1000 register to be read
|
||||
*
|
||||
* Registers in 82576 are located in different offsets than other adapters
|
||||
* even though they function in the same manner. This function takes in
|
||||
* the name of the register to read and returns the correct offset for
|
||||
* 82576 silicon.
|
||||
**/
|
||||
u32 igb_translate_register_82576(u32 reg)
|
||||
{
|
||||
/*
|
||||
* Some of the Kawela registers are located at different
|
||||
* offsets than they are in older adapters.
|
||||
* Despite the difference in location, the registers
|
||||
* function in the same manner.
|
||||
*/
|
||||
switch (reg) {
|
||||
case E1000_TDBAL(0):
|
||||
reg = 0x0E000;
|
||||
break;
|
||||
case E1000_TDBAH(0):
|
||||
reg = 0x0E004;
|
||||
break;
|
||||
case E1000_TDLEN(0):
|
||||
reg = 0x0E008;
|
||||
break;
|
||||
case E1000_TDH(0):
|
||||
reg = 0x0E010;
|
||||
break;
|
||||
case E1000_TDT(0):
|
||||
reg = 0x0E018;
|
||||
break;
|
||||
case E1000_TXDCTL(0):
|
||||
reg = 0x0E028;
|
||||
break;
|
||||
case E1000_RDBAL(0):
|
||||
reg = 0x0C000;
|
||||
break;
|
||||
case E1000_RDBAH(0):
|
||||
reg = 0x0C004;
|
||||
break;
|
||||
case E1000_RDLEN(0):
|
||||
reg = 0x0C008;
|
||||
break;
|
||||
case E1000_RDH(0):
|
||||
reg = 0x0C010;
|
||||
break;
|
||||
case E1000_RDT(0):
|
||||
reg = 0x0C018;
|
||||
break;
|
||||
case E1000_RXDCTL(0):
|
||||
reg = 0x0C028;
|
||||
break;
|
||||
case E1000_SRRCTL(0):
|
||||
reg = 0x0C00C;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return reg;
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_reset_init_script_82575 - Inits HW defaults after reset
|
||||
* @hw: pointer to the HW structure
|
||||
@ -1304,7 +1486,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
|
||||
.reset_hw = igb_reset_hw_82575,
|
||||
.init_hw = igb_init_hw_82575,
|
||||
.check_for_link = igb_check_for_link_82575,
|
||||
.rar_set = igb_rar_set_82575,
|
||||
.rar_set = igb_rar_set,
|
||||
.read_mac_addr = igb_read_mac_addr_82575,
|
||||
.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
|
||||
};
|
||||
|
@ -28,9 +28,13 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_

u32 igb_translate_register_82576(u32 reg);
void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);

#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24

/* SRRCTL bit definitions */
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
@ -95,6 +99,8 @@ union e1000_adv_rx_desc {
/* RSS Hash results */

/* RSS Packet Types as indicated in the receive descriptor */
#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */

/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
@ -144,9 +150,25 @@ struct e1000_adv_tx_context_desc {
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */

/* Direct Cache Access (DCA) definitions */
#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */

#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */

#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */

#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */

/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */

#endif

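The header above moves the DCA CPUID field for 82576: on 82575 it sits in the low five bits (mask 0x0000001F), on 82576 in the top byte (mask 0xFF000000 with shift 24). A minimal, hedged sketch of placing a CPU id into each layout (standalone illustration; the real driver also sets the descriptor/header/payload enable bits and writes the result to the per-queue register):

```c
/* Sketch: positioning a CPU id in DCA_RXCTRL for the two layouts above.
 * Mask/shift values are copied from the header; the function names and
 * printfs are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define E1000_DCA_RXCTRL_CPUID_MASK        0x0000001Fu  /* 82575: bits 4:0   */
#define E1000_DCA_RXCTRL_CPUID_MASK_82576  0xFF000000u  /* 82576: bits 31:24 */
#define E1000_DCA_RXCTRL_CPUID_SHIFT       24

static uint32_t dca_rxctrl_82575(uint32_t rxctrl, uint32_t cpu)
{
	rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
	return rxctrl | (cpu & E1000_DCA_RXCTRL_CPUID_MASK);
}

static uint32_t dca_rxctrl_82576(uint32_t rxctrl, uint32_t cpu)
{
	rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
	return rxctrl | (cpu << E1000_DCA_RXCTRL_CPUID_SHIFT);
}

int main(void)
{
	printf("82575: 0x%08x\n", (unsigned)dca_rxctrl_82575(0, 3)); /* 0x00000003 */
	printf("82576: 0x%08x\n", (unsigned)dca_rxctrl_82576(0, 3)); /* 0x03000000 */
	return 0;
}
```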
@ -90,6 +90,11 @@
#define E1000_I2CCMD_ERROR 0x80000000
#define E1000_MAX_SGMII_PHY_REG_ADDR 255
#define E1000_I2CCMD_PHY_TIMEOUT 200
#define E1000_IVAR_VALID 0x80
#define E1000_GPIE_NSICR 0x00000001
#define E1000_GPIE_MSIX_MODE 0x00000010
#define E1000_GPIE_EIAME 0x40000000
#define E1000_GPIE_PBA 0x80000000

/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@ -213,6 +218,7 @@
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
@ -244,6 +250,7 @@
*/

#define E1000_CONNSW_ENRGSRC 0x4
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
#define E1000_PCS_LCTL_FSV_1000 4
@ -253,6 +260,7 @@
#define E1000_PCS_LCTL_AN_ENABLE 0x10000
#define E1000_PCS_LCTL_AN_RESTART 0x20000
#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
#define E1000_ENABLE_SERDES_LOOPBACK 0x0410

#define E1000_PCS_LSTS_LINK_OK 1
#define E1000_PCS_LSTS_SPEED_100 2
@ -360,6 +368,7 @@
#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
#define E1000_PBA_24K 0x0018
#define E1000_PBA_34K 0x0022
#define E1000_PBA_64K 0x0040 /* 64KB */

#define IFS_MAX 80
#define IFS_MIN 40
@ -528,6 +537,7 @@
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */

@ -38,6 +38,10 @@

struct e1000_hw;

#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
@ -50,6 +54,7 @@ struct e1000_hw;
enum e1000_mac_type {
e1000_undefined = 0,
e1000_82575,
e1000_82576,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};

@ -410,14 +415,17 @@ struct e1000_mac_operations {
s32 (*check_for_link)(struct e1000_hw *);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
void (*mta_set)(struct e1000_hw *, u32);
};

struct e1000_phy_operations {
s32 (*acquire_phy)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);

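e1000_mac_operations above is the usual function-pointer ops table: each MAC family fills in its callbacks once (as e1000_mac_ops_82575 does earlier in this diff) and the rest of the driver only ever calls through the table. A hedged, self-contained sketch of that indirection, with all names invented for illustration:

```c
/* Sketch of the ops-table indirection used by e1000_mac_operations:
 * per-family behaviour is selected by filling in pointers once, and
 * callers never name the backend directly.
 */
#include <stdint.h>
#include <stdio.h>

struct mac_ops {
	void (*rar_set)(const uint8_t *addr, uint32_t index);
};

static void rar_set_generic(const uint8_t *addr, uint32_t index)
{
	/* stand-in for writing a receive address register entry */
	printf("RAR[%u] <- %02x:%02x:...\n", (unsigned)index,
	       (unsigned)addr[0], (unsigned)addr[1]);
}

static const struct mac_ops ops_82575 = {
	.rar_set = rar_set_generic,   /* one shared implementation */
};

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

	ops_82575.rar_set(mac, 0);    /* caller goes through the table */
	return 0;
}
```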
@ -36,7 +36,6 @@

static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);

/**
* igb_remove_device - Free device specific structure
@ -360,7 +359,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
* the multicast filter table array address and new table value. See
* igb_mta_set()
**/
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;

@ -94,5 +94,6 @@ enum e1000_mng_mode {
#define E1000_HICR_C 0x02

extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);

#endif

@ -56,6 +56,9 @@
#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define E1000_TCTL 0x00400 /* TX Control - RW */
#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
@ -217,6 +220,7 @@
#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
#define E1000_RA 0x05400 /* Receive Address - RW Array */
#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
@ -235,6 +239,8 @@
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
#define E1000_HICR 0x08F00 /* Host Inteface Control */

/* RSS registers */
@ -256,7 +262,8 @@
#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */

#define E1000_REGISTER(a, reg) reg
#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \
? reg : e1000_translate_register_82576(reg))

#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))

@ -36,12 +36,20 @@
|
||||
|
||||
struct igb_adapter;
|
||||
|
||||
#ifdef CONFIG_IGB_LRO
|
||||
#include <linux/inet_lro.h>
|
||||
#define MAX_LRO_AGGR 32
|
||||
#define MAX_LRO_DESCRIPTORS 8
|
||||
#endif
|
||||
|
||||
/* Interrupt defines */
|
||||
#define IGB_MAX_TX_CLEAN 72
|
||||
|
||||
#define IGB_MIN_DYN_ITR 3000
|
||||
#define IGB_MAX_DYN_ITR 96000
|
||||
#define IGB_START_ITR 6000
|
||||
|
||||
/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
|
||||
#define IGB_START_ITR 648
|
||||
|
||||
#define IGB_DYN_ITR_PACKET_THRESHOLD 2
|
||||
#define IGB_DYN_ITR_LENGTH_LOW 200
|
||||
@ -62,6 +70,7 @@ struct igb_adapter;
|
||||
|
||||
/* Transmit and receive queues */
|
||||
#define IGB_MAX_RX_QUEUES 4
|
||||
#define IGB_MAX_TX_QUEUES 4
|
||||
|
||||
/* RX descriptor control thresholds.
|
||||
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
|
||||
@ -124,6 +133,7 @@ struct igb_buffer {
|
||||
struct {
|
||||
struct page *page;
|
||||
u64 page_dma;
|
||||
unsigned int page_offset;
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -157,18 +167,19 @@ struct igb_ring {
|
||||
union {
|
||||
/* TX */
|
||||
struct {
|
||||
spinlock_t tx_clean_lock;
|
||||
spinlock_t tx_lock;
|
||||
struct igb_queue_stats tx_stats;
|
||||
bool detect_tx_hung;
|
||||
};
|
||||
/* RX */
|
||||
struct {
|
||||
/* arrays of page information for packet split */
|
||||
struct sk_buff *pending_skb;
|
||||
int pending_skb_page;
|
||||
int no_itr_adjust;
|
||||
struct igb_queue_stats rx_stats;
|
||||
struct napi_struct napi;
|
||||
int set_itr;
|
||||
struct igb_ring *buddy;
|
||||
#ifdef CONFIG_IGB_LRO
|
||||
struct net_lro_mgr lro_mgr;
|
||||
bool lro_used;
|
||||
#endif
|
||||
};
|
||||
};
|
||||
|
||||
@ -211,7 +222,6 @@ struct igb_adapter {
|
||||
u32 itr_setting;
|
||||
u16 tx_itr;
|
||||
u16 rx_itr;
|
||||
int set_itr;
|
||||
|
||||
struct work_struct reset_task;
|
||||
struct work_struct watchdog_task;
|
||||
@ -270,15 +280,32 @@ struct igb_adapter {
|
||||
|
||||
/* to not mess up cache alignment, always add to the bottom */
|
||||
unsigned long state;
|
||||
unsigned int msi_enabled;
|
||||
|
||||
unsigned int flags;
|
||||
u32 eeprom_wol;
|
||||
|
||||
/* for ioport free */
|
||||
int bars;
|
||||
int need_ioport;
|
||||
|
||||
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
|
||||
struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
|
||||
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
|
||||
#ifdef CONFIG_IGB_LRO
|
||||
unsigned int lro_max_aggr;
|
||||
unsigned int lro_aggregated;
|
||||
unsigned int lro_flushed;
|
||||
unsigned int lro_no_desc;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define IGB_FLAG_HAS_MSI (1 << 0)
|
||||
#define IGB_FLAG_MSI_ENABLE (1 << 1)
|
||||
#define IGB_FLAG_HAS_DCA (1 << 2)
|
||||
#define IGB_FLAG_DCA_ENABLED (1 << 3)
|
||||
#define IGB_FLAG_IN_NETPOLL (1 << 5)
|
||||
#define IGB_FLAG_QUAD_PORT_A (1 << 6)
|
||||
#define IGB_FLAG_NEED_CTX_IDX (1 << 7)
|
||||
|
||||
enum e1000_state_t {
|
||||
__IGB_TESTING,
|
||||
__IGB_RESETTING,
|
||||
|
@ -93,13 +93,16 @@ static const struct igb_stats igb_gstrings_stats[] = {
|
||||
{ "tx_smbus", IGB_STAT(stats.mgptc) },
|
||||
{ "rx_smbus", IGB_STAT(stats.mgprc) },
|
||||
{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
|
||||
#ifdef CONFIG_IGB_LRO
|
||||
{ "lro_aggregated", IGB_STAT(lro_aggregated) },
|
||||
{ "lro_flushed", IGB_STAT(lro_flushed) },
|
||||
{ "lro_no_desc", IGB_STAT(lro_no_desc) },
|
||||
#endif
|
||||
};
|
||||
|
||||
#define IGB_QUEUE_STATS_LEN \
|
||||
((((((struct igb_adapter *)netdev->priv)->num_rx_queues > 1) ? \
|
||||
((struct igb_adapter *)netdev->priv)->num_rx_queues : 0) + \
|
||||
(((((struct igb_adapter *)netdev->priv)->num_tx_queues > 1) ? \
|
||||
((struct igb_adapter *)netdev->priv)->num_tx_queues : 0))) * \
|
||||
((((struct igb_adapter *)netdev->priv)->num_rx_queues + \
|
||||
((struct igb_adapter *)netdev->priv)->num_tx_queues) * \
|
||||
(sizeof(struct igb_queue_stats) / sizeof(u64)))
|
||||
#define IGB_GLOBAL_STATS_LEN \
|
||||
sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
|
||||
@ -829,8 +832,9 @@ err_setup:
/* ethtool register test data */
struct igb_reg_test {
u16 reg;
u8 array_len;
u8 test_type;
u16 reg_offset;
u16 array_len;
u16 test_type;
u32 mask;
u32 write;
};
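The widened struct adds a per-entry reg_offset; in the tables and test loop that follow, each entry expands to array_len registers spaced reg_offset bytes apart (test->reg + i * test->reg_offset), so one row can cover a whole bank of per-queue registers. A small hedged sketch of that expansion (standalone; the base offset and stride below are illustrative stand-ins, not taken from a specific table row):

```c
/* Sketch: expanding one register-test entry into the per-queue
 * addresses it covers, mirroring test->reg + (i * test->reg_offset).
 */
#include <stdint.h>
#include <stdio.h>

struct reg_test {
	uint32_t reg;        /* first register offset        */
	uint16_t reg_offset; /* stride between array entries */
	uint16_t array_len;  /* number of registers to test  */
};

int main(void)
{
	struct reg_test t = { 0x0C000, 0x100, 4 };  /* assumed Rx base + stride */
	uint16_t i;

	for (i = 0; i < t.array_len; i++)
		printf("test register at 0x%05x\n",
		       (unsigned)(t.reg + i * t.reg_offset));
	return 0;
}
```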
@ -852,34 +856,72 @@ struct igb_reg_test {
|
||||
#define TABLE64_TEST_LO 5
|
||||
#define TABLE64_TEST_HI 6
|
||||
|
||||
/* default register test */
|
||||
static struct igb_reg_test reg_test_82575[] = {
|
||||
{ E1000_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_VET, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
|
||||
/* 82576 reg test */
|
||||
static struct igb_reg_test reg_test_82576[] = {
|
||||
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
|
||||
{ E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
|
||||
/* Enable all four RX queues before testing. */
|
||||
{ E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
|
||||
{ E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
|
||||
/* RDH is read-only for 82576, only test RDT. */
|
||||
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
|
||||
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
|
||||
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
|
||||
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
|
||||
{ E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
|
||||
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
/* 82575 register test */
|
||||
static struct igb_reg_test reg_test_82575[] = {
|
||||
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
|
||||
/* Enable all four RX queues before testing. */
|
||||
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
|
||||
/* RDH is read-only for 82575, only test RDT. */
|
||||
{ E1000_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
|
||||
{ E1000_FCRTH, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
|
||||
{ E1000_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
|
||||
{ E1000_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
|
||||
{ E1000_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
|
||||
{ E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
|
||||
{ E1000_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_TXCW, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
|
||||
{ E1000_RA, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
|
||||
{ E1000_MTA, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
|
||||
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
|
||||
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
|
||||
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
|
||||
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
|
||||
{ E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
@ -939,7 +981,15 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
|
||||
u32 i, toggle;
|
||||
|
||||
toggle = 0x7FFFF3FF;
|
||||
test = reg_test_82575;
|
||||
|
||||
switch (adapter->hw.mac.type) {
|
||||
case e1000_82576:
|
||||
test = reg_test_82576;
|
||||
break;
|
||||
default:
|
||||
test = reg_test_82575;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Because the status register is such a special case,
|
||||
* we handle it separately from the rest of the register
|
||||
@ -966,19 +1016,19 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
|
||||
for (i = 0; i < test->array_len; i++) {
|
||||
switch (test->test_type) {
|
||||
case PATTERN_TEST:
|
||||
REG_PATTERN_TEST(test->reg + (i * 0x100),
|
||||
REG_PATTERN_TEST(test->reg + (i * test->reg_offset),
|
||||
test->mask,
|
||||
test->write);
|
||||
break;
|
||||
case SET_READ_TEST:
|
||||
REG_SET_AND_CHECK(test->reg + (i * 0x100),
|
||||
REG_SET_AND_CHECK(test->reg + (i * test->reg_offset),
|
||||
test->mask,
|
||||
test->write);
|
||||
break;
|
||||
case WRITE_NO_TEST:
|
||||
writel(test->write,
|
||||
(adapter->hw.hw_addr + test->reg)
|
||||
+ (i * 0x100));
|
||||
+ (i * test->reg_offset));
|
||||
break;
|
||||
case TABLE32_TEST:
|
||||
REG_PATTERN_TEST(test->reg + (i * 4),
|
||||
@ -1052,7 +1102,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
|
||||
if (adapter->msix_entries) {
|
||||
/* NOTE: we don't test MSI-X interrupts here, yet */
|
||||
return 0;
|
||||
} else if (adapter->msi_enabled) {
|
||||
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
|
||||
shared_int = false;
|
||||
if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
|
||||
*data = 1;
|
||||
@ -1394,13 +1444,39 @@ static int igb_set_phy_loopback(struct igb_adapter *adapter)
|
||||
static int igb_setup_loopback_test(struct igb_adapter *adapter)
|
||||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
u32 rctl;
|
||||
u32 reg;
|
||||
|
||||
if (hw->phy.media_type == e1000_media_type_fiber ||
|
||||
hw->phy.media_type == e1000_media_type_internal_serdes) {
|
||||
rctl = rd32(E1000_RCTL);
|
||||
rctl |= E1000_RCTL_LBM_TCVR;
|
||||
wr32(E1000_RCTL, rctl);
|
||||
reg = rd32(E1000_RCTL);
|
||||
reg |= E1000_RCTL_LBM_TCVR;
|
||||
wr32(E1000_RCTL, reg);
|
||||
|
||||
wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
|
||||
|
||||
reg = rd32(E1000_CTRL);
|
||||
reg &= ~(E1000_CTRL_RFCE |
|
||||
E1000_CTRL_TFCE |
|
||||
E1000_CTRL_LRST);
|
||||
reg |= E1000_CTRL_SLU |
|
||||
E1000_CTRL_FD;
|
||||
wr32(E1000_CTRL, reg);
|
||||
|
||||
/* Unset switch control to serdes energy detect */
|
||||
reg = rd32(E1000_CONNSW);
|
||||
reg &= ~E1000_CONNSW_ENRGSRC;
|
||||
wr32(E1000_CONNSW, reg);
|
||||
|
||||
/* Set PCS register for forced speed */
|
||||
reg = rd32(E1000_PCS_LCTL);
|
||||
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
|
||||
reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
|
||||
E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
|
||||
E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
|
||||
E1000_PCS_LCTL_FSD | /* Force Speed */
|
||||
E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
|
||||
wr32(E1000_PCS_LCTL, reg);
|
||||
|
||||
return 0;
|
||||
} else if (hw->phy.media_type == e1000_media_type_copper) {
|
||||
return igb_set_phy_loopback(adapter);
|
||||
@ -1660,6 +1736,8 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
|
||||
wol->supported = 0;
|
||||
break;
|
||||
case E1000_DEV_ID_82575EB_FIBER_SERDES:
|
||||
case E1000_DEV_ID_82576_FIBER:
|
||||
case E1000_DEV_ID_82576_SERDES:
|
||||
/* Wake events not supported on port B */
|
||||
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
|
||||
wol->supported = 0;
|
||||
@ -1668,6 +1746,15 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
|
||||
/* return success for non excluded adapter ports */
|
||||
retval = 0;
|
||||
break;
|
||||
case E1000_DEV_ID_82576_QUAD_COPPER:
|
||||
/* quad port adapters only support WoL on port A */
|
||||
if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
|
||||
wol->supported = 0;
|
||||
break;
|
||||
}
|
||||
/* return success for non excluded adapter ports */
|
||||
retval = 0;
|
||||
break;
|
||||
default:
|
||||
/* dual port cards only support WoL on port A from now on
|
||||
* unless it was enabled in the eeprom for port B
|
||||
@ -1774,6 +1861,8 @@ static int igb_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int i;

if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
@ -1782,13 +1871,16 @@ static int igb_set_coalesce(struct net_device *netdev,
return -EINVAL;

/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs <= 3)
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
adapter->itr_setting = ec->rx_coalesce_usecs;
else
adapter->itr_setting = (1000000 / ec->rx_coalesce_usecs);
adapter->itr = IGB_START_ITR;
} else {
adapter->itr_setting = ec->rx_coalesce_usecs << 2;
adapter->itr = adapter->itr_setting;
}

if (netif_running(netdev))
igb_reinit_locked(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
wr32(adapter->rx_ring[i].itr_register, adapter->itr);

return 0;
}
@ -1801,7 +1893,7 @@ static int igb_get_coalesce(struct net_device *netdev,
if (adapter->itr_setting <= 3)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
ec->rx_coalesce_usecs = adapter->itr_setting >> 2;

return 0;
}
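The coalescing hunks above drop the old "interrupts per second" bookkeeping: igb_set_coalesce now stores rx_coalesce_usecs << 2 (and writes it to each ring's ITR register), and igb_get_coalesce shifts it back down. A hedged round-trip sketch of just that conversion (standalone; the helper names are invented, and no claim is made here about why the scale factor is 4):

```c
/* Sketch of the usec <-> itr_setting conversion introduced above:
 * the stored setting is the microsecond value scaled by 4, and the
 * get path simply undoes the shift.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t usecs_to_setting(uint32_t usecs)   { return usecs << 2; }
static uint32_t setting_to_usecs(uint32_t setting) { return setting >> 2; }

int main(void)
{
	uint32_t usecs = 100;                        /* 100 us ~ 10000 irqs/s */
	uint32_t setting = usecs_to_setting(usecs);  /* 400, kept in itr_setting */

	assert(setting_to_usecs(setting) == usecs);  /* round-trips exactly */
	return 0;
}
```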
@ -1835,6 +1927,18 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
|
||||
int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
|
||||
int j;
|
||||
int i;
|
||||
#ifdef CONFIG_IGB_LRO
|
||||
int aggregated = 0, flushed = 0, no_desc = 0;
|
||||
|
||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||
aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
|
||||
flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
|
||||
no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
|
||||
}
|
||||
adapter->lro_aggregated = aggregated;
|
||||
adapter->lro_flushed = flushed;
|
||||
adapter->lro_no_desc = no_desc;
|
||||
#endif
|
||||
|
||||
igb_update_stats(adapter);
|
||||
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
|
||||
@ -1842,6 +1946,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
|
||||
data[i] = (igb_gstrings_stats[i].sizeof_stat ==
|
||||
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
|
||||
}
|
||||
for (j = 0; j < adapter->num_tx_queues; j++) {
|
||||
int k;
|
||||
queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
|
||||
for (k = 0; k < stat_count; k++)
|
||||
data[i + k] = queue_stat[k];
|
||||
i += k;
|
||||
}
|
||||
for (j = 0; j < adapter->num_rx_queues; j++) {
|
||||
int k;
|
||||
queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
|
||||
|
File diff suppressed because it is too large
@ -1,7 +1,7 @@
|
||||
################################################################################
|
||||
#
|
||||
# Intel PRO/10GbE Linux driver
|
||||
# Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
# Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify it
|
||||
# under the terms and conditions of the GNU General Public License,
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
@ -89,18 +89,16 @@ struct ixgb_adapter;
|
||||
|
||||
|
||||
/* TX/RX descriptor defines */
|
||||
#define DEFAULT_TXD 256
|
||||
#define MAX_TXD 4096
|
||||
#define MIN_TXD 64
|
||||
#define DEFAULT_TXD 256
|
||||
#define MAX_TXD 4096
|
||||
#define MIN_TXD 64
|
||||
|
||||
/* hardware cannot reliably support more than 512 descriptors owned by
|
||||
* hardware descrioptor cache otherwise an unreliable ring under heavy
|
||||
* recieve load may result */
|
||||
/* #define DEFAULT_RXD 1024 */
|
||||
/* #define MAX_RXD 4096 */
|
||||
#define DEFAULT_RXD 512
|
||||
#define MAX_RXD 512
|
||||
#define MIN_RXD 64
|
||||
* hardware descriptor cache otherwise an unreliable ring under heavy
|
||||
* receive load may result */
|
||||
#define DEFAULT_RXD 512
|
||||
#define MAX_RXD 512
|
||||
#define MIN_RXD 64
|
||||
|
||||
/* Supported Rx Buffer Sizes */
|
||||
#define IXGB_RXBUFFER_2048 2048
|
||||
@ -157,7 +155,6 @@ struct ixgb_adapter {
|
||||
u32 part_num;
|
||||
u16 link_speed;
|
||||
u16 link_duplex;
|
||||
spinlock_t tx_lock;
|
||||
struct work_struct tx_timeout_task;
|
||||
|
||||
struct timer_list blink_timer;
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
@ -108,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
|
||||
*/
|
||||
eecd_reg &= ~IXGB_EECD_DI;
|
||||
|
||||
if(data & mask)
|
||||
if (data & mask)
|
||||
eecd_reg |= IXGB_EECD_DI;
|
||||
|
||||
IXGB_WRITE_REG(hw, EECD, eecd_reg);
|
||||
@ -120,7 +120,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
|
||||
|
||||
mask = mask >> 1;
|
||||
|
||||
} while(mask);
|
||||
} while (mask);
|
||||
|
||||
/* We leave the "DI" bit set to "0" when we leave this routine. */
|
||||
eecd_reg &= ~IXGB_EECD_DI;
|
||||
@ -152,14 +152,14 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
|
||||
eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
|
||||
data = 0;
|
||||
|
||||
for(i = 0; i < 16; i++) {
|
||||
for (i = 0; i < 16; i++) {
|
||||
data = data << 1;
|
||||
ixgb_raise_clock(hw, &eecd_reg);
|
||||
|
||||
eecd_reg = IXGB_READ_REG(hw, EECD);
|
||||
|
||||
eecd_reg &= ~(IXGB_EECD_DI);
|
||||
if(eecd_reg & IXGB_EECD_DO)
|
||||
if (eecd_reg & IXGB_EECD_DO)
|
||||
data |= 1;
|
||||
|
||||
ixgb_lower_clock(hw, &eecd_reg);
|
||||
@ -205,7 +205,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
|
||||
|
||||
eecd_reg = IXGB_READ_REG(hw, EECD);
|
||||
|
||||
/* Deselct EEPROM */
|
||||
/* Deselect EEPROM */
|
||||
eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
|
||||
IXGB_WRITE_REG(hw, EECD, eecd_reg);
|
||||
udelay(50);
|
||||
@ -293,14 +293,14 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
|
||||
*/
|
||||
ixgb_standby_eeprom(hw);
|
||||
|
||||
/* Now read DO repeatedly until is high (equal to '1'). The EEEPROM will
|
||||
/* Now read DO repeatedly until is high (equal to '1'). The EEPROM will
|
||||
* signal that the command has been completed by raising the DO signal.
|
||||
* If DO does not go high in 10 milliseconds, then error out.
|
||||
*/
|
||||
for(i = 0; i < 200; i++) {
|
||||
for (i = 0; i < 200; i++) {
|
||||
eecd_reg = IXGB_READ_REG(hw, EECD);
|
||||
|
||||
if(eecd_reg & IXGB_EECD_DO)
|
||||
if (eecd_reg & IXGB_EECD_DO)
|
||||
return (true);
|
||||
|
||||
udelay(50);
|
||||
@ -328,10 +328,10 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
|
||||
u16 checksum = 0;
|
||||
u16 i;
|
||||
|
||||
for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
|
||||
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
|
||||
checksum += ixgb_read_eeprom(hw, i);
|
||||
|
||||
if(checksum == (u16) EEPROM_SUM)
|
||||
if (checksum == (u16) EEPROM_SUM)
|
||||
return (true);
|
||||
else
|
||||
return (false);
|
||||
@ -351,7 +351,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
|
||||
u16 checksum = 0;
|
||||
u16 i;
|
||||
|
||||
for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
|
||||
for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
|
||||
checksum += ixgb_read_eeprom(hw, i);
|
||||
|
||||
checksum = (u16) EEPROM_SUM - checksum;
|
||||
@ -365,7 +365,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
|
||||
*
|
||||
* hw - Struct containing variables accessed by shared code
|
||||
* reg - offset within the EEPROM to be written to
|
||||
* data - 16 bit word to be writen to the EEPROM
|
||||
* data - 16 bit word to be written to the EEPROM
|
||||
*
|
||||
* If ixgb_update_eeprom_checksum is not called after this function, the
|
||||
* EEPROM will most likely contain an invalid checksum.
|
||||
@ -472,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
|
||||
ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
|
||||
|
||||
DEBUGOUT("ixgb_ee: Reading eeprom data\n");
|
||||
for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
|
||||
for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
|
||||
u16 ee_data;
|
||||
ee_data = ixgb_read_eeprom(hw, i);
|
||||
checksum += ee_data;
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
@ -34,11 +34,11 @@
|
||||
#define IXGB_ETH_LENGTH_OF_ADDRESS 6
|
||||
|
||||
/* EEPROM Commands */
|
||||
#define EEPROM_READ_OPCODE 0x6 /* EERPOM read opcode */
|
||||
#define EEPROM_WRITE_OPCODE 0x5 /* EERPOM write opcode */
|
||||
#define EEPROM_ERASE_OPCODE 0x7 /* EERPOM erase opcode */
|
||||
#define EEPROM_EWEN_OPCODE 0x13 /* EERPOM erase/write enable */
|
||||
#define EEPROM_EWDS_OPCODE 0x10 /* EERPOM erast/write disable */
|
||||
#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
|
||||
#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
|
||||
#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
|
||||
#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
|
||||
#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
|
||||
|
||||
/* EEPROM MAP (Word Offsets) */
|
||||
#define EEPROM_IA_1_2_REG 0x0000
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
@ -95,7 +95,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
|
||||
ecmd->port = PORT_FIBRE;
|
||||
ecmd->transceiver = XCVR_EXTERNAL;
|
||||
|
||||
if(netif_carrier_ok(adapter->netdev)) {
|
||||
if (netif_carrier_ok(adapter->netdev)) {
|
||||
ecmd->speed = SPEED_10000;
|
||||
ecmd->duplex = DUPLEX_FULL;
|
||||
} else {
|
||||
@ -122,11 +122,11 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if(ecmd->autoneg == AUTONEG_ENABLE ||
|
||||
if (ecmd->autoneg == AUTONEG_ENABLE ||
|
||||
ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
|
||||
return -EINVAL;
|
||||
|
||||
if(netif_running(adapter->netdev)) {
|
||||
|
||||
if (netif_running(adapter->netdev)) {
|
||||
ixgb_down(adapter, true);
|
||||
ixgb_reset(adapter);
|
||||
ixgb_up(adapter);
|
||||
@ -143,14 +143,14 @@ ixgb_get_pauseparam(struct net_device *netdev,
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgb_hw *hw = &adapter->hw;
|
||||
|
||||
|
||||
pause->autoneg = AUTONEG_DISABLE;
|
||||
|
||||
if(hw->fc.type == ixgb_fc_rx_pause)
|
||||
|
||||
if (hw->fc.type == ixgb_fc_rx_pause)
|
||||
pause->rx_pause = 1;
|
||||
else if(hw->fc.type == ixgb_fc_tx_pause)
|
||||
else if (hw->fc.type == ixgb_fc_tx_pause)
|
||||
pause->tx_pause = 1;
|
||||
else if(hw->fc.type == ixgb_fc_full) {
|
||||
else if (hw->fc.type == ixgb_fc_full) {
|
||||
pause->rx_pause = 1;
|
||||
pause->tx_pause = 1;
|
||||
}
|
||||
@ -162,26 +162,26 @@ ixgb_set_pauseparam(struct net_device *netdev,
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgb_hw *hw = &adapter->hw;
|
||||
|
||||
if(pause->autoneg == AUTONEG_ENABLE)
|
||||
|
||||
if (pause->autoneg == AUTONEG_ENABLE)
|
||||
return -EINVAL;
|
||||
|
||||
if(pause->rx_pause && pause->tx_pause)
|
||||
if (pause->rx_pause && pause->tx_pause)
|
||||
hw->fc.type = ixgb_fc_full;
|
||||
else if(pause->rx_pause && !pause->tx_pause)
|
||||
else if (pause->rx_pause && !pause->tx_pause)
|
||||
hw->fc.type = ixgb_fc_rx_pause;
|
||||
else if(!pause->rx_pause && pause->tx_pause)
|
||||
else if (!pause->rx_pause && pause->tx_pause)
|
||||
hw->fc.type = ixgb_fc_tx_pause;
|
||||
else if(!pause->rx_pause && !pause->tx_pause)
|
||||
else if (!pause->rx_pause && !pause->tx_pause)
|
||||
hw->fc.type = ixgb_fc_none;
|
||||
|
||||
if(netif_running(adapter->netdev)) {
|
||||
if (netif_running(adapter->netdev)) {
|
||||
ixgb_down(adapter, true);
|
||||
ixgb_up(adapter);
|
||||
ixgb_set_speed_duplex(netdev);
|
||||
} else
|
||||
ixgb_reset(adapter);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -200,7 +200,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
|
||||
|
||||
adapter->rx_csum = data;
|
||||
|
||||
if(netif_running(netdev)) {
|
||||
if (netif_running(netdev)) {
|
||||
ixgb_down(adapter, true);
|
||||
ixgb_up(adapter);
|
||||
ixgb_set_speed_duplex(netdev);
|
||||
@ -208,7 +208,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
|
||||
ixgb_reset(adapter);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static u32
|
||||
ixgb_get_tx_csum(struct net_device *netdev)
|
||||
{
|
||||
@ -229,12 +229,12 @@ ixgb_set_tx_csum(struct net_device *netdev, u32 data)
|
||||
static int
|
||||
ixgb_set_tso(struct net_device *netdev, u32 data)
|
||||
{
|
||||
if(data)
|
||||
if (data)
|
||||
netdev->features |= NETIF_F_TSO;
|
||||
else
|
||||
netdev->features &= ~NETIF_F_TSO;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static u32
|
||||
ixgb_get_msglevel(struct net_device *netdev)
|
||||
@ -251,7 +251,7 @@ ixgb_set_msglevel(struct net_device *netdev, u32 data)
|
||||
}
|
||||
#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
|
||||
|
||||
static int
|
||||
static int
|
||||
ixgb_get_regs_len(struct net_device *netdev)
|
||||
{
|
||||
#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
|
||||
@ -301,7 +301,7 @@ ixgb_get_regs(struct net_device *netdev,
|
||||
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
|
||||
|
||||
/* there are 16 RAR entries in hardware, we only use 3 */
|
||||
for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
|
||||
for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
|
||||
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
|
||||
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
|
||||
}
|
||||
@ -415,7 +415,7 @@ ixgb_get_eeprom(struct net_device *netdev,
|
||||
int i, max_len, first_word, last_word;
|
||||
int ret_val = 0;
|
||||
|
||||
if(eeprom->len == 0) {
|
||||
if (eeprom->len == 0) {
|
||||
ret_val = -EINVAL;
|
||||
goto geeprom_error;
|
||||
}
|
||||
@ -424,12 +424,12 @@ ixgb_get_eeprom(struct net_device *netdev,
|
||||
|
||||
max_len = ixgb_get_eeprom_len(netdev);
|
||||
|
||||
if(eeprom->offset > eeprom->offset + eeprom->len) {
|
||||
if (eeprom->offset > eeprom->offset + eeprom->len) {
|
||||
ret_val = -EINVAL;
|
||||
goto geeprom_error;
|
||||
}
|
||||
|
||||
if((eeprom->offset + eeprom->len) > max_len)
|
||||
if ((eeprom->offset + eeprom->len) > max_len)
|
||||
eeprom->len = (max_len - eeprom->offset);
|
||||
|
||||
first_word = eeprom->offset >> 1;
|
||||
@ -437,16 +437,14 @@ ixgb_get_eeprom(struct net_device *netdev,
|
||||
|
||||
eeprom_buff = kmalloc(sizeof(__le16) *
|
||||
(last_word - first_word + 1), GFP_KERNEL);
|
||||
if(!eeprom_buff)
|
||||
if (!eeprom_buff)
|
||||
return -ENOMEM;
|
||||
|
||||
/* note the eeprom was good because the driver loaded */
|
||||
for(i = 0; i <= (last_word - first_word); i++) {
|
||||
for (i = 0; i <= (last_word - first_word); i++)
|
||||
eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
|
||||
}
|
||||
|
||||
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
|
||||
eeprom->len);
|
||||
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
|
||||
kfree(eeprom_buff);
|
||||
|
||||
geeprom_error:
|
||||
@ -464,47 +462,47 @@ ixgb_set_eeprom(struct net_device *netdev,
|
||||
int max_len, first_word, last_word;
|
||||
u16 i;
|
||||
|
||||
if(eeprom->len == 0)
|
||||
if (eeprom->len == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
|
||||
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
|
||||
return -EFAULT;
|
||||
|
||||
max_len = ixgb_get_eeprom_len(netdev);
|
||||
|
||||
if(eeprom->offset > eeprom->offset + eeprom->len)
|
||||
if (eeprom->offset > eeprom->offset + eeprom->len)
|
||||
return -EINVAL;
|
||||
|
||||
if((eeprom->offset + eeprom->len) > max_len)
|
||||
if ((eeprom->offset + eeprom->len) > max_len)
|
||||
eeprom->len = (max_len - eeprom->offset);
|
||||
|
||||
first_word = eeprom->offset >> 1;
|
||||
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
|
||||
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
|
||||
if(!eeprom_buff)
|
||||
if (!eeprom_buff)
|
||||
return -ENOMEM;
|
||||
|
||||
ptr = (void *)eeprom_buff;
|
||||
|
||||
if(eeprom->offset & 1) {
|
||||
if (eeprom->offset & 1) {
|
||||
/* need read/modify/write of first changed EEPROM word */
|
||||
/* only the second byte of the word is being modified */
|
||||
eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
|
||||
ptr++;
|
||||
}
|
||||
if((eeprom->offset + eeprom->len) & 1) {
|
||||
if ((eeprom->offset + eeprom->len) & 1) {
|
||||
/* need read/modify/write of last changed EEPROM word */
|
||||
/* only the first byte of the word is being modified */
|
||||
eeprom_buff[last_word - first_word]
|
||||
eeprom_buff[last_word - first_word]
|
||||
= ixgb_read_eeprom(hw, last_word);
|
||||
}
|
||||
|
||||
memcpy(ptr, bytes, eeprom->len);
|
||||
for(i = 0; i <= (last_word - first_word); i++)
|
||||
for (i = 0; i <= (last_word - first_word); i++)
|
||||
ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
|
||||
|
||||
/* Update the checksum over the first part of the EEPROM if needed */
|
||||
if(first_word <= EEPROM_CHECKSUM_REG)
|
||||
if (first_word <= EEPROM_CHECKSUM_REG)
|
||||
ixgb_update_eeprom_checksum(hw);
|
||||
|
||||
kfree(eeprom_buff);
|
||||
@ -534,7 +532,7 @@ ixgb_get_ringparam(struct net_device *netdev,
|
||||
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
|
||||
struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
|
||||
|
||||
ring->rx_max_pending = MAX_RXD;
|
||||
ring->rx_max_pending = MAX_RXD;
|
||||
ring->tx_max_pending = MAX_TXD;
|
||||
ring->rx_mini_max_pending = 0;
|
||||
ring->rx_jumbo_max_pending = 0;
|
||||
@ -544,7 +542,7 @@ ixgb_get_ringparam(struct net_device *netdev,
|
||||
ring->rx_jumbo_pending = 0;
|
||||
}
|
||||
|
||||
static int
|
||||
static int
|
||||
ixgb_set_ringparam(struct net_device *netdev,
|
||||
struct ethtool_ringparam *ring)
|
||||
{
|
||||
@ -557,10 +555,10 @@ ixgb_set_ringparam(struct net_device *netdev,
|
||||
tx_old = adapter->tx_ring;
|
||||
rx_old = adapter->rx_ring;
|
||||
|
||||
if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
|
||||
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
|
||||
return -EINVAL;
|
||||
|
||||
if(netif_running(adapter->netdev))
|
||||
if (netif_running(adapter->netdev))
|
||||
ixgb_down(adapter, true);
|
||||
|
||||
rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
|
||||
@ -571,11 +569,11 @@ ixgb_set_ringparam(struct net_device *netdev,
|
||||
txdr->count = min(txdr->count,(u32)MAX_TXD);
|
||||
txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
|
||||
|
||||
if(netif_running(adapter->netdev)) {
|
||||
if (netif_running(adapter->netdev)) {
|
||||
/* Try to get new resources before deleting old */
|
||||
if((err = ixgb_setup_rx_resources(adapter)))
|
||||
if ((err = ixgb_setup_rx_resources(adapter)))
|
||||
goto err_setup_rx;
|
||||
if((err = ixgb_setup_tx_resources(adapter)))
|
||||
if ((err = ixgb_setup_tx_resources(adapter)))
|
||||
goto err_setup_tx;
|
||||
|
||||
/* save the new, restore the old in order to free it,
|
||||
@ -589,7 +587,7 @@ ixgb_set_ringparam(struct net_device *netdev,
|
||||
ixgb_free_tx_resources(adapter);
|
||||
adapter->rx_ring = rx_new;
|
||||
adapter->tx_ring = tx_new;
|
||||
if((err = ixgb_up(adapter)))
|
||||
if ((err = ixgb_up(adapter)))
|
||||
return err;
|
||||
ixgb_set_speed_duplex(netdev);
|
||||
}
|
||||
@ -615,7 +613,7 @@ ixgb_led_blink_callback(unsigned long data)
|
||||
{
|
||||
struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
|
||||
|
||||
if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
|
||||
if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
|
||||
ixgb_led_off(&adapter->hw);
|
||||
else
|
||||
ixgb_led_on(&adapter->hw);
|
||||
@ -631,7 +629,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
|
||||
if (!data)
|
||||
data = INT_MAX;
|
||||
|
||||
if(!adapter->blink_timer.function) {
|
||||
if (!adapter->blink_timer.function) {
|
||||
init_timer(&adapter->blink_timer);
|
||||
adapter->blink_timer.function = ixgb_led_blink_callback;
|
||||
adapter->blink_timer.data = (unsigned long)adapter;
|
||||
@ -647,7 +645,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
static int
|
||||
ixgb_get_sset_count(struct net_device *netdev, int sset)
|
||||
{
|
||||
switch (sset) {
|
||||
@ -658,30 +656,30 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
ixgb_get_ethtool_stats(struct net_device *netdev,
|
||||
static void
|
||||
ixgb_get_ethtool_stats(struct net_device *netdev,
|
||||
struct ethtool_stats *stats, u64 *data)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
int i;
|
||||
|
||||
ixgb_update_stats(adapter);
|
||||
for(i = 0; i < IXGB_STATS_LEN; i++) {
|
||||
char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
|
||||
data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
|
||||
for (i = 0; i < IXGB_STATS_LEN; i++) {
|
||||
char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
|
||||
data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
|
||||
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
static void
|
||||
ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
|
||||
{
|
||||
int i;
|
||||
|
||||
switch(stringset) {
|
||||
case ETH_SS_STATS:
|
||||
for(i=0; i < IXGB_STATS_LEN; i++) {
|
||||
memcpy(data + i * ETH_GSTRING_LEN,
|
||||
for (i = 0; i < IXGB_STATS_LEN; i++) {
|
||||
memcpy(data + i * ETH_GSTRING_LEN,
|
||||
ixgb_gstrings_stats[i].stat_string,
|
||||
ETH_GSTRING_LEN);
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
@ -125,7 +125,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
|
||||
/* If we are stopped or resetting exit gracefully and wait to be
|
||||
* started again before accessing the hardware.
|
||||
*/
|
||||
if(hw->adapter_stopped) {
|
||||
if (hw->adapter_stopped) {
|
||||
DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
|
||||
return false;
|
||||
}
|
||||
@ -347,7 +347,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
|
||||
|
||||
/* Zero out the Multicast HASH table */
|
||||
DEBUGOUT("Zeroing the MTA\n");
|
||||
for(i = 0; i < IXGB_MC_TBL_SIZE; i++)
|
||||
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
|
||||
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
|
||||
|
||||
/* Zero out the VLAN Filter Table Array */
|
||||
@ -371,7 +371,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
|
||||
* hw - Struct containing variables accessed by shared code
|
||||
*
|
||||
* Places the MAC address in receive address register 0 and clears the rest
|
||||
* of the receive addresss registers. Clears the multicast table. Assumes
|
||||
* of the receive address registers. Clears the multicast table. Assumes
|
||||
* the receiver is in reset when the routine is called.
|
||||
*****************************************************************************/
|
||||
static void
|
||||
@ -413,7 +413,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
|
||||
|
||||
/* Zero out the other 15 receive addresses. */
|
||||
DEBUGOUT("Clearing RAR[1-15]\n");
|
||||
for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
|
||||
for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
|
||||
/* Write high reg first to disable the AV bit first */
|
||||
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
|
||||
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
|
||||
@ -452,19 +452,18 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
|
||||
|
||||
/* Clear RAR[1-15] */
|
||||
DEBUGOUT(" Clearing RAR[1-15]\n");
|
||||
for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
|
||||
for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
|
||||
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
|
||||
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
|
||||
}
|
||||
|
||||
/* Clear the MTA */
|
||||
DEBUGOUT(" Clearing MTA\n");
|
||||
for(i = 0; i < IXGB_MC_TBL_SIZE; i++) {
|
||||
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
|
||||
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
|
||||
}
|
||||
|
||||
/* Add the new addresses */
|
||||
for(i = 0; i < mc_addr_count; i++) {
|
||||
for (i = 0; i < mc_addr_count; i++) {
|
||||
DEBUGOUT(" Adding the multicast addresses:\n");
|
||||
DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
|
||||
mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
|
||||
@ -482,7 +481,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
|
||||
/* Place this multicast address in the RAR if there is room, *
|
||||
* else put it in the MTA
|
||||
*/
|
||||
if(rar_used_count < IXGB_RAR_ENTRIES) {
|
||||
if (rar_used_count < IXGB_RAR_ENTRIES) {
|
||||
ixgb_rar_set(hw,
|
||||
mc_addr_list +
|
||||
(i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
|
||||
@ -649,7 +648,7 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
|
||||
{
|
||||
u32 offset;
|
||||
|
||||
for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
|
||||
for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
|
||||
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
|
||||
return;
|
||||
}
|
||||
@ -719,9 +718,8 @@ ixgb_setup_fc(struct ixgb_hw *hw)
|
||||
/* Write the new settings */
|
||||
IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
|
||||
|
||||
if (pap_reg != 0) {
|
||||
if (pap_reg != 0)
|
||||
IXGB_WRITE_REG(hw, PAP, pap_reg);
|
||||
}
|
||||
|
||||
/* Set the flow control receive threshold registers. Normally,
|
||||
* these registers will be set to a default threshold that may be
|
||||
@ -729,14 +727,14 @@ ixgb_setup_fc(struct ixgb_hw *hw)
|
||||
* ability to transmit pause frames in not enabled, then these
|
||||
* registers will be set to 0.
|
||||
*/
|
||||
if(!(hw->fc.type & ixgb_fc_tx_pause)) {
|
||||
if (!(hw->fc.type & ixgb_fc_tx_pause)) {
|
||||
IXGB_WRITE_REG(hw, FCRTL, 0);
|
||||
IXGB_WRITE_REG(hw, FCRTH, 0);
|
||||
} else {
|
||||
/* We need to set up the Receive Threshold high and low water
|
||||
* marks as well as (optionally) enabling the transmission of XON
|
||||
* frames. */
|
||||
if(hw->fc.send_xon) {
|
||||
if (hw->fc.send_xon) {
|
||||
IXGB_WRITE_REG(hw, FCRTL,
|
||||
(hw->fc.low_water | IXGB_FCRTL_XONE));
|
||||
} else {
|
||||
@ -791,7 +789,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
|
||||
** from the CPU Write to the Ready bit assertion.
|
||||
**************************************************************/
|
||||
|
||||
for(i = 0; i < 10; i++)
|
||||
for (i = 0; i < 10; i++)
|
||||
{
|
||||
udelay(10);
|
||||
|
||||
@ -818,7 +816,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
|
||||
** from the CPU Write to the Ready bit assertion.
|
||||
**************************************************************/
|
||||
|
||||
for(i = 0; i < 10; i++)
|
||||
for (i = 0; i < 10; i++)
|
||||
{
|
||||
udelay(10);
|
||||
|
||||
@ -887,7 +885,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
|
||||
** from the CPU Write to the Ready bit assertion.
|
||||
**************************************************************/
|
||||
|
||||
for(i = 0; i < 10; i++)
|
||||
for (i = 0; i < 10; i++)
|
||||
{
|
||||
udelay(10);
|
||||
|
||||
@ -914,7 +912,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
|
||||
** from the CPU Write to the Ready bit assertion.
|
||||
**************************************************************/
|
||||
|
||||
for(i = 0; i < 10; i++)
|
||||
for (i = 0; i < 10; i++)
|
||||
{
|
||||
udelay(10);
|
||||
|
||||
@ -965,7 +963,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Check for a bad link condition that may have occured.
|
||||
* Check for a bad link condition that may have occurred.
|
||||
* The indication is that the RFC / LFC registers may be incrementing
|
||||
* continually. A full adapter reset is required to recover.
|
||||
*
|
||||
@ -1007,7 +1005,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
|
||||
DEBUGFUNC("ixgb_clear_hw_cntrs");
|
||||
|
||||
/* if we are stopped or resetting exit gracefully */
|
||||
if(hw->adapter_stopped) {
|
||||
if (hw->adapter_stopped) {
|
||||
DEBUGOUT("Exiting because the adapter is stopped!!!\n");
|
||||
return;
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
@ -38,11 +38,11 @@
|
||||
#define SUN_VENDOR_ID 0x108E
|
||||
#define SUN_SUBVENDOR_ID 0x108E
|
||||
|
||||
#define IXGB_DEVICE_ID_82597EX 0x1048
|
||||
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
|
||||
#define IXGB_DEVICE_ID_82597EX 0x1048
|
||||
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
|
||||
#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
|
||||
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
|
||||
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
|
||||
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
|
||||
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
|
||||
|
||||
#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
|
||||
#define IXGB_SUBDEVICE_ID_A00C 0xA00C
|
||||
|
File diff suppressed because it is too large
@ -1,7 +1,7 @@
|
||||
/*******************************************************************************
|
||||
|
||||
Intel PRO/10GbE Linux driver
|
||||
Copyright(c) 1999 - 2006 Intel Corporation.
|
||||
Copyright(c) 1999 - 2008 Intel Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
@ -40,7 +40,7 @@
|
||||
#include <linux/sched.h>
|
||||
|
||||
#undef ASSERT
|
||||
#define ASSERT(x) if(!(x)) BUG()
|
||||
#define ASSERT(x) if (!(x)) BUG()
|
||||
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
|
||||
|
||||
#ifdef DBG
|
||||
|
@@ -1,7 +1,7 @@
/*******************************************************************************

Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.

This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -136,7 +136,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
/* Flow control request timeout (how long to pause the link partner's tx)
* (PAP 15:0)
*
* Valid Range: 1 - 65535
* Valid Range: 1 - 65535
*
* Default Value: 65535 (0xffff) (we'll send an xon if we recover)
*/
@@ -200,7 +200,7 @@ struct ixgb_option {
static int __devinit
ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
{
if(*value == OPTION_UNSET) {
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
@@ -217,7 +217,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
}
break;
case range_option:
if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
printk(KERN_INFO "%s set to %i\n", opt->name, *value);
return 0;
}
@@ -226,10 +226,10 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
int i;
struct ixgb_opt_list *ent;

for(i = 0; i < opt->arg.l.nr; i++) {
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if(*value == ent->i) {
if(ent->str[0] != '\0')
if (*value == ent->i) {
if (ent->str[0] != '\0')
printk(KERN_INFO "%s\n", ent->str);
return 0;
}
@@ -260,7 +260,7 @@ void __devinit
ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if(bd >= IXGB_MAX_NIC) {
if (bd >= IXGB_MAX_NIC) {
printk(KERN_NOTICE
"Warning: no configuration for board #%i\n", bd);
printk(KERN_NOTICE "Using defaults for all values\n");
@@ -277,7 +277,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
};
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

if(num_TxDescriptors > bd) {
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
ixgb_validate_option(&tx_ring->count, &opt);
} else {
@@ -296,7 +296,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
};
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;

if(num_RxDescriptors > bd) {
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
ixgb_validate_option(&rx_ring->count, &opt);
} else {
@@ -312,7 +312,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.def = OPTION_ENABLED
};

if(num_XsumRX > bd) {
if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd];
ixgb_validate_option(&rx_csum, &opt);
adapter->rx_csum = rx_csum;
@@ -338,7 +338,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.p = fc_list }}
};

if(num_FlowControl > bd) {
if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd];
ixgb_validate_option(&fc, &opt);
adapter->hw.fc.type = fc;
@@ -356,14 +356,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_FCRTH}}
};

if(num_RxFCHighThresh > bd) {
if (num_RxFCHighThresh > bd) {
adapter->hw.fc.high_water = RxFCHighThresh[bd];
ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
} else {
adapter->hw.fc.high_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
printk (KERN_INFO
printk(KERN_INFO
"Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
@@ -376,14 +376,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_FCRTL}}
};

if(num_RxFCLowThresh > bd) {
if (num_RxFCLowThresh > bd) {
adapter->hw.fc.low_water = RxFCLowThresh[bd];
ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
} else {
adapter->hw.fc.low_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
printk (KERN_INFO
printk(KERN_INFO
"Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
@@ -396,7 +396,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_FCPAUSE}}
};

if(num_FCReqTimeout > bd) {
if (num_FCReqTimeout > bd) {
unsigned int pause_time = FCReqTimeout[bd];
ixgb_validate_option(&pause_time, &opt);
adapter->hw.fc.pause_time = pause_time;
@@ -404,7 +404,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.pause_time = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
printk (KERN_INFO
printk(KERN_INFO
"Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
@@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
printk (KERN_INFO
printk(KERN_INFO
"RxFCHighThresh must be >= (RxFCLowThresh + 8), "
"Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
@@ -429,7 +429,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_RDTR}}
};

if(num_RxIntDelay > bd) {
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
ixgb_validate_option(&adapter->rx_int_delay, &opt);
} else {
@@ -446,7 +446,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_TIDV}}
};

if(num_TxIntDelay > bd) {
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
ixgb_validate_option(&adapter->tx_int_delay, &opt);
} else {
@@ -462,7 +462,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.def = OPTION_ENABLED
};

if(num_IntDelayEnable > bd) {
if (num_IntDelayEnable > bd) {
unsigned int ide = IntDelayEnable[bd];
ixgb_validate_option(&ide, &opt);
adapter->tx_int_delay_enable = ide;
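The ixgb_param.c hunks above are mostly kernel-style whitespace fixes (`if(` becoming `if (`), but the flow-control block they touch also enforces a sanity rule: the receive flow-control high-water mark must be at least 8 above the low-water mark, otherwise both fall back to defaults. A minimal userspace sketch of that rule follows; the names and default values here are illustrative stand-ins, not the driver's real DEFAULT_FCRTH/DEFAULT_FCRTL.

```c
#include <stdio.h>

/* Illustrative defaults; the real driver uses DEFAULT_FCRTH/DEFAULT_FCRTL. */
#define DEF_HIGH_WATER 0x30000u
#define DEF_LOW_WATER  0x28000u

/* Mirror of the "high must be >= low + 8" check in ixgb_check_options(). */
static void check_fc_thresholds(unsigned int *high, unsigned int *low)
{
	if (*high < *low + 8) {
		printf("RxFCHighThresh must be >= (RxFCLowThresh + 8), using defaults\n");
		*high = DEF_HIGH_WATER;
		*low = DEF_LOW_WATER;
	}
}

int main(void)
{
	unsigned int high = 0x100, low = 0x200;	/* deliberately inverted */

	check_fc_thresholds(&high, &low);
	printf("high=0x%x low=0x%x\n", high, low);
	return 0;
}
```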
@@ -177,6 +177,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)

return bus;
}
EXPORT_SYMBOL(alloc_mdio_bitbang);

void free_mdio_bitbang(struct mii_bus *bus)
{
@@ -185,5 +186,6 @@ void free_mdio_bitbang(struct mii_bus *bus)
module_put(ctrl->ops->owner);
kfree(bus);
}
EXPORT_SYMBOL(free_mdio_bitbang);

MODULE_LICENSE("GPL");
@@ -86,7 +86,7 @@
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.26.24"
#define DRV_VERSION "2.0.26.25"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -1891,8 +1891,6 @@ static int init_nic(struct s2io_nic *nic)

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
if (nic->config.intr_type != INTA)
return MAC_RMAC_ERR_TIMER;
if (nic->device_type == XFRAME_II_DEVICE)
return LINK_UP_DOWN_INTERRUPT;
else
@@ -1925,7 +1923,9 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 gen_int_mask = 0;
u64 interruptible;

writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
if (mask & TX_DMA_INTR) {

gen_int_mask |= TXDMA_INT_M;
@@ -2015,10 +2015,12 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
gen_int_mask |= RXMAC_INT_M;
do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
&bar0->mac_int_mask);
do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
RMAC_DOUBLE_ECC_ERR |
RMAC_LINK_STATE_CHANGE_INT,
RMAC_DOUBLE_ECC_ERR;
if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
interruptible |= RMAC_LINK_STATE_CHANGE_INT;
do_s2io_write_bits(interruptible,
flag, &bar0->mac_rmac_err_mask);
}

@@ -2501,6 +2503,9 @@ static void stop_nic(struct s2io_nic *nic)
/**
* fill_rx_buffers - Allocates the Rx side skbs
* @ring_info: per ring structure
* @from_card_up: If this is true, we will map the buffer to get
* the dma address for buf0 and buf1 to give it to the card.
* Else we will sync the already mapped buffer to give it to the card.
* Description:
* The function allocates Rx side skbs and puts the physical
* address of these buffers into the RxD buffer pointers, so that the NIC
@@ -2518,7 +2523,7 @@ static void stop_nic(struct s2io_nic *nic)
* SUCCESS on success or an appropriate -ve value on failure.
*/

static int fill_rx_buffers(struct ring_info *ring)
static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
{
struct sk_buff *skb;
struct RxD_t *rxdp;
@@ -2637,17 +2642,16 @@ static int fill_rx_buffers(struct ring_info *ring)
skb->data = (void *) (unsigned long)tmp;
skb_reset_tail_pointer(skb);

/* AK: check is wrong. 0 can be valid dma address */
if (!(rxdp3->Buffer0_ptr))
if (from_card_up) {
rxdp3->Buffer0_ptr =
pci_map_single(ring->pdev, ba->ba_0,
BUF0_LEN, PCI_DMA_FROMDEVICE);
else
if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
goto pci_map_failed;
} else
pci_dma_sync_single_for_device(ring->pdev,
(dma_addr_t) rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
goto pci_map_failed;

rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
if (ring->rxd_mode == RXD_MODE_3B) {
@@ -2664,21 +2668,22 @@ static int fill_rx_buffers(struct ring_info *ring)
if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
goto pci_map_failed;

/* AK: check is wrong */
if (!rxdp3->Buffer1_ptr)
if (from_card_up) {
rxdp3->Buffer1_ptr =
pci_map_single(ring->pdev,
ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);

if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
pci_unmap_single
(ring->pdev,
(dma_addr_t)(unsigned long)
skb->data,
ring->mtu + 4,
PCI_DMA_FROMDEVICE);
goto pci_map_failed;
if (pci_dma_mapping_error
(rxdp3->Buffer1_ptr)) {
pci_unmap_single
(ring->pdev,
(dma_addr_t)(unsigned long)
skb->data,
ring->mtu + 4,
PCI_DMA_FROMDEVICE);
goto pci_map_failed;
}
}
rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
rxdp->Control_2 |= SET_BUFFER2_SIZE_3
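The new from_card_up argument to fill_rx_buffers() replaces the old "buffer pointer is zero" test, which the removed comment calls out as wrong because 0 can be a valid DMA address. The intent is now explicit: map the buffer with pci_map_single() only on the initial fill at card-up, and on later refills just sync the existing mapping back to the device. A small userspace model of that decision, with a plain flag standing in for the real DMA API:

```c
#include <stdbool.h>
#include <stdio.h>

struct rx_buf {
	bool mapped;	/* stands in for holding a valid DMA mapping */
};

/* Model of the fill path: map on first fill, sync on every later refill. */
static void fill_buffer(struct rx_buf *b, bool from_card_up)
{
	if (from_card_up) {
		b->mapped = true;	/* pci_map_single() in the driver */
		printf("mapped buffer for device\n");
	} else {
		/* pci_dma_sync_single_for_device() in the driver */
		printf("sync existing mapping for device\n");
	}
}

int main(void)
{
	struct rx_buf b = { .mapped = false };

	fill_buffer(&b, true);	/* card-up path */
	fill_buffer(&b, false);	/* interrupt-time refill */
	return 0;
}
```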
@@ -2813,7 +2818,7 @@ static void free_rx_buffers(struct s2io_nic *sp)

static int s2io_chk_rx_buffers(struct ring_info *ring)
{
if (fill_rx_buffers(ring) == -ENOMEM) {
if (fill_rx_buffers(ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
}
@@ -2944,7 +2949,7 @@ static void s2io_netpoll(struct net_device *dev)
rx_intr_handler(&mac_control->rings[i], 0);

for (i = 0; i < config->rx_ring_num; i++) {
if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
break;
@@ -4373,18 +4378,24 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
/* Nothing much can be done. Get out */
return IRQ_HANDLED;

writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

if (reason & GEN_INTR_TXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
if (reason & GEN_INTR_TXPIC)
s2io_txpic_intr_handle(sp);

for (i = 0; i < config->tx_fifo_num; i++)
tx_intr_handler(&fifos[i]);
if (reason & GEN_INTR_TXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
for (i = 0; i < config->tx_fifo_num; i++)
tx_intr_handler(&fifos[i]);

return IRQ_HANDLED;
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
return IRQ_HANDLED;
}
/* The interrupt was not raised by us */
return IRQ_NONE;
}

static void s2io_txpic_intr_handle(struct s2io_nic *sp)
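The reworked s2io_msix_fifo_handle() only masks the general interrupt register when a TX event is actually pending, services the TX fifos, then restores the caller's saved mask and re-reads the register to flush the write, returning IRQ_NONE when the interrupt was not ours. A compact userspace model of that mask/service/restore pattern; plain variables stand in for the BAR0 registers, and the event bits are made up:

```c
#include <stdio.h>

#define TX_EVENT 0x1u
#define RX_EVENT 0x2u

static unsigned int int_mask;	/* stands in for bar0->general_int_mask */

static int fifo_irq_handler(unsigned int reason, unsigned int saved_mask)
{
	if (!(reason & TX_EVENT))
		return 0;		/* IRQ_NONE: interrupt was not raised by us */

	int_mask = ~0u;			/* mask everything while servicing TX */
	printf("servicing TX fifos\n");
	int_mask = saved_mask;		/* restore the mask we were handed */
	/* the driver also re-reads the register here to flush the write */
	return 1;			/* IRQ_HANDLED */
}

int main(void)
{
	printf("handled=%d\n", fifo_irq_handler(TX_EVENT, 0x5));
	printf("handled=%d\n", fifo_irq_handler(RX_EVENT, 0x5));
	printf("mask restored to 0x%x\n", int_mask);
	return 0;
}
```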
@@ -7112,6 +7123,9 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)

s2io_rem_isr(sp);

/* stop the tx queue, indicate link down */
s2io_link(sp, LINK_DOWN);

/* Check if the device is Quiescent and then Reset the NIC */
while(do_io) {
/* As per the HW requirement we need to replenish the
@@ -7183,7 +7197,7 @@ static int s2io_card_up(struct s2io_nic * sp)

for (i = 0; i < config->rx_ring_num; i++) {
mac_control->rings[i].mtu = dev->mtu;
ret = fill_rx_buffers(&mac_control->rings[i]);
ret = fill_rx_buffers(&mac_control->rings[i], 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
@@ -7244,17 +7258,19 @@ static int s2io_card_up(struct s2io_nic * sp)

S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

set_bit(__S2IO_STATE_CARD_UP, &sp->state);

/* Enable select interrupts */
en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
if (sp->config.intr_type != INTA)
en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
else {
if (sp->config.intr_type != INTA) {
interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
} else {
interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
interruptible |= TX_PIC_INTR;
en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
}

set_bit(__S2IO_STATE_CARD_UP, &sp->state);
return 0;
}

@@ -1107,6 +1107,7 @@ static int init_shared_mem(struct s2io_nic *sp);
static void free_shared_mem(struct s2io_nic *sp);
static int init_nic(struct s2io_nic *nic);
static int rx_intr_handler(struct ring_info *ring_data, int budget);
static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);

File diff suppressed because it is too large
@@ -1,384 +0,0 @@
/*
* Copyright (C) 2000, 2005 MIPS Technologies, Inc. All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*
* SAA9730 ethernet driver description.
*
*/
#ifndef _SAA9730_H
#define _SAA9730_H

/* Number of 6-byte entries in the CAM. */
#define LAN_SAA9730_CAM_ENTRIES 10
#define LAN_SAA9730_CAM_DWORDS ((LAN_SAA9730_CAM_ENTRIES*6)/4)

/* TX and RX packet size: fixed to 2048 bytes, according to HW requirements. */
#define LAN_SAA9730_PACKET_SIZE 2048

/*
* Number of TX buffers = number of RX buffers = 2, which is fixed according
* to HW requirements.
*/
#define LAN_SAA9730_BUFFERS 2

/* Number of RX packets per RX buffer. */
#define LAN_SAA9730_RCV_Q_SIZE 15

/* Number of TX packets per TX buffer. */
#define LAN_SAA9730_TXM_Q_SIZE 15

/*
* We get an interrupt for each LAN_SAA9730_DEFAULT_RCV_Q_INT_THRESHOLD
* packets received.
* If however we receive less than LAN_SAA9730_DEFAULT_RCV_Q_INT_THRESHOLD
* packets, the hardware can timeout after a certain time and still tell
* us packets have arrived.
* The timeout value in unit of 32 PCI clocks (33Mhz).
* The value 200 approximates 0.0002 seconds.
*/
#define LAN_SAA9730_RCV_Q_INT_THRESHOLD 1
#define LAN_SAA9730_DEFAULT_TIME_OUT_CNT 10

#define RXSF_NDIS 0
#define RXSF_READY 2
#define RXSF_HWDONE 3

#define TXSF_EMPTY 0
#define TXSF_READY 2
#define TXSF_HWDONE 3

#define LANEND_LITTLE 0
#define LANEND_BIG_2143 1
#define LANEND_BIG_4321 2

#define LANMB_ANY 0
#define LANMB_8 1
#define LANMB_32 2
#define LANMB_64 3

#define MACCM_AUTOMATIC 0
#define MACCM_10MB 1
#define MACCM_MII 2

/*
* PHY definitions for Basic registers of QS6612 (used on MIPS ATLAS board)
*/
#define PHY_CONTROL 0x0
#define PHY_STATUS 0x1
#define PHY_STATUS_LINK_UP 0x4
#define PHY_CONTROL_RESET 0x8000
#define PHY_CONTROL_AUTO_NEG 0x1000
#define PHY_CONTROL_RESTART_AUTO_NEG 0x0200
#define PHY_ADDRESS 0x0

/* PK_COUNT register. */
#define PK_COUNT_TX_A_SHF 24
#define PK_COUNT_TX_A_MSK (0xff << PK_COUNT_TX_A_SHF)
#define PK_COUNT_TX_B_SHF 16
#define PK_COUNT_TX_B_MSK (0xff << PK_COUNT_TX_B_SHF)
#define PK_COUNT_RX_A_SHF 8
#define PK_COUNT_RX_A_MSK (0xff << PK_COUNT_RX_A_SHF)
#define PK_COUNT_RX_B_SHF 0
#define PK_COUNT_RX_B_MSK (0xff << PK_COUNT_RX_B_SHF)

/* OK2USE register. */
#define OK2USE_TX_A 0x8
#define OK2USE_TX_B 0x4
#define OK2USE_RX_A 0x2
#define OK2USE_RX_B 0x1

/* LAN DMA CONTROL register. */
#define DMA_CTL_BLK_INT 0x80000000
#define DMA_CTL_MAX_XFER_SHF 18
#define DMA_CTL_MAX_XFER_MSK (0x3 << LAN_DMA_CTL_MAX_XFER_SHF)
#define DMA_CTL_ENDIAN_SHF 16
#define DMA_CTL_ENDIAN_MSK (0x3 << LAN_DMA_CTL_ENDIAN_SHF)
#define DMA_CTL_RX_INT_COUNT_SHF 8
#define DMA_CTL_RX_INT_COUNT_MSK (0xff << LAN_DMA_CTL_RX_INT_COUNT_SHF)
#define DMA_CTL_EN_TX_DMA 0x00000080
#define DMA_CTL_EN_RX_DMA 0x00000040
#define DMA_CTL_RX_INT_BUFFUL_EN 0x00000020
#define DMA_CTL_RX_INT_TO_EN 0x00000010
#define DMA_CTL_RX_INT_EN 0x00000008
#define DMA_CTL_TX_INT_EN 0x00000004
#define DMA_CTL_MAC_TX_INT_EN 0x00000002
#define DMA_CTL_MAC_RX_INT_EN 0x00000001

/* DMA STATUS register. */
#define DMA_STATUS_BAD_ADDR_SHF 16
#define DMA_STATUS_BAD_ADDR_MSK (0xf << DMA_STATUS_BAD_ADDR_SHF)
#define DMA_STATUS_RX_PKTS_RECEIVED_SHF 8
#define DMA_STATUS_RX_PKTS_RECEIVED_MSK (0xff << DMA_STATUS_RX_PKTS_RECEIVED_SHF)
#define DMA_STATUS_TX_EN_SYNC 0x00000080
#define DMA_STATUS_RX_BUF_A_FUL 0x00000040
#define DMA_STATUS_RX_BUF_B_FUL 0x00000020
#define DMA_STATUS_RX_TO_INT 0x00000010
#define DMA_STATUS_RX_INT 0x00000008
#define DMA_STATUS_TX_INT 0x00000004
#define DMA_STATUS_MAC_TX_INT 0x00000002
#define DMA_STATUS_MAC_RX_INT 0x00000001

/* DMA TEST/PANIC SWITHES register. */
#define DMA_TEST_LOOPBACK 0x01000000
#define DMA_TEST_SW_RESET 0x00000001

/* MAC CONTROL register. */
#define MAC_CONTROL_EN_MISS_ROLL 0x00002000
#define MAC_CONTROL_MISS_ROLL 0x00000400
#define MAC_CONTROL_LOOP10 0x00000080
#define MAC_CONTROL_CONN_SHF 5
#define MAC_CONTROL_CONN_MSK (0x3 << MAC_CONTROL_CONN_SHF)
#define MAC_CONTROL_MAC_LOOP 0x00000010
#define MAC_CONTROL_FULL_DUP 0x00000008
#define MAC_CONTROL_RESET 0x00000004
#define MAC_CONTROL_HALT_IMM 0x00000002
#define MAC_CONTROL_HALT_REQ 0x00000001

/* CAM CONTROL register. */
#define CAM_CONTROL_COMP_EN 0x00000010
#define CAM_CONTROL_NEG_CAM 0x00000008
#define CAM_CONTROL_BROAD_ACC 0x00000004
#define CAM_CONTROL_GROUP_ACC 0x00000002
#define CAM_CONTROL_STATION_ACC 0x00000001

/* TRANSMIT CONTROL register. */
#define TX_CTL_EN_COMP 0x00004000
#define TX_CTL_EN_TX_PAR 0x00002000
#define TX_CTL_EN_LATE_COLL 0x00001000
#define TX_CTL_EN_EX_COLL 0x00000800
#define TX_CTL_EN_L_CARR 0x00000400
#define TX_CTL_EN_EX_DEFER 0x00000200
#define TX_CTL_EN_UNDER 0x00000100
#define TX_CTL_MII10 0x00000080
#define TX_CTL_SD_PAUSE 0x00000040
#define TX_CTL_NO_EX_DEF0 0x00000020
#define TX_CTL_F_BACK 0x00000010
#define TX_CTL_NO_CRC 0x00000008
#define TX_CTL_NO_PAD 0x00000004
#define TX_CTL_TX_HALT 0x00000002
#define TX_CTL_TX_EN 0x00000001

/* TRANSMIT STATUS register. */
#define TX_STATUS_SQ_ERR 0x00010000
#define TX_STATUS_TX_HALTED 0x00008000
#define TX_STATUS_COMP 0x00004000
#define TX_STATUS_TX_PAR 0x00002000
#define TX_STATUS_LATE_COLL 0x00001000
#define TX_STATUS_TX10_STAT 0x00000800
#define TX_STATUS_L_CARR 0x00000400
#define TX_STATUS_EX_DEFER 0x00000200
#define TX_STATUS_UNDER 0x00000100
#define TX_STATUS_IN_TX 0x00000080
#define TX_STATUS_PAUSED 0x00000040
#define TX_STATUS_TX_DEFERRED 0x00000020
#define TX_STATUS_EX_COLL 0x00000010
#define TX_STATUS_TX_COLL_SHF 0
#define TX_STATUS_TX_COLL_MSK (0xf << TX_STATUS_TX_COLL_SHF)

/* RECEIVE CONTROL register. */
#define RX_CTL_EN_GOOD 0x00004000
#define RX_CTL_EN_RX_PAR 0x00002000
#define RX_CTL_EN_LONG_ERR 0x00000800
#define RX_CTL_EN_OVER 0x00000400
#define RX_CTL_EN_CRC_ERR 0x00000200
#define RX_CTL_EN_ALIGN 0x00000100
#define RX_CTL_IGNORE_CRC 0x00000040
#define RX_CTL_PASS_CTL 0x00000020
#define RX_CTL_STRIP_CRC 0x00000010
#define RX_CTL_SHORT_EN 0x00000008
#define RX_CTL_LONG_EN 0x00000004
#define RX_CTL_RX_HALT 0x00000002
#define RX_CTL_RX_EN 0x00000001

/* RECEIVE STATUS register. */
#define RX_STATUS_RX_HALTED 0x00008000
#define RX_STATUS_GOOD 0x00004000
#define RX_STATUS_RX_PAR 0x00002000
#define RX_STATUS_LONG_ERR 0x00000800
#define RX_STATUS_OVERFLOW 0x00000400
#define RX_STATUS_CRC_ERR 0x00000200
#define RX_STATUS_ALIGN_ERR 0x00000100
#define RX_STATUS_RX10_STAT 0x00000080
#define RX_STATUS_INT_RX 0x00000040
#define RX_STATUS_CTL_RECD 0x00000020

/* MD_CA register. */
#define MD_CA_PRE_SUP 0x00001000
#define MD_CA_BUSY 0x00000800
#define MD_CA_WR 0x00000400
#define MD_CA_PHY_SHF 5
#define MD_CA_PHY_MSK (0x1f << MD_CA_PHY_SHF)
#define MD_CA_ADDR_SHF 0
#define MD_CA_ADDR_MSK (0x1f << MD_CA_ADDR_SHF)

/* Tx Status/Control. */
#define TX_STAT_CTL_OWNER_SHF 30
#define TX_STAT_CTL_OWNER_MSK (0x3 << TX_STAT_CTL_OWNER_SHF)
#define TX_STAT_CTL_FRAME_SHF 27
#define TX_STAT_CTL_FRAME_MSK (0x7 << TX_STAT_CTL_FRAME_SHF)
#define TX_STAT_CTL_STATUS_SHF 11
#define TX_STAT_CTL_STATUS_MSK (0x1ffff << TX_STAT_CTL_STATUS_SHF)
#define TX_STAT_CTL_LENGTH_SHF 0
#define TX_STAT_CTL_LENGTH_MSK (0x7ff << TX_STAT_CTL_LENGTH_SHF)

#define TX_STAT_CTL_ERROR_MSK ((TX_STATUS_SQ_ERR | \
TX_STATUS_TX_HALTED | \
TX_STATUS_TX_PAR | \
TX_STATUS_LATE_COLL | \
TX_STATUS_L_CARR | \
TX_STATUS_EX_DEFER | \
TX_STATUS_UNDER | \
TX_STATUS_PAUSED | \
TX_STATUS_TX_DEFERRED | \
TX_STATUS_EX_COLL | \
TX_STATUS_TX_COLL_MSK) \
<< TX_STAT_CTL_STATUS_SHF)
#define TX_STAT_CTL_INT_AFTER_TX 0x4

/* Rx Status/Control. */
#define RX_STAT_CTL_OWNER_SHF 30
#define RX_STAT_CTL_OWNER_MSK (0x3 << RX_STAT_CTL_OWNER_SHF)
#define RX_STAT_CTL_STATUS_SHF 11
#define RX_STAT_CTL_STATUS_MSK (0xffff << RX_STAT_CTL_STATUS_SHF)
#define RX_STAT_CTL_LENGTH_SHF 0
#define RX_STAT_CTL_LENGTH_MSK (0x7ff << RX_STAT_CTL_LENGTH_SHF)

/* The SAA9730 (LAN) controller register map, as seen via the PCI-bus. */
#define SAA9730_LAN_REGS_ADDR 0x20400
#define SAA9730_LAN_REGS_SIZE 0x00400

struct lan_saa9730_regmap {
volatile unsigned int TxBuffA; /* 0x20400 */
volatile unsigned int TxBuffB; /* 0x20404 */
volatile unsigned int RxBuffA; /* 0x20408 */
volatile unsigned int RxBuffB; /* 0x2040c */
volatile unsigned int PacketCount; /* 0x20410 */
volatile unsigned int Ok2Use; /* 0x20414 */
volatile unsigned int LanDmaCtl; /* 0x20418 */
volatile unsigned int Timeout; /* 0x2041c */
volatile unsigned int DmaStatus; /* 0x20420 */
volatile unsigned int DmaTest; /* 0x20424 */
volatile unsigned char filler20428[0x20430 - 0x20428];
volatile unsigned int PauseCount; /* 0x20430 */
volatile unsigned int RemotePauseCount; /* 0x20434 */
volatile unsigned char filler20438[0x20440 - 0x20438];
volatile unsigned int MacCtl; /* 0x20440 */
volatile unsigned int CamCtl; /* 0x20444 */
volatile unsigned int TxCtl; /* 0x20448 */
volatile unsigned int TxStatus; /* 0x2044c */
volatile unsigned int RxCtl; /* 0x20450 */
volatile unsigned int RxStatus; /* 0x20454 */
volatile unsigned int StationMgmtData; /* 0x20458 */
volatile unsigned int StationMgmtCtl; /* 0x2045c */
volatile unsigned int CamAddress; /* 0x20460 */
volatile unsigned int CamData; /* 0x20464 */
volatile unsigned int CamEnable; /* 0x20468 */
volatile unsigned char filler2046c[0x20500 - 0x2046c];
volatile unsigned int DebugPCIMasterAddr; /* 0x20500 */
volatile unsigned int DebugLanTxStateMachine; /* 0x20504 */
volatile unsigned int DebugLanRxStateMachine; /* 0x20508 */
volatile unsigned int DebugLanTxFifoPointers; /* 0x2050c */
volatile unsigned int DebugLanRxFifoPointers; /* 0x20510 */
volatile unsigned int DebugLanCtlStateMachine; /* 0x20514 */
};
typedef volatile struct lan_saa9730_regmap t_lan_saa9730_regmap;

/* EVM interrupt control registers. */
#define EVM_LAN_INT 0x00010000
#define EVM_MASTER_EN 0x00000001

/* The SAA9730 (EVM) controller register map, as seen via the PCI-bus. */
#define SAA9730_EVM_REGS_ADDR 0x02000
#define SAA9730_EVM_REGS_SIZE 0x00400

struct evm_saa9730_regmap {
volatile unsigned int InterruptStatus1; /* 0x2000 */
volatile unsigned int InterruptEnable1; /* 0x2004 */
volatile unsigned int InterruptMonitor1; /* 0x2008 */
volatile unsigned int Counter; /* 0x200c */
volatile unsigned int CounterThreshold; /* 0x2010 */
volatile unsigned int CounterControl; /* 0x2014 */
volatile unsigned int GpioControl1; /* 0x2018 */
volatile unsigned int InterruptStatus2; /* 0x201c */
volatile unsigned int InterruptEnable2; /* 0x2020 */
volatile unsigned int InterruptMonitor2; /* 0x2024 */
volatile unsigned int GpioControl2; /* 0x2028 */
volatile unsigned int InterruptBlock1; /* 0x202c */
volatile unsigned int InterruptBlock2; /* 0x2030 */
};
typedef volatile struct evm_saa9730_regmap t_evm_saa9730_regmap;

struct lan_saa9730_private {
/*
* Rx/Tx packet buffers.
* The Rx and Tx packets must be PACKET_SIZE aligned.
*/
void *buffer_start;
unsigned int buffer_size;

/*
* DMA address of beginning of this object, returned
* by pci_alloc_consistent().
*/
dma_addr_t dma_addr;

/* Pointer to the associated pci device structure */
struct pci_dev *pci_dev;

/* Pointer for the SAA9730 LAN controller register set. */
t_lan_saa9730_regmap *lan_saa9730_regs;

/* Pointer to the SAA9730 EVM register. */
t_evm_saa9730_regmap *evm_saa9730_regs;

/* Rcv buffer Index. */
unsigned char NextRcvPacketIndex;
/* Next buffer index. */
unsigned char NextRcvBufferIndex;

/* Index of next packet to use in that buffer. */
unsigned char NextTxmPacketIndex;
/* Next buffer index. */
unsigned char NextTxmBufferIndex;

/* Index of first pending packet ready to send. */
unsigned char PendingTxmPacketIndex;
/* Pending buffer index. */
unsigned char PendingTxmBufferIndex;

unsigned char DmaRcvPackets;
unsigned char DmaTxmPackets;

void *TxmBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_TXM_Q_SIZE];
void *RcvBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_RCV_Q_SIZE];
unsigned int TxBufferFree[LAN_SAA9730_BUFFERS];

unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6];

spinlock_t lock;
};

#endif /* _SAA9730_H */
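The comment in the removed saa9730.h above states that the receive timeout counts in units of 32 PCI clocks at 33 MHz and that a value of 200 approximates 0.0002 seconds; a quick check gives 200 * 32 / 33e6 ≈ 0.000194 s, so the estimate holds. The arithmetic, for reference:

```c
#include <stdio.h>

int main(void)
{
	const double pci_clock_hz = 33e6;	/* 33 MHz PCI clock */
	const double unit_clocks = 32.0;	/* timeout counts in units of 32 clocks */
	const double count = 200.0;

	/* prints roughly 0.000194 s, close to the quoted 0.0002 s */
	printf("timeout = %.6f s\n", count * unit_clocks / pci_clock_hz);
	return 0;
}
```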
@@ -3500,11 +3500,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit

dev->stats.rx_bytes += length;
/* Send the packet up the stack */
#ifdef CONFIG_UGETH_NAPI
netif_receive_skb(skb);
#else
netif_rx(skb);
#endif /* CONFIG_UGETH_NAPI */
}

ugeth->dev->last_rx = jiffies;
@@ -3580,7 +3576,6 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
return 0;
}

#ifdef CONFIG_UGETH_NAPI
static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
@@ -3607,7 +3602,6 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)

return howmany;
}
#endif /* CONFIG_UGETH_NAPI */

static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
@@ -3617,9 +3611,6 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
struct ucc_geth_info *ug_info;
register u32 ucce;
register u32 uccm;
#ifndef CONFIG_UGETH_NAPI
register u32 rx_mask;
#endif
register u32 tx_mask;
u8 i;

@@ -3636,21 +3627,11 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)

/* check for receive events that require processing */
if (ucce & UCCE_RX_EVENTS) {
#ifdef CONFIG_UGETH_NAPI
if (netif_rx_schedule_prep(dev, &ugeth->napi)) {
uccm &= ~UCCE_RX_EVENTS;
out_be32(uccf->p_uccm, uccm);
__netif_rx_schedule(dev, &ugeth->napi);
}
#else
rx_mask = UCCE_RXBF_SINGLE_MASK;
for (i = 0; i < ug_info->numQueuesRx; i++) {
if (ucce & rx_mask)
ucc_geth_rx(ugeth, i, (int)ugeth->ug_info->bdRingLenRx[i]);
ucce &= ~rx_mask;
rx_mask <<= 1;
}
#endif /* CONFIG_UGETH_NAPI */
}

/* Tx event processing */
@@ -3720,9 +3701,8 @@ static int ucc_geth_open(struct net_device *dev)
return err;
}

#ifdef CONFIG_UGETH_NAPI
napi_enable(&ugeth->napi);
#endif

err = ucc_geth_startup(ugeth);
if (err) {
if (netif_msg_ifup(ugeth))
@@ -3783,9 +3763,8 @@ static int ucc_geth_open(struct net_device *dev)
return err;

out_err:
#ifdef CONFIG_UGETH_NAPI
napi_disable(&ugeth->napi);
#endif

return err;
}

@@ -3796,9 +3775,7 @@ static int ucc_geth_close(struct net_device *dev)

ugeth_vdbg("%s: IN", __FUNCTION__);

#ifdef CONFIG_UGETH_NAPI
napi_disable(&ugeth->napi);
#endif

ucc_geth_stop(ugeth);

@@ -4050,9 +4027,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
dev->hard_start_xmit = ucc_geth_start_xmit;
dev->tx_timeout = ucc_geth_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_UGETH_NAPI
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
#endif /* CONFIG_UGETH_NAPI */
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ucc_netpoll;
#endif
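The ucc_geth hunks drop the CONFIG_UGETH_NAPI conditionals, so the driver now always takes the NAPI path: the interrupt handler masks RX events and schedules the poll routine instead of optionally processing the ring in interrupt context (the via-rhine hunks below make the same conversion). The sketch here is only the generic shape of such a budgeted poll loop, modelled in userspace, not the ucc_geth code itself:

```c
#include <stdio.h>

/* Generic shape of a NAPI-style poll: handle at most 'budget' packets,
 * and only when the ring runs dry would the driver complete NAPI and
 * re-enable RX interrupts. */
static int poll_ring(int *pending, int budget)
{
	int done = 0;

	while (done < budget && *pending > 0) {
		(*pending)--;
		done++;
	}
	if (done < budget)
		printf("ring empty: complete NAPI and re-enable RX interrupts\n");
	return done;
}

int main(void)
{
	int pending = 70;

	printf("first pass handled %d\n", poll_ring(&pending, 64));
	printf("second pass handled %d\n", poll_ring(&pending, 64));
	return 0;
}
```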
@@ -73,12 +73,7 @@ static const int multicast_filter_limit = 32;
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
#ifdef CONFIG_VIA_RHINE_NAPI
#define RX_RING_SIZE 64
#else
#define RX_RING_SIZE 16
#endif

/* Operational parameters that usually are not changed. */

@@ -583,7 +578,6 @@ static void rhine_poll(struct net_device *dev)
}
#endif

#ifdef CONFIG_VIA_RHINE_NAPI
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
@@ -604,7 +598,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
}
return work_done;
}
#endif

static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
@@ -784,9 +777,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
#endif

if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

@@ -1056,9 +1048,7 @@ static void init_registers(struct net_device *dev)

rhine_set_rx_mode(dev);

#ifdef CONFIG_VIA_RHINE_NAPI
napi_enable(&rp->napi);
#endif

/* Enable interrupts by setting the interrupt mask. */
iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
@@ -1193,9 +1183,7 @@ static void rhine_tx_timeout(struct net_device *dev)
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);

#ifdef CONFIG_VIA_RHINE_NAPI
napi_disable(&rp->napi);
#endif

spin_lock(&rp->lock);

@@ -1319,16 +1307,12 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)

if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
#ifdef CONFIG_VIA_RHINE_NAPI
iowrite16(IntrTxAborted |
IntrTxDone | IntrTxError | IntrTxUnderrun |
IntrPCIErr | IntrStatsMax | IntrLinkChange,
ioaddr + IntrEnable);

netif_rx_schedule(dev, &rp->napi);
#else
rhine_rx(dev, RX_RING_SIZE);
#endif
}

if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
@@ -1520,11 +1504,7 @@ static int rhine_rx(struct net_device *dev, int limit)
PCI_DMA_FROMDEVICE);
}
skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
netif_receive_skb(skb);
#else
netif_rx(skb);
#endif
dev->last_rx = jiffies;
rp->stats.rx_bytes += pkt_len;
rp->stats.rx_packets++;
@@ -1836,9 +1816,7 @@ static int rhine_close(struct net_device *dev)
spin_lock_irq(&rp->lock);

netif_stop_queue(dev);
#ifdef CONFIG_VIA_RHINE_NAPI
napi_disable(&rp->napi);
#endif

if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
@@ -1937,9 +1915,8 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;

#ifdef CONFIG_VIA_RHINE_NAPI
napi_disable(&rp->napi);
#endif

netif_device_detach(dev);
pci_save_state(pdev);
@@ -1102,61 +1102,41 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc

static int velocity_init_rings(struct velocity_info *vptr)
{
int i;
unsigned int psize;
unsigned int tsize;
struct velocity_opt *opt = &vptr->options;
const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
struct pci_dev *pdev = vptr->pdev;
dma_addr_t pool_dma;
u8 *pool;

/*
* Allocate all RD/TD rings a single pool
*/

psize = vptr->options.numrx * sizeof(struct rx_desc) +
vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
void *pool;
unsigned int i;

/*
* Allocate all RD/TD rings a single pool.
*
* pci_alloc_consistent() fulfills the requirement for 64 bytes
* alignment
*/
pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);

if (pool == NULL) {
printk(KERN_ERR "%s : DMA memory allocation failed.\n",
vptr->dev->name);
pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +
rx_ring_size, &pool_dma);
if (!pool) {
dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
vptr->dev->name);
return -ENOMEM;
}

memset(pool, 0, psize);

vptr->rd_ring = (struct rx_desc *) pool;

vptr->rd_ring = pool;
vptr->rd_pool_dma = pool_dma;

tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
&vptr->tx_bufs_dma);
pool += rx_ring_size;
pool_dma += rx_ring_size;

if (vptr->tx_bufs == NULL) {
printk(KERN_ERR "%s: DMA memory allocation failed.\n",
vptr->dev->name);
pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
return -ENOMEM;
}

memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);

i = vptr->options.numrx * sizeof(struct rx_desc);
pool += i;
pool_dma += i;
for (i = 0; i < vptr->num_txq; i++) {
int offset = vptr->options.numtx * sizeof(struct tx_desc);

vptr->td_rings[i] = pool;
vptr->td_pool_dma[i] = pool_dma;
vptr->td_rings[i] = (struct tx_desc *) pool;
pool += offset;
pool_dma += offset;
pool += tx_ring_size;
pool_dma += tx_ring_size;
}

return 0;
}

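The rewritten velocity_init_rings() makes one coherent allocation sized for the RX ring plus every TX queue's ring, then carves it up by advancing the CPU pointer and the DMA address together, first by rx_ring_size and then by tx_ring_size per queue (pci_alloc_consistent() already provides the 64-byte alignment the comment mentions). Below is a userspace model of carving sub-rings out of a single pool; calloc and plain pointers stand in for the DMA API, and the ring counts are made up:

```c
#include <stdio.h>
#include <stdlib.h>

#define NUM_RX  4
#define NUM_TX  2
#define NUM_TXQ 2

struct rx_desc { unsigned int w[4]; };
struct tx_desc { unsigned int w[4]; };

int main(void)
{
	const size_t rx_ring_size = NUM_RX * sizeof(struct rx_desc);
	const size_t tx_ring_size = NUM_TX * sizeof(struct tx_desc);
	/* One pool holding the RX ring followed by every TX queue's ring;
	 * in the driver this is a single pci_alloc_consistent() call. */
	char *pool = calloc(1, rx_ring_size + tx_ring_size * NUM_TXQ);
	struct rx_desc *rd_ring;
	struct tx_desc *td_rings[NUM_TXQ];
	int i;

	if (!pool)
		return 1;

	rd_ring = (struct rx_desc *)pool;
	pool += rx_ring_size;			/* the DMA address advances in step */
	for (i = 0; i < NUM_TXQ; i++) {
		td_rings[i] = (struct tx_desc *)pool;
		pool += tx_ring_size;
	}

	printf("rx ring at %p, txq1 ring at %p\n",
	       (void *)rd_ring, (void *)td_rings[1]);
	free(rd_ring);				/* frees the whole pool */
	return 0;
}
```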
@@ -1169,19 +1149,13 @@ static int velocity_init_rings(struct velocity_info *vptr)

static void velocity_free_rings(struct velocity_info *vptr)
{
int size;

size = vptr->options.numrx * sizeof(struct rx_desc) +
vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
const int size = vptr->options.numrx * sizeof(struct rx_desc) +
vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;

pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);

size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;

pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
}

static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
struct mac_regs __iomem *regs = vptr->mac_regs;
int avail, dirty, unusable;
@@ -1208,7 +1182,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)

static int velocity_rx_refill(struct velocity_info *vptr)
{
int dirty = vptr->rd_dirty, done = 0, ret = 0;
int dirty = vptr->rd_dirty, done = 0;

do {
struct rx_desc *rd = vptr->rd_ring + dirty;
@@ -1218,8 +1192,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
break;

if (!vptr->rd_info[dirty].skb) {
ret = velocity_alloc_rx_buf(vptr, dirty);
if (ret < 0)
if (velocity_alloc_rx_buf(vptr, dirty) < 0)
break;
}
done++;
@@ -1229,10 +1202,14 @@ static int velocity_rx_refill(struct velocity_info *vptr)
if (done) {
vptr->rd_dirty = dirty;
vptr->rd_filled += done;
velocity_give_many_rx_descs(vptr);
}

return ret;
return done;
}

static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
{
vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}

/**
@@ -1245,25 +1222,24 @@ static int velocity_rx_refill(struct velocity_info *vptr)

static int velocity_init_rd_ring(struct velocity_info *vptr)
{
int ret;
int mtu = vptr->dev->mtu;

vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
int ret = -ENOMEM;

vptr->rd_info = kcalloc(vptr->options.numrx,
sizeof(struct velocity_rd_info), GFP_KERNEL);
if (!vptr->rd_info)
return -ENOMEM;
goto out;

vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;

ret = velocity_rx_refill(vptr);
if (ret < 0) {
if (velocity_rx_refill(vptr) != vptr->options.numrx) {
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
"%s: failed to allocate RX buffer.\n", vptr->dev->name);
velocity_free_rd_ring(vptr);
goto out;
}

ret = 0;
out:
return ret;
}

@@ -1313,10 +1289,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)

static int velocity_init_td_ring(struct velocity_info *vptr)
{
int i, j;
dma_addr_t curr;
struct tx_desc *td;
struct velocity_td_info *td_info;
unsigned int j;

/* Init the TD ring entries */
for (j = 0; j < vptr->num_txq; j++) {
@@ -1331,14 +1305,6 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
return -ENOMEM;
}

for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
td = &(vptr->td_rings[j][i]);
td_info = &(vptr->td_infos[j][i]);
td_info->buf = vptr->tx_bufs +
(j * vptr->options.numtx + i) * PKT_BUF_SZ;
td_info->buf_dma = vptr->tx_bufs_dma +
(j * vptr->options.numtx + i) * PKT_BUF_SZ;
}
vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
}
return 0;
@@ -1448,10 +1414,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)

vptr->rd_curr = rd_curr;

if (works > 0 && velocity_rx_refill(vptr) < 0) {
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
"%s: rx buf allocation failure\n", vptr->dev->name);
}
if ((works > 0) && (velocity_rx_refill(vptr) > 0))
velocity_give_many_rx_descs(vptr);

VAR_USED(stats);
return works;
@@ -1867,7 +1831,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
/*
* Don't unmap the pre-allocated tx_bufs
*/
if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {
if (tdinfo->skb_dma) {

for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
@@ -1898,6 +1862,8 @@ static int velocity_open(struct net_device *dev)
struct velocity_info *vptr = netdev_priv(dev);
int ret;

velocity_set_rxbufsize(vptr, dev->mtu);

ret = velocity_init_rings(vptr);
if (ret < 0)
goto out;
@@ -1913,6 +1879,8 @@ static int velocity_open(struct net_device *dev)
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);

velocity_give_many_rx_descs(vptr);

velocity_init_registers(vptr, VELOCITY_INIT_COLD);

ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
@@ -1977,6 +1945,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)

dev->mtu = new_mtu;

velocity_set_rxbufsize(vptr, new_mtu);

ret = velocity_init_rd_ring(vptr);
if (ret < 0)
goto out_unlock;
@@ -2063,9 +2033,19 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_desc *td_ptr;
struct velocity_td_info *tdinfo;
unsigned long flags;
int index;
int pktlen = skb->len;
__le16 len = cpu_to_le16(pktlen);
__le16 len;
int index;

if (skb->len < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN))
goto out;
pktlen = ETH_ZLEN;
}

len = cpu_to_le16(pktlen);

#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
@@ -2083,23 +2063,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
td_ptr->tdesc1.TCR = TCR0_TIC;
td_ptr->td_buf[0].size &= ~TD_QUEUE;

/*
* Pad short frames.
*/
if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */
pktlen = ETH_ZLEN;
len = cpu_to_le16(ETH_ZLEN);
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
tdinfo->skb = skb;
tdinfo->skb_dma[0] = tdinfo->buf_dma;
td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
} else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 0) {
int nfrags = skb_shinfo(skb)->nr_frags;
@@ -2191,7 +2154,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
spin_unlock_irqrestore(&vptr->lock, flags);
return 0;
out:
return NETDEV_TX_OK;
}

/**
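In the reworked velocity_xmit(), undersized frames are padded with skb_padto() before any descriptor setup, instead of being copied and zero-filled into the pre-allocated bounce buffer later, so everything handed to the hardware is at least ETH_ZLEN (60) bytes long. A userspace sketch of the same pad-before-send rule, using a plain buffer in place of an sk_buff:

```c
#include <stdio.h>
#include <string.h>

#define ETH_ZLEN  60
#define MAX_FRAME 1514

/* Pad a short frame up to ETH_ZLEN with zeroes, as skb_padto() does for
 * the driver before the TX descriptor is filled in. */
static size_t pad_frame(unsigned char *buf, size_t len)
{
	if (len < ETH_ZLEN) {
		memset(buf + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}
	return len;
}

int main(void)
{
	unsigned char frame[MAX_FRAME] = { 0xde, 0xad, 0xbe, 0xef };

	printf("tx length = %zu\n", pad_frame(frame, 4));	/* padded to 60 */
	printf("tx length = %zu\n", pad_frame(frame, 200));	/* unchanged */
	return 0;
}
```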
@@ -236,10 +236,8 @@ struct velocity_rd_info {

struct velocity_td_info {
struct sk_buff *skb;
u8 *buf;
int nskb_dma;
dma_addr_t skb_dma[7];
dma_addr_t buf_dma;
};

enum velocity_owner {
@@ -1506,9 +1504,6 @@ struct velocity_info {
dma_addr_t rd_pool_dma;
dma_addr_t td_pool_dma[TX_QUEUE_NO];

dma_addr_t tx_bufs_dma;
u8 *tx_bufs;

struct vlan_group *vlgrp;
u8 ip_addr[4];
enum chip_type chip_id;
@@ -550,7 +550,8 @@ static struct virtio_device_id id_table[] = {
};

static unsigned int features[] = {
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
};
@@ -828,6 +828,19 @@ static inline void netif_napi_add(struct net_device *dev,
set_bit(NAPI_STATE_SCHED, &napi->state);
}

/**
* netif_napi_del - remove a napi context
* @napi: napi context
*
* netif_napi_del() removes a napi context from the network device napi list
*/
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
list_del(&napi->dev_list);
#endif
}

struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */