e1000: Remove Multiqueue code until we have support for MSI-X in our hardware

Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Authored by Jeff Kirsher on 2006-03-02 18:16:38 -08:00; committed by root
parent 2ade43618b
commit c1605eb37a
3 changed files with 4 additions and 292 deletions

drivers/net/e1000/e1000.h

@@ -83,10 +83,6 @@
struct e1000_adapter;
#include "e1000_hw.h"
#ifdef CONFIG_E1000_MQ
#include <linux/cpu.h>
#include <linux/smp.h>
#endif
#ifdef DBG
#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,12 +165,6 @@ struct e1000_buffer {
uint16_t next_to_watch;
};
#ifdef CONFIG_E1000_MQ
struct e1000_queue_stats {
uint64_t packets;
uint64_t bytes;
};
#endif
struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
@@ -198,12 +188,7 @@ struct e1000_tx_ring {
spinlock_t tx_lock;
uint16_t tdh;
uint16_t tdt;
boolean_t last_tx_tso;
#ifdef CONFIG_E1000_MQ
struct e1000_queue_stats tx_stats;
#endif
};
struct e1000_rx_ring {
@@ -230,9 +215,6 @@ struct e1000_rx_ring {
uint16_t rdh;
uint16_t rdt;
#ifdef CONFIG_E1000_MQ
struct e1000_queue_stats rx_stats;
#endif
};
#define E1000_DESC_UNUSED(R) \
@@ -278,9 +260,6 @@ struct e1000_adapter {
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
#ifdef CONFIG_E1000_MQ
struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
#endif
unsigned long tx_queue_len;
uint32_t txd_cmd;
uint32_t tx_int_delay;
@@ -313,11 +292,6 @@ struct e1000_adapter {
struct e1000_rx_ring *rx_ring; /* One per active queue */
#ifdef CONFIG_E1000_NAPI
struct net_device *polling_netdev; /* One per active queue */
#endif
#ifdef CONFIG_E1000_MQ
struct net_device **cpu_netdev; /* per-cpu */
struct call_async_data_struct rx_sched_call_data;
cpumask_t cpumask;
#endif
int num_tx_queues;
int num_rx_queues;

drivers/net/e1000/e1000_ethtool.c

@@ -97,14 +97,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
};
#ifdef CONFIG_E1000_MQ
#define E1000_QUEUE_STATS_LEN \
(((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
* (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
#else
#define E1000_QUEUE_STATS_LEN 0
#endif
#define E1000_GLOBAL_STATS_LEN \
sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
@@ -1799,11 +1792,6 @@ e1000_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_E1000_MQ
uint64_t *queue_stat;
int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
int j, k;
#endif
int i;
e1000_update_stats(adapter);
@@ -1812,29 +1800,12 @@ e1000_get_ethtool_stats(struct net_device *netdev,
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
}
#ifdef CONFIG_E1000_MQ
for (j = 0; j < adapter->num_tx_queues; j++) {
queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
#endif
/* BUG_ON(i != E1000_STATS_LEN); */
}
static void
e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
#ifdef CONFIG_E1000_MQ
struct e1000_adapter *adapter = netdev_priv(netdev);
#endif
uint8_t *p = data;
int i;
@@ -1849,20 +1820,6 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
#ifdef CONFIG_E1000_MQ
for (i = 0; i < adapter->num_tx_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
#endif
/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}

drivers/net/e1000/e1000_main.c

@@ -103,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
@@ -191,9 +191,6 @@ static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_MQ
static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
#endif
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
@@ -265,10 +262,6 @@ static int e1000_resume(struct pci_dev *pdev);
static void e1000_netpoll (struct net_device *netdev);
#endif
#ifdef CONFIG_E1000_MQ
/* for multiple Rx queues */
void e1000_rx_schedule(void *data);
#endif
/* Exported from other modules */
@@ -502,10 +495,6 @@ e1000_up(struct e1000_adapter *adapter)
return err;
}
#ifdef CONFIG_E1000_MQ
e1000_setup_queue_mapping(adapter);
#endif
adapter->tx_queue_len = netdev->tx_queue_len;
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -526,9 +515,7 @@ e1000_down(struct e1000_adapter *adapter)
e1000_check_mng_mode(&adapter->hw);
e1000_irq_disable(adapter);
#ifdef CONFIG_E1000_MQ
while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
#endif
free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
if (adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -972,10 +959,6 @@ e1000_remove(struct pci_dev *pdev)
iounmap(adapter->hw.hw_addr);
pci_release_regions(pdev);
#ifdef CONFIG_E1000_MQ
free_percpu(adapter->cpu_netdev);
free_percpu(adapter->cpu_tx_ring);
#endif
free_netdev(netdev);
pci_disable_device(pdev);
@@ -1056,40 +1039,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
hw->master_slave = E1000_MASTER_SLAVE;
}
#ifdef CONFIG_E1000_MQ
/* Number of supported queues */
switch (hw->mac_type) {
case e1000_82571:
case e1000_82572:
/* These controllers support 2 tx queues, but with a single
* qdisc implementation, multiple tx queues aren't quite as
* interesting. If we can find a logical way of mapping
* flows to a queue, then perhaps we can up the num_tx_queue
* count back to its default. Until then, we run the risk of
* terrible performance due to SACK overload. */
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 2;
break;
default:
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
break;
}
adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
adapter->num_rx_queues,
((adapter->num_rx_queues == 1)
? ((num_online_cpus() > 1)
? "(due to unsupported feature in current adapter)"
: "(due to unsupported system configuration)")
: ""));
DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
adapter->num_tx_queues);
#else
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
#endif
if (e1000_alloc_queues(adapter)) {
DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
@@ -1152,51 +1103,9 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
memset(adapter->polling_netdev, 0, size);
#endif
#ifdef CONFIG_E1000_MQ
adapter->rx_sched_call_data.func = e1000_rx_schedule;
adapter->rx_sched_call_data.info = adapter->netdev;
adapter->cpu_netdev = alloc_percpu(struct net_device *);
adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
#endif
return E1000_SUCCESS;
}
#ifdef CONFIG_E1000_MQ
static void __devinit
e1000_setup_queue_mapping(struct e1000_adapter *adapter)
{
int i, cpu;
adapter->rx_sched_call_data.func = e1000_rx_schedule;
adapter->rx_sched_call_data.info = adapter->netdev;
cpus_clear(adapter->rx_sched_call_data.cpumask);
adapter->cpu_netdev = alloc_percpu(struct net_device *);
adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
lock_cpu_hotplug();
i = 0;
for_each_online_cpu(cpu) {
*per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
/* This is incomplete because we'd like to assign separate
* physical cpus to these netdev polling structures and
* avoid saturating a subset of cpus.
*/
if (i < adapter->num_rx_queues) {
*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
adapter->rx_ring[i].cpu = cpu;
cpu_set(cpu, adapter->cpumask);
} else
*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
i++;
}
unlock_cpu_hotplug();
}
#endif
/**
* e1000_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -1435,18 +1344,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
switch (adapter->num_tx_queues) {
case 2:
tdba = adapter->tx_ring[1].dma;
tdlen = adapter->tx_ring[1].count *
sizeof(struct e1000_tx_desc);
E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
E1000_WRITE_REG(hw, TDLEN1, tdlen);
E1000_WRITE_REG(hw, TDH1, 0);
E1000_WRITE_REG(hw, TDT1, 0);
adapter->tx_ring[1].tdh = E1000_TDH1;
adapter->tx_ring[1].tdt = E1000_TDT1;
/* Fall Through */
case 1:
default:
tdba = adapter->tx_ring[0].dma;
@@ -1790,10 +1687,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
uint64_t rdba;
struct e1000_hw *hw = &adapter->hw;
uint32_t rdlen, rctl, rxcsum, ctrl_ext;
#ifdef CONFIG_E1000_MQ
uint32_t reta, mrqc;
int i;
#endif
if (adapter->rx_ps_pages) {
rdlen = adapter->rx_ring[0].count *
@@ -1837,18 +1730,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
switch (adapter->num_rx_queues) {
#ifdef CONFIG_E1000_MQ
case 2:
rdba = adapter->rx_ring[1].dma;
E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
E1000_WRITE_REG(hw, RDLEN1, rdlen);
E1000_WRITE_REG(hw, RDH1, 0);
E1000_WRITE_REG(hw, RDT1, 0);
adapter->rx_ring[1].rdh = E1000_RDH1;
adapter->rx_ring[1].rdt = E1000_RDT1;
/* Fall Through */
#endif
case 1:
default:
rdba = adapter->rx_ring[0].dma;
@@ -1862,46 +1743,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
break;
}
#ifdef CONFIG_E1000_MQ
if (adapter->num_rx_queues > 1) {
uint32_t random[10];
get_random_bytes(&random[0], 40);
if (hw->mac_type <= e1000_82572) {
E1000_WRITE_REG(hw, RSSIR, 0);
E1000_WRITE_REG(hw, RSSIM, 0);
}
switch (adapter->num_rx_queues) {
case 2:
default:
reta = 0x00800080;
mrqc = E1000_MRQC_ENABLE_RSS_2Q;
break;
}
/* Fill out redirection table */
for (i = 0; i < 32; i++)
E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP);
E1000_WRITE_REG(hw, MRQC, mrqc);
}
/* Multiqueue and packet checksumming are mutually exclusive. */
if (hw->mac_type >= e1000_82571) {
rxcsum = E1000_READ_REG(hw, RXCSUM);
rxcsum |= E1000_RXCSUM_PCSD;
E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
#else
/* Enable 82543 Receive Checksum Offload for TCP and UDP */
if (hw->mac_type >= e1000_82543) {
rxcsum = E1000_READ_REG(hw, RXCSUM);
@@ -1920,7 +1761,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
}
E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
#endif /* CONFIG_E1000_MQ */
if (hw->mac_type == e1000_82573)
E1000_WRITE_REG(hw, ERT, 0x0100);
@@ -2465,9 +2305,6 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
e1000_update_adaptive(&adapter->hw);
#ifdef CONFIG_E1000_MQ
txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
#endif
if (!netif_carrier_ok(netdev)) {
if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
/* We've lost link, so the controller stops DMA,
@@ -2881,11 +2718,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int f;
len -= skb->data_len;
#ifdef CONFIG_E1000_MQ
tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
#else
tx_ring = adapter->tx_ring;
#endif
if (unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
@@ -3288,29 +3121,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
#ifdef CONFIG_E1000_MQ
void
e1000_rx_schedule(void *data)
{
struct net_device *poll_dev, *netdev = data;
struct e1000_adapter *adapter = netdev->priv;
int this_cpu = get_cpu();
poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
if (poll_dev == NULL) {
put_cpu();
return;
}
if (likely(netif_rx_schedule_prep(poll_dev)))
__netif_rx_schedule(poll_dev);
else
e1000_irq_enable(adapter);
put_cpu();
}
#endif
/**
* e1000_intr - Interrupt Handler
* @irq: interrupt number
@@ -3355,26 +3165,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
#ifdef CONFIG_E1000_MQ
if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
/* We must setup the cpumask once count == 0 since
* each cpu bit is cleared when the work is done. */
adapter->rx_sched_call_data.cpumask = adapter->cpumask;
atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
atomic_set(&adapter->rx_sched_call_data.count,
adapter->num_rx_queues);
smp_call_async_mask(&adapter->rx_sched_call_data);
} else {
printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
}
#else /* if !CONFIG_E1000_MQ */
if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
__netif_rx_schedule(&adapter->polling_netdev[0]);
else
e1000_irq_enable(adapter);
#endif /* CONFIG_E1000_MQ */
#else /* if !CONFIG_E1000_NAPI */
#else
/* Writing IMC and IMS is needed for 82547.
* Due to Hub Link bus being occupied, an interrupt
* de-assertion message is not able to be sent.
@@ -3398,7 +3193,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
e1000_irq_enable(adapter);
#endif /* CONFIG_E1000_NAPI */
#endif
return IRQ_HANDLED;
}
@@ -3486,18 +3281,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
#ifdef CONFIG_E1000_MQ
tx_ring->tx_stats.bytes += buffer_info->length;
#endif
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
if (unlikely(++i == tx_ring->count)) i = 0;
}
#ifdef CONFIG_E1000_MQ
tx_ring->tx_stats.packets++;
#endif
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
@@ -3733,10 +3522,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
}
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
#ifdef CONFIG_E1000_MQ
rx_ring->rx_stats.packets++;
rx_ring->rx_stats.bytes += length;
#endif
next_desc:
rx_desc->status = 0;
@@ -3878,10 +3663,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
}
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
#ifdef CONFIG_E1000_MQ
rx_ring->rx_stats.packets++;
rx_ring->rx_stats.bytes += length;
#endif
next_desc:
rx_desc->wb.middle.status_error &= ~0xFF;