commit 73ba7b68e9 (parent b249513e8b)

sfc: Remove remnants of on-load self-test

The out-of-tree version of the sfc driver used to run a self-test on
each device before registering it. Although this was never included
in-tree, some functions have checks for this special case, which is not
really possible.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
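For context, the helper that each hunk below stops relying on simply asked whether the net device had been registered with the kernel. A sketch of the in-tree definition of that era, reproduced from memory and not part of this patch:

/* efx.h (approximate): true once register_netdev() has completed */
static inline bool efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

With the on-load self-test gone, every caller below runs only after registration, so the test is always true and the special case it guarded cannot occur.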
@@ -855,10 +855,8 @@ int __efx_reconfigure_port(struct efx_nic *efx)
 	WARN_ON(!mutex_is_locked(&efx->mac_lock));
 
 	/* Serialise the promiscuous flag with efx_set_multicast_list. */
-	if (efx_dev_registered(efx)) {
-		netif_addr_lock_bh(efx->net_dev);
-		netif_addr_unlock_bh(efx->net_dev);
-	}
+	netif_addr_lock_bh(efx->net_dev);
+	netif_addr_unlock_bh(efx->net_dev);
 
 	/* Disable PHY transmit in mac level loopbacks */
 	phy_mode = efx->phy_mode;
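The lock/unlock pair that survives this hunk is a serialisation point, not a no-op: once the address lock has been taken and dropped, any promiscuous or multicast update made by a concurrent efx_set_multicast_list() call has completed and is visible here. A minimal sketch of the idiom, using a hypothetical helper name:

#include <linux/netdevice.h>

/* Hypothetical helper (not sfc code): cycle net_dev->addr_list_lock so
 * that a concurrent updater holding the lock finishes before we go on.
 */
static void quiesce_addr_list_updates(struct net_device *net_dev)
{
	netif_addr_lock_bh(net_dev);
	netif_addr_unlock_bh(net_dev);
}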
@@ -981,10 +979,8 @@ static void efx_stop_port(struct efx_nic *efx)
 	mutex_unlock(&efx->mac_lock);
 
 	/* Serialise against efx_set_multicast_list() */
-	if (efx_dev_registered(efx)) {
-		netif_addr_lock_bh(efx->net_dev);
-		netif_addr_unlock_bh(efx->net_dev);
-	}
+	netif_addr_lock_bh(efx->net_dev);
+	netif_addr_unlock_bh(efx->net_dev);
 }
 
 static void efx_fini_port(struct efx_nic *efx)
@@ -1394,14 +1390,14 @@ static void efx_start_all(struct efx_nic *efx)
 		return;
 	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
 		return;
-	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
+	if (!netif_running(efx->net_dev))
 		return;
 
 	/* Mark the port as enabled so port reconfigurations can start, then
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
+	if (netif_device_present(efx->net_dev))
 		netif_tx_wake_all_queues(efx->net_dev);
 
 	efx_for_each_channel(channel, efx)
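After this hunk the only guards left in efx_start_all() are the generic netdev state tests, which is all that is needed once the device is guaranteed to be registered. For reference, both helpers are simple bit tests on net_dev->state; roughly (paraphrased from <linux/netdevice.h>, not part of this patch):

static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}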
@@ -1492,11 +1488,9 @@ static void efx_stop_all(struct efx_nic *efx)
 
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
-	if (efx_dev_registered(efx)) {
-		netif_tx_stop_all_queues(efx->net_dev);
-		netif_tx_lock_bh(efx->net_dev);
-		netif_tx_unlock_bh(efx->net_dev);
-	}
+	netif_tx_stop_all_queues(efx->net_dev);
+	netif_tx_lock_bh(efx->net_dev);
+	netif_tx_unlock_bh(efx->net_dev);
 }
 
 static void efx_remove_all(struct efx_nic *efx)
@@ -2018,11 +2012,9 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 			efx_release_tx_buffers(tx_queue);
 	}
 
-	if (efx_dev_registered(efx)) {
-		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
-		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-		unregister_netdev(efx->net_dev);
-	}
+	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+	unregister_netdev(efx->net_dev);
 }
 
 /**************************************************************************
@@ -2438,7 +2430,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 /* NIC initialisation
  *
  * This is called at module load (or hotplug insertion,
- * theoretically). It sets up PCI mappings, tests and resets the NIC,
+ * theoretically). It sets up PCI mappings, resets the NIC,
  * sets up and registers the network devices with the kernel and hooks
  * the interrupt service routine. It does not prepare the device for
  * transmission; this is left to the first time one of the network
@@ -726,11 +726,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		tx_queue = efx_channel_get_tx_queue(
 			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 
-		if (efx_dev_registered(efx))
-			netif_tx_lock(efx->net_dev);
+		netif_tx_lock(efx->net_dev);
 		efx_notify_tx_desc(tx_queue);
-		if (efx_dev_registered(efx))
-			netif_tx_unlock(efx->net_dev);
+		netif_tx_unlock(efx->net_dev);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
 		   EFX_WORKAROUND_10727(efx)) {
 		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -397,11 +397,9 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
 		 * interrupt handler. */
 		smp_wmb();
 
-		if (efx_dev_registered(efx))
-			netif_tx_lock_bh(efx->net_dev);
+		netif_tx_lock_bh(efx->net_dev);
 		rc = efx_enqueue_skb(tx_queue, skb);
-		if (efx_dev_registered(efx))
-			netif_tx_unlock_bh(efx->net_dev);
+		netif_tx_unlock_bh(efx->net_dev);
 
 		if (rc != NETDEV_TX_OK) {
 			netif_err(efx, drv, efx->net_dev,
@@ -442,8 +440,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
 	int tx_done = 0, rx_good, rx_bad;
 	int i, rc = 0;
 
-	if (efx_dev_registered(efx))
-		netif_tx_lock_bh(efx->net_dev);
+	netif_tx_lock_bh(efx->net_dev);
 
 	/* Count the number of tx completions, and decrement the refcnt. Any
 	 * skbs not already completed will be free'd when the queue is flushed */
@@ -454,8 +451,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
 		dev_kfree_skb_any(skb);
 	}
 
-	if (efx_dev_registered(efx))
-		netif_tx_unlock_bh(efx->net_dev);
+	netif_tx_unlock_bh(efx->net_dev);
 
 	/* Check TX completion and received packet counts */
 	rx_good = atomic_read(&state->rx_good);
@@ -446,10 +446,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	    likely(efx->port_enabled) &&
 	    likely(netif_device_present(efx->net_dev))) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
-			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+		if (fill_level < EFX_TXQ_THRESHOLD(efx))
 			netif_tx_wake_queue(tx_queue->core_txq);
-		}
 	}
 
 	/* Check whether the hardware queue is now empty */
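The assertion removed in the last hunk only ever checked anything in debug builds; EFX_BUG_ON_PARANOID expands to a plain BUG_ON() when the driver's debug option is set and to nothing otherwise, roughly (approximate definition from the sfc headers of this era, not part of this patch):

#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#endif

Since the wake path can now only be reached on a registered device, the check had nothing left to catch.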