sfc: Test all event queues in parallel

If all event queues are broken for some reason, it will now take only
about a second to check them all, rather than up to 32 seconds.  This
may also speed up testing in the successful case.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Author: Ben Hutchings <bhutchings@solarflare.com>
Date:   2012-02-28 20:40:54 +00:00
commit ed74f48087
parent 93e5dfa59b

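The timing figures in the commit message follow from simple arithmetic: a serial test can spend one full per-queue interrupt timeout on every broken queue, while the parallel version kicks all queues first and shares a single timeout window. A minimal userspace sketch of that arithmetic, assuming a timeout of roughly one second and 32 channels (the driver's real values are IRQ_TIMEOUT in jiffies and EFX_MAX_CHANNELS; the constants below are illustrative only, not taken from this diff):

#include <stdio.h>

int main(void)
{
	const unsigned int channels = 32;	/* assumed EFX_MAX_CHANNELS */
	const double irq_timeout_s = 1.0;	/* assumed ~IRQ_TIMEOUT */

	/* Old behaviour: each broken event queue waits out its own timeout. */
	printf("serial worst case:   %.0f s\n", channels * irq_timeout_s);

	/* New behaviour: all queues are kicked first and then share a single
	 * timeout window while being polled together.
	 */
	printf("parallel worst case: %.0f s\n", irq_timeout_s);
	return 0;
}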

@@ -178,69 +178,88 @@ static int efx_test_interrupts(struct efx_nic *efx,
 }
 
 /* Test generation and receipt of interrupting events */
-static int efx_test_eventq_irq(struct efx_channel *channel,
+static int efx_test_eventq_irq(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
-	struct efx_nic *efx = channel->efx;
-	unsigned int read_ptr;
-	bool napi_ran, dma_seen, int_seen;
+	struct efx_channel *channel;
+	unsigned int read_ptr[EFX_MAX_CHANNELS];
+	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
 	unsigned long timeout, wait;
 
-	read_ptr = channel->eventq_read_ptr;
-	channel->last_irq_cpu = -1;
-	smp_wmb();
+	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
 
-	efx_nic_generate_test_event(channel);
+	efx_for_each_channel(channel, efx) {
+		read_ptr[channel->channel] = channel->eventq_read_ptr;
+		set_bit(channel->channel, &dma_pend);
+		set_bit(channel->channel, &int_pend);
+		channel->last_irq_cpu = -1;
+		smp_wmb();
+		efx_nic_generate_test_event(channel);
+	}
 
 	timeout = jiffies + IRQ_TIMEOUT;
 	wait = 1;
 
-	/* Wait for arrival of interrupt.  NAPI processing may or may
+	/* Wait for arrival of interrupts.  NAPI processing may or may
 	 * not complete in time, but we can cope in any case.
 	 */
 	do {
 		schedule_timeout_uninterruptible(wait);
-		napi_disable(&channel->napi_str);
-		if (channel->eventq_read_ptr != read_ptr) {
-			napi_ran = true;
-			dma_seen = true;
-			int_seen = true;
-		} else {
-			napi_ran = false;
-			dma_seen = efx_nic_event_present(channel);
-			int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
+
+		efx_for_each_channel(channel, efx) {
+			napi_disable(&channel->napi_str);
+			if (channel->eventq_read_ptr !=
+			    read_ptr[channel->channel]) {
+				set_bit(channel->channel, &napi_ran);
+				clear_bit(channel->channel, &dma_pend);
+				clear_bit(channel->channel, &int_pend);
+			} else {
+				if (efx_nic_event_present(channel))
+					clear_bit(channel->channel, &dma_pend);
+				if (ACCESS_ONCE(channel->last_irq_cpu) >= 0)
					clear_bit(channel->channel, &int_pend);
+			}
+			napi_enable(&channel->napi_str);
+			efx_nic_eventq_read_ack(channel);
 		}
-		napi_enable(&channel->napi_str);
-		efx_nic_eventq_read_ack(channel);
 
 		wait *= 2;
-	} while (!(dma_seen && int_seen) && time_before(jiffies, timeout));
+	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));
 
-	tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
-	tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+	efx_for_each_channel(channel, efx) {
+		bool dma_seen = !test_bit(channel->channel, &dma_pend);
+		bool int_seen = !test_bit(channel->channel, &int_pend);
 
-	if (dma_seen && int_seen) {
-		netif_dbg(efx, drv, efx->net_dev,
-			  "channel %d event queue passed (with%s NAPI)\n",
-			  channel->channel, napi_ran ? "" : "out");
-		return 0;
-	} else {
-		/* Report failure and whether either interrupt or DMA worked */
-		netif_err(efx, drv, efx->net_dev,
-			  "channel %d timed out waiting for event queue\n",
-			  channel->channel);
-		if (int_seen)
-			netif_err(efx, drv, efx->net_dev,
-				  "channel %d saw interrupt "
-				  "during event queue test\n",
-				  channel->channel);
-		if (dma_seen)
-			netif_err(efx, drv, efx->net_dev,
-				  "channel %d event was generated, but "
-				  "failed to trigger an interrupt\n",
-				  channel->channel);
-		return -ETIMEDOUT;
+		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+		if (dma_seen && int_seen) {
+			netif_dbg(efx, drv, efx->net_dev,
+				  "channel %d event queue passed (with%s NAPI)\n",
+				  channel->channel,
+				  test_bit(channel->channel, &napi_ran) ?
+				  "" : "out");
+		} else {
+			/* Report failure and whether either interrupt or DMA
+			 * worked
+			 */
+			netif_err(efx, drv, efx->net_dev,
+				  "channel %d timed out waiting for event queue\n",
+				  channel->channel);
+			if (int_seen)
+				netif_err(efx, drv, efx->net_dev,
+					  "channel %d saw interrupt "
+					  "during event queue test\n",
+					  channel->channel);
+			if (dma_seen)
+				netif_err(efx, drv, efx->net_dev,
+					  "channel %d event was generated, but "
+					  "failed to trigger an interrupt\n",
+					  channel->channel);
+		}
 	}
+
+	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
 }
 
 static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
@@ -687,7 +706,6 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
 	int phy_mode = efx->phy_mode;
 	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
-	struct efx_channel *channel;
 	int rc_test = 0, rc_reset = 0, rc;
 
 	/* Online (i.e. non-disruptive) testing
@@ -705,11 +723,9 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	if (rc && !rc_test)
 		rc_test = rc;
 
-	efx_for_each_channel(channel, efx) {
-		rc = efx_test_eventq_irq(channel, tests);
-		if (rc && !rc_test)
-			rc_test = rc;
-	}
+	rc = efx_test_eventq_irq(efx, tests);
+	if (rc && !rc_test)
+		rc_test = rc;
 
 	if (rc_test)
 		return rc_test;
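Beyond the driver specifics, the patch is an instance of a general pattern: mark every unit of work pending in a bitmask, kick them all off, then poll the whole set in one loop with an exponentially growing sleep until the mask empties or a shared deadline expires. A standalone sketch of that pattern in plain C, with check_done() and sleep_ms() as hypothetical stand-ins for the driver's per-channel checks and schedule_timeout_uninterruptible() (nothing below is taken from the driver itself):

#define _POSIX_C_SOURCE 199309L		/* for nanosleep() */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the per-channel completion check; pretend even-numbered
 * units finish immediately and odd-numbered ones never do.
 */
static bool check_done(unsigned int unit)
{
	return (unit % 2) == 0;
}

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

/* Wait up to timeout_ms for all nunits units to complete, polling them
 * together with an exponentially growing sleep.  Returns a mask of the
 * units that never completed (0 on full success).
 */
static unsigned long wait_all(unsigned int nunits, long timeout_ms)
{
	unsigned long pending = 0;
	long waited = 0, wait = 1;
	unsigned int i;

	for (i = 0; i < nunits; i++)
		pending |= 1UL << i;		/* mark unit outstanding */

	do {
		sleep_ms(wait);
		waited += wait;

		for (i = 0; i < nunits; i++)
			if ((pending & (1UL << i)) && check_done(i))
				pending &= ~(1UL << i);

		wait *= 2;			/* exponential backoff */
	} while (pending && waited < timeout_ms);

	return pending;
}

int main(void)
{
	unsigned long failed = wait_all(8, 1000);

	printf("still pending after timeout: %#lx\n", failed);
	return failed ? 1 : 0;
}

Doubling the sleep keeps the loop cheap when everything responds quickly, while still bounding the number of wakeups spent before the shared deadline when something is broken.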