iwlwifi: pcie: Disable softirqs during Rx queue init

When Rx queues are configured during module init, NAPI is enabled
while the Rx queue lock is held. However, since softirqs are not
disabled, it is possible that an IRQ would fire and call
iwl_pcie_rx_handle(), which would also try to acquire the Rx queue
lock, resulting in a deadlock.

Prevent this by disabling softirqs during Rx queue configuration,
as part of the module init flow.
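
The hazard is the usual process-context vs. softirq-context spinlock
pattern: if the Rx softirq (the NAPI poll scheduled from the Rx
interrupt) runs on the CPU that already holds rxq->lock taken with a
plain spin_lock(), it spins on a lock that can never be released.
A minimal sketch of the pattern, with hypothetical names and not the
actual driver code:

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(demo_lock);

  /* Softirq/NAPI context: can interrupt process context on this CPU. */
  static void demo_rx_poll(void)
  {
  	spin_lock(&demo_lock);		/* fine here: softirqs don't nest */
  	/* ... handle received buffers ... */
  	spin_unlock(&demo_lock);
  }

  /* Process context (e.g. queue init): must keep softirqs off locally
   * while holding the lock, otherwise demo_rx_poll() can run on top of
   * us and deadlock on demo_lock.
   */
  static void demo_rx_queue_init(void)
  {
  	spin_lock_bh(&demo_lock);	/* disables bottom halves on this CPU */
  	/* ... reset pointers, enable NAPI ... */
  	spin_unlock_bh(&demo_lock);
  }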

Signed-off-by: Ilan Peer <ilan.peer@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20210205110447.d206ac428823.Ia19339efb09f9d80143f0d0e398a158180754cfa@changeid
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
@@ -301,7 +301,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
 	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
 		return;
 
-	spin_lock(&rxq->lock);
+	spin_lock_bh(&rxq->lock);
 	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
 		__le32 *bd = (__le32 *)rxq->bd;
 		/* The overwritten rxb must be a used one */
@@ -320,14 +320,14 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
 	}
-	spin_unlock(&rxq->lock);
+	spin_unlock_bh(&rxq->lock);
 
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
 	if (rxq->write_actual != (rxq->write & ~0x7)) {
-		spin_lock(&rxq->lock);
+		spin_lock_bh(&rxq->lock);
 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-		spin_unlock(&rxq->lock);
+		spin_unlock_bh(&rxq->lock);
 	}
 }
@@ -433,28 +433,28 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 	while (1) {
 		unsigned int offset;
 
-		spin_lock(&rxq->lock);
+		spin_lock_bh(&rxq->lock);
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock(&rxq->lock);
+			spin_unlock_bh(&rxq->lock);
 			return;
 		}
-		spin_unlock(&rxq->lock);
+		spin_unlock_bh(&rxq->lock);
 
 		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
 		if (!page)
 			return;
 
-		spin_lock(&rxq->lock);
+		spin_lock_bh(&rxq->lock);
 
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock(&rxq->lock);
+			spin_unlock_bh(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
 		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
 				       list);
 		list_del(&rxb->list);
-		spin_unlock(&rxq->lock);
+		spin_unlock_bh(&rxq->lock);
 
 		BUG_ON(rxb->page);
 		rxb->page = page;
@@ -466,19 +466,19 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			rxb->page = NULL;
-			spin_lock(&rxq->lock);
+			spin_lock_bh(&rxq->lock);
 			list_add(&rxb->list, &rxq->rx_used);
-			spin_unlock(&rxq->lock);
+			spin_unlock_bh(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
 
-		spin_lock(&rxq->lock);
+		spin_lock_bh(&rxq->lock);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 
-		spin_unlock(&rxq->lock);
+		spin_unlock_bh(&rxq->lock);
 	}
 }
 
@@ -1112,7 +1112,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 	for (i = 0; i < trans->num_rx_queues; i++) {
 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-		spin_lock(&rxq->lock);
+		spin_lock_bh(&rxq->lock);
 		/*
 		 * Set read write pointer to reflect that we have processed
 		 * and used all buffers, but have not restocked the Rx queue
@@ -1148,7 +1148,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 			napi_enable(&rxq->napi);
 		}
 
-		spin_unlock(&rxq->lock);
+		spin_unlock_bh(&rxq->lock);
 	}
 
 	/* move the pool to the default queue and allocator ownerships */
@@ -1190,9 +1190,9 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
 
-	spin_lock(&trans_pcie->rxq->lock);
+	spin_lock_bh(&trans_pcie->rxq->lock);
 	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
-	spin_unlock(&trans_pcie->rxq->lock);
+	spin_unlock_bh(&trans_pcie->rxq->lock);
 
 	return 0;
 }