iwlagn: add an API for TX stop

Tx stop moves to transport layer.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Emmanuel Grumbach 2011-07-08 08:46:12 -07:00 committed by John W. Linville
parent c2c52e8bed
commit c170b867cc
7 changed files with 59 additions and 55 deletions
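For orientation, a minimal self-contained sketch of the ops-table indirection this patch introduces follows: the upper layer calls a tx_stop function pointer on a transport ops structure rather than calling a transport function such as iwlagn_txq_ctx_stop() directly. The names, simplified signatures, and printf placeholders below are illustrative only and are not taken from the driver.

#include <stdio.h>

/* illustrative ops table, mirroring the shape of struct iwl_trans_ops */
struct trans_ops {
	int (*tx_stop)(void *priv);
	int (*rx_stop)(void *priv);
};

static int pcie_tx_stop(void *priv)
{
	(void)priv;	/* a real transport would quiesce the TX DMA channels here */
	printf("tx stopped\n");
	return 0;
}

static int pcie_rx_stop(void *priv)
{
	(void)priv;
	printf("rx stopped\n");
	return 0;
}

static const struct trans_ops ops = {
	.tx_stop = pcie_tx_stop,
	.rx_stop = pcie_rx_stop,
};

int main(void)
{
	/* the upper layer only sees the ops table, as iwlagn_stop_device now does */
	ops.tx_stop(NULL);
	ops.rx_stop(NULL);
	return 0;
}

With this shape, swapping in a different transport only means pointing the ops table at different callbacks, which is the point of moving Tx stop behind it.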

View File

@@ -2310,7 +2310,7 @@ void iwlagn_stop_device(struct iwl_priv *priv)
* already dead.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
iwlagn_txq_ctx_stop(priv);
priv->trans.ops->tx_stop(priv);
priv->trans.ops->rx_stop(priv);
/* Power-down device's busmaster DMA clocks */

View File

@@ -851,39 +851,6 @@ static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
memset(ptr, 0, sizeof(*ptr));
}
/**
* iwlagn_txq_ctx_stop - Stop all Tx DMA channels
*/
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
int ch, txq_id;
unsigned long flags;
/* Turn off all Tx DMA fifos */
spin_lock_irqsave(&priv->lock, flags);
iwlagn_txq_set_sched(priv, 0);
/* Stop each Tx DMA channel, and wait for it to be idle */
for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
1000))
IWL_ERR(priv, "Failing on timeout while stopping"
" DMA channel %d [0x%08x]", ch,
iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&priv->lock, flags);
if (!priv->txq)
return;
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
iwl_tx_queue_unmap(priv, txq_id);
}
/*
* Find first available (lowest unused) Tx Queue, mark it "active".
* Called only when finding queue for aggregation.

View File

@@ -217,7 +217,6 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{

View File

@@ -384,7 +384,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
int count, int slots_num, u32 id);
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
void iwl_setup_watchdog(struct iwl_priv *priv);
/*****************************************************
* TX power

View File

@@ -694,8 +694,6 @@ struct iwl_hw_params {
****************************************************************************/
extern void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
extern int iwl_rxq_stop(struct iwl_priv *priv);
extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
@@ -1234,6 +1232,7 @@ struct iwl_trans;
* @rx_stop: stop the rx
* @rx_free: frees the rx memory
* @tx_init:inits the tx memory, allocate if needed
* @tx_stop: stop the tx
* @tx_free: frees the tx memory
*/
struct iwl_trans_ops {
@@ -1242,6 +1241,7 @@ struct iwl_trans_ops {
void (*rx_free)(struct iwl_priv *priv);
int (*tx_init)(struct iwl_priv *priv);
int (*tx_stop)(struct iwl_priv *priv);
void (*tx_free)(struct iwl_priv *priv);
};

View File

@@ -327,6 +327,24 @@ static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
return 0;
}
/**
* iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
*/
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
if (!q->n_bd)
return;
while (q->write_ptr != q->read_ptr) {
/* The read_ptr needs to be bounded by q->n_window */
iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
}
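The drain loop above advances read_ptr toward write_ptr, wrapping at the ring size, and frees one TFD per step. Below is a standalone sketch of that ring-walk pattern; inc_wrap is a simplified stand-in for iwl_queue_inc_wrap and the pointer values are made up.

#include <stdio.h>

#define N_BD 8	/* ring size; the driver uses q->n_bd */

/* advance an index by one, wrapping back to 0 at the end of the ring */
static int inc_wrap(int index, int n_bd)
{
	return ++index == n_bd ? 0 : index;
}

int main(void)
{
	int read_ptr = 6, write_ptr = 2;	/* wrapped ring: slots 6, 7, 0, 1 pending */

	while (write_ptr != read_ptr) {
		printf("freeing TFD at slot %d\n", read_ptr);	/* stands in for iwlagn_txq_free_tfd() */
		read_ptr = inc_wrap(read_ptr, N_BD);
	}
	return 0;
}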
/**
* iwl_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
@@ -497,12 +515,50 @@ error:
return ret;
}
/**
* iwl_trans_tx_stop - Stop all Tx DMA channels
*/
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
int ch, txq_id;
unsigned long flags;
/* Turn off all Tx DMA fifos */
spin_lock_irqsave(&priv->lock, flags);
iwlagn_txq_set_sched(priv, 0);
/* Stop each Tx DMA channel, and wait for it to be idle */
for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
1000))
IWL_ERR(priv, "Failing on timeout while stopping"
" DMA channel %d [0x%08x]", ch,
iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&priv->lock, flags);
if (!priv->txq) {
IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
return 0;
}
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
iwl_tx_queue_unmap(priv, txq_id);
return 0;
}
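The stop sequence above uses a write-then-poll pattern: clear each channel's TX config register, then poll its idle bit for a bounded number of reads before reporting a timeout. Below is a small mocked sketch of that pattern; none of the register names or helpers are taken from the driver.

#include <stdio.h>
#include <stdbool.h>

static unsigned int fake_status = 0x1;	/* pretend bit 0 means "channel idle" */

static void write_reg(unsigned int val) { (void)val; }	/* mock register write */
static unsigned int read_reg(void) { return fake_status; }	/* mock register read */

/* poll until any bit in mask is set, giving up after a bounded number of reads */
static bool poll_bit(unsigned int mask, int retries)
{
	while (retries--) {
		if (read_reg() & mask)
			return true;
		/* a real driver would delay briefly between reads */
	}
	return false;
}

int main(void)
{
	write_reg(0x0);	/* stop the TX DMA channel */
	if (!poll_bit(0x1, 1000))
		printf("timeout waiting for channel idle\n");
	else
		printf("channel idle\n");
	return 0;
}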
static const struct iwl_trans_ops trans_ops = {
.rx_init = iwl_trans_rx_init,
.rx_stop = iwl_trans_rx_stop,
.rx_free = iwl_trans_rx_free,
.tx_init = iwl_trans_tx_init,
.tx_stop = iwl_trans_tx_stop,
.tx_free = iwl_trans_tx_free,
};

View File

@@ -221,23 +221,6 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
return 0;
}
/**
* iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
*/
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
if (q->n_bd == 0)
return;
while (q->write_ptr != q->read_ptr) {
iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
*