ath9k: fix tx locking issues

The commit "ath9k: simplify tx locking" introduced a soft lockup triggered
when mac80211 sends a BAR frame in response to a driver call to
ieee80211_tx_send_bar or ieee80211_tx_status.
Fix these issues by queueing processed tx status skbs and submitting them
to mac80211 outside of the lock.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Mohammed Shafi Shajakhan <mohammed@qca.qualcomm.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
This commit is contained in:
Felix Fietkau 2011-12-19 16:45:54 +01:00 committed by John W. Linville
parent 8a30930563
commit 23de5dc9be
2 changed files with 65 additions and 32 deletions

View File

@ -196,6 +196,7 @@ struct ath_txq {
u8 txq_headidx; u8 txq_headidx;
u8 txq_tailidx; u8 txq_tailidx;
int pending_frames; int pending_frames;
struct sk_buff_head complete_q;
}; };
struct ath_atx_ac { struct ath_atx_ac {

View File

@ -104,6 +104,29 @@ static int ath_max_4ms_framelen[4][32] = {
/* Aggregation logic */ /* Aggregation logic */
/*********************/ /*********************/
/* Acquire the per-queue TX lock (BH-disabling spinlock). */
static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
{
spin_lock_bh(&txq->axq_lock);
}
/*
 * Release the per-queue TX lock without processing the completion
 * queue.  Use this variant on paths that did not queue any completed
 * skbs (or that must not call into mac80211 here).
 */
static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
{
spin_unlock_bh(&txq->axq_lock);
}
/*
 * Release the per-queue TX lock and hand any completed tx status skbs
 * (collected in txq->complete_q while the lock was held) to mac80211.
 *
 * The splice into a local list happens under the lock; the calls to
 * ieee80211_tx_status() happen after the lock is dropped, because
 * mac80211 may re-enter the driver from that callback (e.g. to send a
 * BAR frame), which would otherwise recurse on axq_lock and soft-lock.
 */
static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
{
struct sk_buff_head q;
struct sk_buff *skb;
/* Local holding list; initialized without locking (private to us). */
__skb_queue_head_init(&q);
/* Atomically take ownership of all pending completions. */
skb_queue_splice_init(&txq->complete_q, &q);
spin_unlock_bh(&txq->axq_lock);
/* Lock is dropped: safe to call back into mac80211 now. */
while ((skb = __skb_dequeue(&q)))
ieee80211_tx_status(sc->hw, skb);
}
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{ {
struct ath_atx_ac *ac = tid->ac; struct ath_atx_ac *ac = tid->ac;
@ -130,7 +153,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
WARN_ON(!tid->paused); WARN_ON(!tid->paused);
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
tid->paused = false; tid->paused = false;
if (skb_queue_empty(&tid->buf_q)) if (skb_queue_empty(&tid->buf_q))
@ -139,7 +162,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
ath_tx_queue_tid(txq, tid); ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq); ath_txq_schedule(sc, txq);
unlock: unlock:
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock_complete(sc, txq);
} }
static struct ath_frame_info *get_frame_info(struct sk_buff *skb) static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@ -189,8 +212,11 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
tid->state &= ~AGGR_CLEANUP; tid->state &= ~AGGR_CLEANUP;
} }
if (sendbar) if (sendbar) {
ath_txq_unlock(sc, txq);
ath_send_bar(tid, tid->seq_start); ath_send_bar(tid, tid->seq_start);
ath_txq_lock(sc, txq);
}
} }
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@ -554,13 +580,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf = bf_next; bf = bf_next;
} }
if (bar_index >= 0) {
u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
}
/* prepend un-acked frames to the beginning of the pending frame queue */ /* prepend un-acked frames to the beginning of the pending frame queue */
if (!skb_queue_empty(&bf_pending)) { if (!skb_queue_empty(&bf_pending)) {
if (an->sleeping) if (an->sleeping)
@ -575,6 +594,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} }
} }
if (bar_index >= 0) {
u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
ath_txq_unlock(sc, txq);
ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
ath_txq_lock(sc, txq);
}
if (tid->state & AGGR_CLEANUP) if (tid->state & AGGR_CLEANUP)
ath_tx_flush_tid(sc, tid); ath_tx_flush_tid(sc, tid);
@ -1172,7 +1202,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
return; return;
} }
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
txtid->paused = true; txtid->paused = true;
/* /*
@ -1187,7 +1217,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
txtid->state &= ~AGGR_ADDBA_COMPLETE; txtid->state &= ~AGGR_ADDBA_COMPLETE;
ath_tx_flush_tid(sc, txtid); ath_tx_flush_tid(sc, txtid);
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock_complete(sc, txq);
} }
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@ -1208,7 +1238,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
ac = tid->ac; ac = tid->ac;
txq = ac->txq; txq = ac->txq;
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
buffered = !skb_queue_empty(&tid->buf_q); buffered = !skb_queue_empty(&tid->buf_q);
@ -1220,7 +1250,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
list_del(&ac->list); list_del(&ac->list);
} }
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock(sc, txq);
ieee80211_sta_set_buffered(sta, tidno, buffered); ieee80211_sta_set_buffered(sta, tidno, buffered);
} }
@ -1239,7 +1269,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac; ac = tid->ac;
txq = ac->txq; txq = ac->txq;
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
ac->clear_ps_filter = true; ac->clear_ps_filter = true;
if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
@ -1247,7 +1277,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_schedule(sc, txq); ath_txq_schedule(sc, txq);
} }
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock_complete(sc, txq);
} }
} }
@ -1347,6 +1377,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_qnum = axq_qnum; txq->axq_qnum = axq_qnum;
txq->mac80211_qnum = -1; txq->mac80211_qnum = -1;
txq->axq_link = NULL; txq->axq_link = NULL;
__skb_queue_head_init(&txq->complete_q);
INIT_LIST_HEAD(&txq->axq_q); INIT_LIST_HEAD(&txq->axq_q);
INIT_LIST_HEAD(&txq->axq_acq); INIT_LIST_HEAD(&txq->axq_acq);
spin_lock_init(&txq->axq_lock); spin_lock_init(&txq->axq_lock);
@ -1471,7 +1502,8 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/ */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{ {
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx; int idx = txq->txq_tailidx;
@ -1492,7 +1524,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx) if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
ath_txq_drain_pending_buffers(sc, txq); ath_txq_drain_pending_buffers(sc, txq);
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock_complete(sc, txq);
} }
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@ -1924,7 +1956,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
*/ */
q = skb_get_queue_mapping(skb); q = skb_get_queue_mapping(skb);
spin_lock_bh(&txq->axq_lock);
ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] && if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) { ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
ieee80211_stop_queue(sc->hw, q); ieee80211_stop_queue(sc->hw, q);
@ -1933,7 +1966,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
ath_tx_start_dma(sc, skb, txctl); ath_tx_start_dma(sc, skb, txctl);
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock(sc, txq);
return 0; return 0;
} }
@ -1945,7 +1978,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq) int tx_flags, struct ath_txq *txq)
{ {
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
@ -1989,7 +2021,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
} }
} }
ieee80211_tx_status(hw, skb); __skb_queue_tail(&txq->complete_q, skb);
} }
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@ -2125,7 +2157,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link); txq->axq_link);
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
for (;;) { for (;;) {
if (work_pending(&sc->hw_reset_work)) if (work_pending(&sc->hw_reset_work))
break; break;
@ -2184,7 +2216,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
} }
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock_complete(sc, txq);
} }
static void ath_tx_complete_poll_work(struct work_struct *work) static void ath_tx_complete_poll_work(struct work_struct *work)
@ -2201,17 +2233,17 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) { if (ATH_TXQ_SETUP(sc, i)) {
txq = &sc->tx.txq[i]; txq = &sc->tx.txq[i];
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
if (txq->axq_depth) { if (txq->axq_depth) {
if (txq->axq_tx_inprogress) { if (txq->axq_tx_inprogress) {
needreset = true; needreset = true;
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock(sc, txq);
break; break;
} else { } else {
txq->axq_tx_inprogress = true; txq->axq_tx_inprogress = true;
} }
} }
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock_complete(sc, txq);
} }
if (needreset) { if (needreset) {
@ -2268,10 +2300,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
txq = &sc->tx.txq[ts.qid]; txq = &sc->tx.txq[ts.qid];
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) { if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock(sc, txq);
return; return;
} }
@ -2297,7 +2329,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
} }
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock_complete(sc, txq);
} }
} }
@ -2435,7 +2467,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac; ac = tid->ac;
txq = ac->txq; txq = ac->txq;
spin_lock_bh(&txq->axq_lock); ath_txq_lock(sc, txq);
if (tid->sched) { if (tid->sched) {
list_del(&tid->list); list_del(&tid->list);
@ -2451,6 +2483,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
tid->state &= ~AGGR_ADDBA_COMPLETE; tid->state &= ~AGGR_ADDBA_COMPLETE;
tid->state &= ~AGGR_CLEANUP; tid->state &= ~AGGR_CLEANUP;
spin_unlock_bh(&txq->axq_lock); ath_txq_unlock(sc, txq);
} }
} }