ath9k: move the sequence number from ath_frame_info to ath_buf

It is only necessary for BAW tracking, and moving it to the ath_buf
makes it easier to add further improvements, such as deferring
seqno allocation in the aggregation path.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
commit 6a0ddaef7c
parent 56dc633692
Author:    Felix Fietkau <nbd@openwrt.org>
Date:      2011-08-28 00:32:23 +02:00
Committer: John W. Linville <linville@tuxdriver.com>

 2 files changed, 24 insertions(+), 33 deletions(-)
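For context: block-ack window (BAW) bookkeeping is the only reason the driver has to remember a frame's 802.11 sequence number, which is why it can live in the per-buffer ath_buf_state instead of ath_frame_info. The standalone sketch below illustrates the modular window arithmetic that BAW tracking relies on; the helpers ba_index() and baw_within() are invented for this example and only mirror the ATH_BA_INDEX()/BAW_WITHIN() macros used in xmit.c, whose in-tree definitions may differ in detail.

/*
 * Illustration only, not part of this commit: 12-bit 802.11 sequence
 * numbers wrap at IEEE80211_SEQ_MAX (4096), so "is this seqno inside
 * the block-ack window" is a modular distance check.
 */
#include <stdbool.h>
#include <stdio.h>

#define IEEE80211_SEQ_MAX 4096

/* Distance of seqno from the window start, modulo the sequence space
 * (roughly what ATH_BA_INDEX() computes for the block-ack bitmap). */
static unsigned int ba_index(unsigned int seq_start, unsigned int seqno)
{
        return (seqno - seq_start) & (IEEE80211_SEQ_MAX - 1);
}

/* A subframe may only be aggregated or retried while its seqno is inside
 * the window (the BAW_WITHIN() check in ath_tx_form_aggr/ath_tx_send_ampdu). */
static bool baw_within(unsigned int seq_start, unsigned int baw_size,
                       unsigned int seqno)
{
        return ba_index(seq_start, seqno) < baw_size;
}

int main(void)
{
        /* 64-frame window starting just before the sequence wrap-around. */
        unsigned int seq_start = 4090, baw_size = 64;

        printf("%d\n", baw_within(seq_start, baw_size, 4095)); /* 1: in window */
        printf("%d\n", baw_within(seq_start, baw_size, 10));   /* 1: wrapped, still in window */
        printf("%d\n", baw_within(seq_start, baw_size, 2000)); /* 0: outside the window */
        return 0;
}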

diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h

@@ -211,12 +211,12 @@ struct ath_frame_info {
         enum ath9k_key_type keytype;
         u8 keyix;
         u8 retries;
-        u16 seqno;
 };
 
 struct ath_buf_state {
         u8 bf_type;
         u8 bfs_paprd;
+        u16 seqno;
         unsigned long bfs_paprd_timestamp;
 };

diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c

@@ -168,7 +168,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
                 spin_unlock_bh(&txq->axq_lock);
 
                 if (fi->retries) {
-                        ath_tx_update_baw(sc, tid, fi->seqno);
+                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                         ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
                 } else {
                         ath_tx_send_normal(sc, txq, NULL, &bf_head);
@@ -237,7 +237,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                 list_add_tail(&bf->list, &bf_head);
 
                 if (fi->retries)
-                        ath_tx_update_baw(sc, tid, fi->seqno);
+                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
 
                 spin_unlock(&txq->axq_lock);
                 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
@@ -327,7 +327,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
         while (bf) {
                 fi = get_frame_info(bf->bf_mpdu);
-                ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
+                ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
 
                 (*nframes)++;
                 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
@@ -428,6 +428,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
         ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
         while (bf) {
+                u16 seqno = bf->bf_state.seqno;
+
                 txfail = txpending = sendbar = 0;
                 bf_next = bf->bf_next;
@@ -435,7 +437,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                 tx_info = IEEE80211_SKB_CB(skb);
                 fi = get_frame_info(skb);
 
-                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
+                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                         /* transmit completion, subframe is
                          * acked by block ack */
                         acked_cnt++;
@@ -479,7 +481,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                          * block-ack window
                          */
                         spin_lock_bh(&txq->axq_lock);
-                        ath_tx_update_baw(sc, tid, fi->seqno);
+                        ath_tx_update_baw(sc, tid, seqno);
                         spin_unlock_bh(&txq->axq_lock);
 
                         if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
@@ -507,7 +509,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                          */
                                         if (!tbf) {
                                                 spin_lock_bh(&txq->axq_lock);
-                                                ath_tx_update_baw(sc, tid, fi->seqno);
+                                                ath_tx_update_baw(sc, tid, seqno);
                                                 spin_unlock_bh(&txq->axq_lock);
 
                                                 bf->bf_state.bf_type |=
@@ -752,17 +754,19 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
         struct ieee80211_tx_info *tx_info;
         struct ath_frame_info *fi;
         struct sk_buff *skb;
+        u16 seqno;
 
         do {
                 skb = skb_peek(&tid->buf_q);
                 fi = get_frame_info(skb);
                 bf = fi->bf;
+                seqno = bf->bf_state.seqno;
 
                 if (!bf_first)
                         bf_first = bf;
 
                 /* do not step over block-ack window */
-                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
+                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
                         status = ATH_AGGR_BAW_CLOSED;
                         break;
                 }
@@ -810,7 +814,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 
                 /* link buffers of this frame to the aggregate */
                 if (!fi->retries)
-                        ath_tx_addto_baw(sc, tid, fi->seqno);
+                        ath_tx_addto_baw(sc, tid, seqno);
                 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
 
                 __skb_unlink(skb, &tid->buf_q);
@@ -1434,6 +1438,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 {
         struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
         struct list_head bf_head;
+        u16 seqno = bf->bf_state.seqno;
 
         bf->bf_state.bf_type |= BUF_AMPDU;
@@ -1445,7 +1450,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
          * - h/w queue depth exceeds low water mark
          */
         if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
-            !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
+            !BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
             txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
                 /*
                  * Add this frame to software queue for scheduling later
@@ -1463,7 +1468,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 
         /* Add sub-frame to BAW */
         if (!fi->retries)
-                ath_tx_addto_baw(sc, tid, fi->seqno);
+                ath_tx_addto_baw(sc, tid, seqno);
 
         /* Queue to h/w without aggregation */
         TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
@@ -1519,39 +1524,19 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
 static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                              int framelen)
 {
-        struct ath_softc *sc = hw->priv;
         struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
         struct ieee80211_sta *sta = tx_info->control.sta;
         struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
-        struct ieee80211_hdr *hdr;
+        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
         struct ath_frame_info *fi = get_frame_info(skb);
         struct ath_node *an = NULL;
-        struct ath_atx_tid *tid;
         enum ath9k_key_type keytype;
-        u16 seqno = 0;
-        u8 tidno;
 
         keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
 
         if (sta)
                 an = (struct ath_node *) sta->drv_priv;
 
-        hdr = (struct ieee80211_hdr *)skb->data;
-        if (an && ieee80211_is_data_qos(hdr->frame_control) &&
-                conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
-
-                tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
-
-                /*
-                 * Override seqno set by upper layer with the one
-                 * in tx aggregation state.
-                 */
-                tid = ATH_AN_2_TID(an, tidno);
-                seqno = tid->seq_next;
-                hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
-                INCR(tid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
         memset(fi, 0, sizeof(*fi));
         if (hw_key)
                 fi->keyix = hw_key->hw_key_idx;
@@ -1561,7 +1546,6 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                 fi->keyix = ATH9K_TXKEYIX_INVALID;
         fi->keytype = keytype;
         fi->framelen = framelen;
-        fi->seqno = seqno;
 }
 
 static int setup_tx_flags(struct sk_buff *skb)
@@ -1797,6 +1781,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
         struct list_head bf_head;
         struct ath_atx_tid *tid = NULL;
+        u16 seqno;
         u8 tidno;
 
         spin_lock_bh(&txctl->txq->axq_lock);
@@ -1806,6 +1791,12 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                         IEEE80211_QOS_CTL_TID_MASK;
                 tid = ATH_AN_2_TID(txctl->an, tidno);
 
+                seqno = tid->seq_next;
+                hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
+                INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+                bf->bf_state.seqno = seqno;
+
                 WARN_ON(tid->ac->txq != txctl->txq);
         }