linux/drivers/net/wireless/mediatek/mt76/tx.c
Lorenzo Bianconi 7c250f4612 mt76: fix potential NULL pointer dereference in mt76_stop_tx_queues
Starting from mac80211 commit adf8ed01e4 ("mac80211: add an optional
TXQ for other PS-buffered frames") and commit 0eeb2b674f ("mac80211:
add an option for station management TXQ"), a new per-sta queue has
been introduced for bufferable management frames.
sta->txq[IEEE80211_NUM_TIDS] is initialized only if the driver reports
the following hw flags:
- IEEE80211_HW_STA_MMPDU_TXQ
- IEEE80211_HW_BUFF_MMPDU_TXQ
This can produce a NULL pointer dereference in mt76_stop_tx_queues
since mt76 iterates over all available sta tx queues assuming they are
initialized by mac80211. This issue has been spotted by analyzing the
code (it has not triggered any crash yet).

Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
2018-12-13 16:19:56 +02:00
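
The fix, visible in mt76_stop_tx_queues() below, is to skip the
uninitialized entries before dereferencing them:

	struct ieee80211_txq *txq = sta->txq[i];

	if (!txq)
		continue;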

/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
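
/*
 * Allocate a cache-line aligned txwi descriptor and map it for device
 * DMA. Only called when the free list in dev->txwi_cache is empty.
 */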
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}

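/* Take a txwi entry off the free list; returns NULL if the list is empty. */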
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}

void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}

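/* Map a txq to a hardware queue id; frames with no sta go to the BE queue. */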
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

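/*
 * Track the next expected sequence number of the aggregation session,
 * used when a BAR is sent after the queue has been stopped.
 */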
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

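/*
 * Transmit a single frame: pick tx rates if none are set for the wcid,
 * update the aggregation ssn for QoS data, queue the skb to hardware
 * and stop the mac80211 queue when the ring is almost full.
 */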
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *) txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

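/*
 * Dequeue the next frame for a txq, preferring frames that were
 * previously pulled but deferred to the local retry queue.
 */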
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}

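/*
 * Queue one PS-response frame on the PSD queue; all but the last frame
 * carry the More Data bit, and the last one is tagged with EOSP.
 */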
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}

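/*
 * mac80211 callback: release up to @nframes buffered frames for the
 * TIDs in @tids while @sta is in powersave. The hardware queue is only
 * kicked once the last frame has been queued.
 */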
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

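/*
 * Pull a burst of frames from one txq and queue them to hardware,
 * reusing the first frame's tx rate for the whole burst. A-MPDU bursts
 * are capped at 16 frames, others at 3; rate-probing frames are always
 * sent on their own.
 */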
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}

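/*
 * Round-robin over the pending txqs of one hardware queue, sending a
 * BAR first for stopped aggregation sessions. Runs with hwq->lock held;
 * the lock is dropped around ieee80211_send_bar(), so the list walk
 * restarts afterwards.
 */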
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}

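/*
 * Service the software queues of one hardware queue until they are
 * empty or up to four scheduled bursts are outstanding (swq_queued).
 */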
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	rcu_read_lock();
	do {
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

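/* Run the scheduler on every AC hardware queue. */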
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

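/*
 * Deschedule all of a station's txqs. Note that entries in sta->txq[]
 * may be NULL: mac80211 initializes sta->txq[IEEE80211_NUM_TIDS] only
 * for drivers that set the IEEE80211_HW_STA_MMPDU_TXQ /
 * IEEE80211_HW_BUFF_MMPDU_TXQ hw flags.
 */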
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

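/* mac80211 wake_tx_queue hook: enqueue the txq for servicing and reschedule. */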
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);

	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

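/*
 * Remove a txq from its hardware queue and free any frames still
 * sitting on the local retry queue.
 */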
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

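/* Set up driver-private txq state and bind the txq to a hardware queue. */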
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

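/* Translate a mac80211 access category into the hardware queue index. */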
u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);