mt76: avoid queue/status spinlocks while passing tx status to mac80211

There is code in the mac80211 tx status processing path that can
potentially call back into the tx codepath. To avoid deadlocks, make
sure that no tx-related spinlocks are held during the
ieee80211_tx_status call.
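
The fix follows a simple pattern: while status_list.lock is held,
completed skbs are only collected onto a caller-provided list;
mt76_tx_status_unlock() then drops the lock before handing the queued
skbs to ieee80211_tx_status(). Below is a minimal user-space sketch of
that pattern (illustrative only, not part of this commit; a pthread
mutex stands in for the kernel spinlock, and all names are made up):

    /*
     * Illustrative model: collect completed entries under the lock,
     * deliver them to the (potentially re-entrant) status callback
     * only after the lock has been dropped.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct entry {
            struct entry *next;
            int id;
    };

    static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *pending;   /* entries awaiting status delivery */

    /* May call back into the tx path, so runs with status_lock dropped. */
    static void deliver_status(struct entry *e)
    {
            printf("tx status for entry %d\n", e->id);
    }

    static void complete_entries(void)
    {
            struct entry *local, *e;

            /* Detach completed entries onto a local list while locked... */
            pthread_mutex_lock(&status_lock);
            local = pending;
            pending = NULL;
            pthread_mutex_unlock(&status_lock);

            /* ...and deliver them only after the lock is dropped. */
            while ((e = local) != NULL) {
                    local = e->next;
                    deliver_status(e);
            }
    }

    int main(void)
    {
            struct entry e1 = { .next = NULL, .id = 1 };
            struct entry e2 = { .next = &e1, .id = 2 };

            pthread_mutex_lock(&status_lock);
            pending = &e2;
            pthread_mutex_unlock(&status_lock);

            complete_entries();
            return 0;
    }

Since deliver_status() runs with the lock dropped, it is free to take
the lock again, which is what makes the mac80211 callback safe here.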

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Felix Fietkau 2018-11-05 21:11:39 +01:00
parent 4ece1e0a86
commit 79d1c94c9c
6 changed files with 76 additions and 38 deletions

dma.c

@@ -157,17 +157,20 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 		if (entry.schedule)
 			q->swq_queued--;
 
-		if (entry.skb)
+		q->tail = (q->tail + 1) % q->ndesc;
+		q->queued--;
+
+		if (entry.skb) {
+			spin_unlock_bh(&q->lock);
 			dev->drv->tx_complete_skb(dev, q, &entry, flush);
+			spin_lock_bh(&q->lock);
+		}
 
 		if (entry.txwi) {
 			mt76_put_txwi(dev, entry.txwi);
 			wake = true;
 		}
 
-		q->tail = (q->tail + 1) % q->ndesc;
-		q->queued--;
-
 		if (!flush && q->tail == last)
 			last = ioread32(&q->regs->dma_idx);
 	}

mac80211.c

@@ -359,7 +359,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->hw;
 
-	mt76_tx_status_flush(dev, NULL);
+	mt76_tx_status_check(dev, NULL, true);
 	ieee80211_unregister_hw(hw);
 	mt76_tx_free(dev);
 }

mt76.h

@@ -648,28 +648,22 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
 			 struct ieee80211_key_conf *key);
 
+void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+		  __acquires(&dev->status_list.lock);
+void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+		  __releases(&dev->status_list.lock);
+
 int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
 			   struct sk_buff *skb);
 struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
-				       struct mt76_wcid *wcid, int pktid);
-void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb);
+				       struct mt76_wcid *wcid, int pktid,
+				       struct sk_buff_head *list);
+void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+			     struct sk_buff_head *list);
 void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
 
-static inline void
-mt76_tx_status_check(struct mt76_dev *dev)
-{
-	spin_lock_bh(&dev->status_list.lock);
-	mt76_tx_status_skb_get(dev, NULL, 0);
-	spin_unlock_bh(&dev->status_list.lock);
-}
-
-static inline void
-mt76_tx_status_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
-{
-	spin_lock_bh(&dev->status_list.lock);
-	mt76_tx_status_skb_get(dev, wcid, -1);
-	spin_unlock_bh(&dev->status_list.lock);
-}
+void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			  bool flush);
 
 struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

mt76x02_mac.c

@@ -438,12 +438,13 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 	struct mt76_wcid *wcid = NULL;
 	struct mt76x02_sta *msta = NULL;
 	struct mt76_dev *mdev = &dev->mt76;
+	struct sk_buff_head list;
 
 	if (stat->pktid == MT_PACKET_ID_NO_ACK)
 		return;
 
 	rcu_read_lock();
-	spin_lock_bh(&mdev->status_list.lock);
+	mt76_tx_status_lock(mdev, &list);
 
 	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
 		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -459,7 +460,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 	if (wcid) {
 		if (stat->pktid)
 			status.skb = mt76_tx_status_skb_get(mdev, wcid,
-							    stat->pktid);
+							    stat->pktid, &list);
 		if (status.skb)
 			status.info = IEEE80211_SKB_CB(status.skb);
 	}
@@ -490,12 +491,12 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 	}
 
 	if (status.skb)
-		mt76_tx_status_skb_done(mdev, status.skb);
+		mt76_tx_status_skb_done(mdev, status.skb, &list);
 	else
 		ieee80211_tx_status_ext(mt76_hw(dev), &status);
 
 out:
-	spin_unlock_bh(&mdev->status_list.lock);
+	mt76_tx_status_unlock(mdev, &list);
 	rcu_read_unlock();
 }
@@ -818,7 +819,7 @@ void mt76x02_mac_work(struct work_struct *work)
 	if (!dev->beacon_mask)
 		mt76x02_check_mac_err(dev);
 
-	mt76_tx_status_check(&dev->mt76);
+	mt76_tx_status_check(&dev->mt76, NULL, false);
 
 	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
 				     MT_CALIBRATE_INTERVAL);

mt76x02_util.c

@@ -217,7 +217,7 @@ int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	int i;
 
 	mutex_lock(&dev->mt76.mutex);
-	mt76_tx_status_flush(&dev->mt76, &msta->wcid);
+	mt76_tx_status_check(&dev->mt76, &msta->wcid, true);
 	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
 		mt76_txq_remove(&dev->mt76, sta->txq[i]);

tx.c

@@ -103,8 +103,33 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
 	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
 }
 
+void
+mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+		   __acquires(&dev->status_list.lock)
+{
+	__skb_queue_head_init(list);
+	spin_lock_bh(&dev->status_list.lock);
+	__acquire(&dev->status_list.lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
+
+void
+mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+		      __releases(&dev->status_list.lock)
+{
+	struct sk_buff *skb;
+
+	spin_unlock_bh(&dev->status_list.lock);
+	__release(&dev->status_list.lock);
+
+	while ((skb = __skb_dequeue(list)) != NULL)
+		ieee80211_tx_status(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
+
 static void
-__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags)
+__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
+			  struct sk_buff_head *list)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
@@ -125,13 +150,14 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags)
 		info->flags |= IEEE80211_TX_STAT_ACK;
 	}
 
-	ieee80211_tx_status(dev->hw, skb);
+	__skb_queue_tail(list, skb);
 }
 
 void
-mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb)
+mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+			struct sk_buff_head *list)
 {
-	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE);
+	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
 }
 EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
 
@@ -173,7 +199,8 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
 EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
 
 struct sk_buff *
-mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid)
+mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
+		       struct sk_buff_head *list)
 {
 	struct sk_buff *skb, *tmp;
 
@@ -194,23 +221,36 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid)
 			continue;
 
 		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
-					  MT_TX_CB_TXS_DONE);
+					  MT_TX_CB_TXS_DONE, list);
 	}
 
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
 
+void
+mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+{
+	struct sk_buff_head list;
+
+	mt76_tx_status_lock(dev, &list);
+	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+	mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_check);
+
 void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
 {
+	struct sk_buff_head list;
+
 	if (!skb->prev) {
 		ieee80211_free_txskb(dev->hw, skb);
 		return;
 	}
 
-	spin_lock_bh(&dev->status_list.lock);
-	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE);
-	spin_unlock_bh(&dev->status_list.lock);
+	mt76_tx_status_lock(dev, &list);
+	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
+	mt76_tx_status_unlock(dev, &list);
 }
 EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);