Now that AQL works well on all mt76 drivers, completely replace the arbitrary burst sizing and number-of-bursts logic for tx scheduling.

For the short period of time in which AQL does not work well yet, limit each station to 16 in-flight packets that have no estimated tx time. This should avoid filling the queue if a station connects and queues up a large number of packets before rate control information is available, especially with hardware rate control.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
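As a rough, self-contained illustration of the per-station cap described above (this is an editor's sketch, not the actual mt76 code; the names sta_txq, non_aql_pending, NON_AQL_PKT_LIMIT and the helper functions are all made up for the example):

	#include <stdbool.h>
	#include <stdio.h>

	#define NON_AQL_PKT_LIMIT 16	/* assumed cap from the commit message */

	struct sta_txq {
		int non_aql_pending;	/* in-flight packets with no tx time estimate */
	};

	/* Returns true if the packet may be queued to hardware. Packets that
	 * already carry an airtime estimate are covered by AQL and bypass
	 * the cap. */
	static bool sta_may_queue(struct sta_txq *sta, bool has_tx_time_est)
	{
		if (has_tx_time_est)
			return true;

		if (sta->non_aql_pending >= NON_AQL_PKT_LIMIT)
			return false;

		sta->non_aql_pending++;
		return true;
	}

	/* Called from the tx completion path once the packet leaves the hardware. */
	static void sta_tx_done(struct sta_txq *sta, bool had_tx_time_est)
	{
		if (!had_tx_time_est && sta->non_aql_pending > 0)
			sta->non_aql_pending--;
	}

	int main(void)
	{
		struct sta_txq sta = { 0 };
		int queued = 0;
		int i;

		/* A station bursts 32 packets before rate control has settled:
		 * only the first 16 reach the hardware queue. */
		for (i = 0; i < 32; i++)
			if (sta_may_queue(&sta, false))
				queued++;

		printf("queued %d of 32 packets\n", queued);	/* queued 16 of 32 */

		sta_tx_done(&sta, false);
		return 0;
	}

Once rate control information is available, frames carry a tx time estimate and are governed by AQL instead of this fixed cap.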
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>

#include "mt7615.h"
#include "../dma.h"
#include "mac.h"

void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7615_dev *dev;
		struct mt7615_txp_common *txp;
		u16 token;

		dev = container_of(mdev, struct mt7615_dev, mt76);
		txp = mt7615_txwi_to_txp(mdev, e->txwi);

		if (is_mt7615(&dev->mt76))
			token = le16_to_cpu(txp->fw.token);
		else
			token = le16_to_cpu(txp->hw.msdu_id[0]) &
				~MT_MSDU_ID_VALID;

		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, token);
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}

static void
mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct mt7615_hw_txp *txp = txp_ptr;
	struct mt7615_txp_ptr *ptr = &txp->ptr[0];
	int i, nbuf = tx_info->nbuf - 1;
	u32 last_mask;

	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->nbuf = 1;

	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);

	if (is_mt7663(&dev->mt76))
		last_mask = MT_TXD_LEN_LAST;
	else
		last_mask = MT_TXD_LEN_AMSDU_LAST |
			    MT_TXD_LEN_MSDU_LAST;

	for (i = 0; i < nbuf; i++) {
		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
		u32 addr = tx_info->buf[i + 1].addr;

		if (i == nbuf - 1)
			len |= last_mask;

		if (i & 1) {
			ptr->buf1 = cpu_to_le32(addr);
			ptr->len1 = cpu_to_le16(len);
			ptr++;
		} else {
			ptr->buf0 = cpu_to_le32(addr);
			ptr->len0 = cpu_to_le16(len);
		}
	}
}

static void
mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7615_fw_txp *txp = txp_ptr;
	int nbuf = tx_info->nbuf - 1;
	int i;

	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	/* pass partial skb header to fw */
	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	txp->token = cpu_to_le16(id);
	txp->rept_wds_wcid = 0xff;
}

int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid, id;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	void *txp;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		struct mt7615_phy *phy = &dev->phy;

		if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
			phy = mdev->phy2->priv;

		spin_lock_bh(&dev->mt76.lock);
		mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
	if (id < 0)
		return id;

	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key, false);

	txp = txwi + MT_TXD_SIZE;
	memset(txp, 0, sizeof(struct mt7615_txp_common));
	if (is_mt7615(&dev->mt76))
		mt7615_write_fw_txp(dev, tx_info, txp, id);
	else
		mt7615_write_hw_txp(dev, tx_info, txp, id);

	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}