mt76: move token utilities in mt76 common module

Move token management into the mt76 common module, since it is shared
by the mt7615, mt7915 and mt7921 drivers

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Authored by Lorenzo Bianconi on 2021-04-20 23:05:32 +02:00, committed by Felix Fietkau
parent b17aff3368
commit d089692bc7
14 changed files with 132 additions and 113 deletions

mt76.h

@@ -17,12 +17,14 @@
 #include "util.h"
 #include "testmode.h"
 
 #define MT_MCU_RING_SIZE	32
 #define MT_RX_BUF_SIZE		2048
 #define MT_SKB_HEAD_LEN		128
 
 #define MT_MAX_NON_AQL_PKT	16
 #define MT_TXQ_FREE_THR		32
 
+#define MT76_TOKEN_FREE_THR	64
+
 struct mt76_dev;
 struct mt76_phy;
@@ -332,6 +334,7 @@ struct mt76_driver_ops {
 	u32 drv_flags;
 	u32 survey_flags;
 	u16 txwi_size;
+	u16 token_size;
 	u8 mcs_rates;
 
 	void (*update_survey)(struct mt76_dev *dev);
@@ -1215,4 +1218,41 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
 			      struct mt76_power_limits *dest,
 			      s8 target_power);
 
+struct mt76_txwi_cache *
+mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
+int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
+void mt76_token_init(struct mt76_dev *dev);
+void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+
+static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+{
+	spin_lock_bh(&dev->token_lock);
+	__mt76_set_tx_blocked(dev, blocked);
+	spin_unlock_bh(&dev->token_lock);
+}
+
+static inline int
+mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+{
+	int token;
+
+	spin_lock_bh(&dev->token_lock);
+	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
+			  GFP_ATOMIC);
+	spin_unlock_bh(&dev->token_lock);
+
+	return token;
+}
+
+static inline struct mt76_txwi_cache *
+mt76_token_put(struct mt76_dev *dev, int token)
+{
+	struct mt76_txwi_cache *txwi;
+
+	spin_lock_bh(&dev->token_lock);
+	txwi = idr_remove(&dev->token, token);
+	spin_unlock_bh(&dev->token_lock);
+
+	return txwi;
+}
+
 #endif
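
For orientation, a minimal sketch (not part of the patch) of how a driver
is expected to use the consolidated API on the TX-prepare side. The function
sketch_tx_prepare and the descriptor write are hypothetical; only the mt76_*
calls are the ones declared above. Flow-control users (mt7915/mt7921) call
mt76_token_consume(), while mt7615 keeps the plain mt76_token_get() wrapper
around idr_alloc().

	/* hypothetical driver hook: map the frame's txwi to a token id that
	 * the hardware will echo back in its TX-free event
	 */
	static int sketch_tx_prepare(struct mt76_dev *mdev,
				     struct mt76_txwi_cache *t)
	{
		int id;

		/* accounts the token and blocks TX queues near exhaustion */
		id = mt76_token_consume(mdev, &t);
		if (id < 0)
			return id;

		/* ... write id into the hardware TX descriptor ... */
		return 0;
	}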

mt7615/mac.c

@@ -1465,11 +1465,7 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
 	u8 wcid;
 
 	trace_mac_tx_free(dev, token);
-
-	spin_lock_bh(&mdev->token_lock);
-	txwi = idr_remove(&mdev->token, token);
-	spin_unlock_bh(&mdev->token_lock);
-
+	txwi = mt76_token_put(mdev, token);
 	if (!txwi)
 		return;

mt7615/mmio.c

@@ -190,6 +190,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
 		.survey_flags = SURVEY_INFO_TIME_TX |
 				SURVEY_INFO_TIME_RX |
 				SURVEY_INFO_TIME_BSS_RX,
+		.token_size = MT7615_TOKEN_SIZE,
 		.tx_prepare_skb = mt7615_tx_prepare_skb,
 		.tx_complete_skb = mt7615_tx_complete_skb,
 		.rx_skb = mt7615_queue_rx_skb,

mt7615/pci_init.c

@@ -40,8 +40,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
 
 	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
 
 	INIT_WORK(&dev->mcu_work, mt7615_pci_init_work);
-	spin_lock_init(&dev->mt76.token_lock);
-	idr_init(&dev->mt76.token);
+	mt76_token_init(&dev->mt76);
 
 	ret = mt7615_eeprom_init(dev, addr);
 	if (ret < 0)

mt7615/pci_mac.c

@@ -37,9 +37,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
 		token = le16_to_cpu(txp->hw.msdu_id[0]) &
 			~MT_MSDU_ID_VALID;
 
-		spin_lock_bh(&mdev->token_lock);
-		t = idr_remove(&mdev->token, token);
-		spin_unlock_bh(&mdev->token_lock);
+		t = mt76_token_put(mdev, token);
 		e->skb = t ? t->skb : NULL;
 	}
@@ -161,9 +159,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
 	t->skb = tx_info->skb;
 
-	spin_lock_bh(&mdev->token_lock);
-	id = idr_alloc(&mdev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
-	spin_unlock_bh(&mdev->token_lock);
+	id = mt76_token_get(mdev, &t);
 	if (id < 0)
 		return id;

mt7915/init.c

@@ -351,8 +351,7 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
 
 	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
 
 	INIT_WORK(&dev->init_work, mt7915_init_work);
-	spin_lock_init(&dev->mt76.token_lock);
-	idr_init(&dev->mt76.token);
+	mt76_token_init(&dev->mt76);
 
 	dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));

mt7915/mac.c

@@ -974,26 +974,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
 		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
 }
 
-static void
-mt7915_set_tx_blocked(struct mt7915_dev *dev, bool blocked)
-{
-	struct mt76_phy *mphy = &dev->mphy, *mphy2 = dev->mt76.phy2;
-	struct mt76_queue *q, *q2 = NULL;
-
-	q = mphy->q_tx[0];
-	if (blocked == q->blocked)
-		return;
-
-	q->blocked = blocked;
-	if (mphy2) {
-		q2 = mphy2->q_tx[0];
-		q2->blocked = blocked;
-	}
-
-	if (!blocked)
-		mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
 			  struct ieee80211_sta *sta,
@@ -1046,15 +1026,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
 	t->skb = tx_info->skb;
 
-	spin_lock_bh(&mdev->token_lock);
-	id = idr_alloc(&mdev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC);
-	if (id >= 0)
-		mdev->token_count++;
-
-	if (mdev->token_count >= MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR)
-		mt7915_set_tx_blocked(dev, true);
-	spin_unlock_bh(&mdev->token_lock);
+	id = mt76_token_consume(mdev, &t);
 	if (id < 0)
 		return id;
@@ -1218,15 +1190,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
 		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
 		stat = FIELD_GET(MT_TX_FREE_STATUS, info);
 
-		spin_lock_bh(&mdev->token_lock);
-		txwi = idr_remove(&mdev->token, msdu);
-		if (txwi)
-			mdev->token_count--;
-		if (mdev->token_count < MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR &&
-		    dev->mphy.q_tx[0]->blocked)
-			wake = true;
-		spin_unlock_bh(&mdev->token_lock);
+		txwi = mt76_token_release(mdev, msdu, &wake);
 		if (!txwi)
 			continue;
@@ -1256,11 +1220,8 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
 
 	mt7915_mac_sta_poll(dev);
 
-	if (wake) {
-		spin_lock_bh(&mdev->token_lock);
-		mt7915_set_tx_blocked(dev, false);
-		spin_unlock_bh(&mdev->token_lock);
-	}
+	if (wake)
+		mt76_set_tx_blocked(&dev->mt76, false);
 
 	mt76_worker_schedule(&dev->mt76.tx_worker);
@@ -1289,10 +1250,7 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
 		struct mt7915_txp *txp;
 
 		txp = mt7915_txwi_to_txp(mdev, e->txwi);
-
-		spin_lock_bh(&mdev->token_lock);
-		t = idr_remove(&mdev->token, le16_to_cpu(txp->token));
-		spin_unlock_bh(&mdev->token_lock);
+		t = mt76_token_put(mdev, le16_to_cpu(txp->token));
 		e->skb = t ? t->skb : NULL;
 	}
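
Note the shape of the tx-free path above: mt76_token_release() only records
the wake decision under token_lock, and the driver unblocks the queues once,
after the whole free event has been processed, via mt76_set_tx_blocked(),
which takes token_lock itself. A condensed sketch of that pattern (the
per-token completion work is elided; sketch_mac_tx_free is hypothetical):

	static void sketch_mac_tx_free(struct mt76_dev *mdev,
				       const u16 *tokens, int n)
	{
		bool wake = false;
		int i;

		for (i = 0; i < n; i++) {
			struct mt76_txwi_cache *txwi;

			/* sets wake once the pool drains below the
			 * threshold while the queues are blocked
			 */
			txwi = mt76_token_release(mdev, tokens[i], &wake);
			if (!txwi)
				continue;
			/* ... complete txwi->skb ... */
		}

		if (wake)
			mt76_set_tx_blocked(mdev, false);
	}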

mt7915/mt7915.h

@@ -32,7 +32,6 @@
 #define MT7915_EEPROM_SIZE		3584
 
 #define MT7915_TOKEN_SIZE		8192
-#define MT7915_TOKEN_FREE_THR		64
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */

mt7915/pci.c

@@ -212,6 +212,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
 		.survey_flags = SURVEY_INFO_TIME_TX |
 				SURVEY_INFO_TIME_RX |
 				SURVEY_INFO_TIME_BSS_RX,
+		.token_size = MT7915_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt7915_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,

mt7921/init.c

@@ -170,9 +170,7 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
 {
 	int ret, idx;
 
-	spin_lock_init(&dev->mt76.token_lock);
-	idr_init(&dev->mt76.token);
-
+	mt76_token_init(&dev->mt76);
 	ret = mt7921_dma_init(dev);
 	if (ret)
 		return ret;

mt7921/mac.c

@@ -785,20 +785,6 @@ mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
 	}
 }
 
-static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked)
-{
-	struct mt76_phy *mphy = &dev->mphy;
-	struct mt76_queue *q;
-
-	q = mphy->q_tx[0];
-	if (blocked == q->blocked)
-		return;
-
-	q->blocked = blocked;
-	if (!blocked)
-		mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
 int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
 			  struct ieee80211_sta *sta,
@@ -824,15 +810,7 @@ int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
 	t->skb = tx_info->skb;
 
-	spin_lock_bh(&mdev->token_lock);
-	id = idr_alloc(&mdev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC);
-	if (id >= 0)
-		mdev->token_count++;
-
-	if (mdev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR)
-		mt7921_set_tx_blocked(dev, true);
-	spin_unlock_bh(&mdev->token_lock);
+	id = mt76_token_consume(mdev, &t);
 	if (id < 0)
 		return id;
@@ -994,15 +972,7 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
 		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
 		stat = FIELD_GET(MT_TX_FREE_STATUS, info);
 
-		spin_lock_bh(&mdev->token_lock);
-		txwi = idr_remove(&mdev->token, msdu);
-		if (txwi)
-			mdev->token_count--;
-		if (mdev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR &&
-		    dev->mphy.q_tx[0]->blocked)
-			wake = true;
-		spin_unlock_bh(&mdev->token_lock);
+		txwi = mt76_token_release(mdev, msdu, &wake);
 		if (!txwi)
 			continue;
@@ -1030,11 +1000,8 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
 		mt76_put_txwi(mdev, txwi);
 	}
 
-	if (wake) {
-		spin_lock_bh(&mdev->token_lock);
-		mt7921_set_tx_blocked(dev, false);
-		spin_unlock_bh(&mdev->token_lock);
-	}
+	if (wake)
+		mt76_set_tx_blocked(&dev->mt76, false);
 
 	napi_consume_skb(skb, 1);
@@ -1065,11 +1032,8 @@ void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
 		u16 token;
 
 		txp = mt7921_txwi_to_txp(mdev, e->txwi);
 		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
-
-		spin_lock_bh(&mdev->token_lock);
-		t = idr_remove(&mdev->token, token);
-		spin_unlock_bh(&mdev->token_lock);
+		t = mt76_token_put(mdev, token);
 		e->skb = t ? t->skb : NULL;
 	}

mt7921/mt7921.h

@@ -35,7 +35,6 @@
 #define MT7921_EEPROM_SIZE		3584
 
 #define MT7921_TOKEN_SIZE		8192
-#define MT7921_TOKEN_FREE_THR		64
 
 #define MT7921_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7921_CFEND_RATE_11B		0x03	/* 11B LP, 11M */

mt7921/pci.c

@@ -99,6 +99,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
 		.survey_flags = SURVEY_INFO_TIME_TX |
 				SURVEY_INFO_TIME_RX |
 				SURVEY_INFO_TIME_BSS_RX,
+		.token_size = MT7921_TOKEN_SIZE,
 		.tx_prepare_skb = mt7921_tx_prepare_skb,
 		.tx_complete_skb = mt7921_tx_complete_skb,
 		.rx_skb = mt7921_queue_rx_skb,

tx.c

@@ -648,3 +648,71 @@ void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
+
+void mt76_token_init(struct mt76_dev *dev)
+{
+	spin_lock_init(&dev->token_lock);
+	idr_init(&dev->token);
+}
+EXPORT_SYMBOL_GPL(mt76_token_init);
+
+void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+{
+	struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
+	struct mt76_queue *q, *q2 = NULL;
+
+	q = phy->q_tx[0];
+	if (blocked == q->blocked)
+		return;
+
+	q->blocked = blocked;
+	if (phy2) {
+		q2 = phy2->q_tx[0];
+		q2->blocked = blocked;
+	}
+
+	if (!blocked)
+		mt76_worker_schedule(&dev->tx_worker);
+}
+EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);
+
+int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+{
+	int token;
+
+	spin_lock_bh(&dev->token_lock);
+	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
+			  GFP_ATOMIC);
+	if (token >= 0)
+		dev->token_count++;
+
+	if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
+		__mt76_set_tx_blocked(dev, true);
+	spin_unlock_bh(&dev->token_lock);
+
+	return token;
+}
+EXPORT_SYMBOL_GPL(mt76_token_consume);
+
+struct mt76_txwi_cache *
+mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
+{
+	struct mt76_txwi_cache *txwi;
+
+	spin_lock_bh(&dev->token_lock);
+	txwi = idr_remove(&dev->token, token);
+	if (txwi)
+		dev->token_count--;
+
+	if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
+	    dev->phy.q_tx[0]->blocked)
+		*wake = true;
+	spin_unlock_bh(&dev->token_lock);
+
+	return txwi;
+}
+EXPORT_SYMBOL_GPL(mt76_token_release);
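
To make the flow-control numbers concrete, here is a standalone userspace
model (not driver code) of the hysteresis that mt76_token_consume() and
mt76_token_release() implement, using the in-tree values token_size = 8192
and MT76_TOKEN_FREE_THR = 64: TX blocks once 8192 - 64 = 8128 tokens are
outstanding, and unblocks as soon as the count drops back below 8128.

	#include <stdbool.h>
	#include <stdio.h>

	#define TOKEN_SIZE	8192	/* MT7915_TOKEN_SIZE / MT7921_TOKEN_SIZE */
	#define TOKEN_FREE_THR	64	/* MT76_TOKEN_FREE_THR */

	static int token_count;
	static bool blocked;

	static void consume(void)	/* models mt76_token_consume() */
	{
		token_count++;
		if (token_count >= TOKEN_SIZE - TOKEN_FREE_THR)
			blocked = true;
	}

	static void release(void)	/* models mt76_token_release() plus the
					 * caller acting on the wake flag */
	{
		token_count--;
		if (token_count < TOKEN_SIZE - TOKEN_FREE_THR && blocked)
			blocked = false;
	}

	int main(void)
	{
		for (int i = 0; i < 8128; i++)
			consume();
		printf("blocked: %d\n", blocked);	/* prints 1 */
		release();
		printf("blocked: %d\n", blocked);	/* prints 0 */
		return 0;
	}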