forked from Minki/linux
iwlwifi: mvm: support aggregations on A000 HW
On A000 HW, the SCD read pointer has only 8 bits allocated to it. Therefore, when checking whether a queue is full, or whether the SSN is equal to the TID's next_reclaimed, the SSN must be trimmed to 8 bits on A000 HW. Fix this by "normalizing" the SSN — wrapping it around 0xFF — before comparing it to next_reclaimed on A000 HW. Signed-off-by: Liad Kaufman <liad.kaufman@intel.com> Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
This commit is contained in:
parent
87afe9b0f4
commit
dd32162da4
@ -2395,7 +2395,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
|
||||
|
||||
__set_bit(tid_data->txq_id, &txqs);
|
||||
|
||||
if (iwl_mvm_tid_queued(tid_data) == 0)
|
||||
if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
|
||||
continue;
|
||||
|
||||
__set_bit(tid, &tids);
|
||||
|
@ -1323,7 +1323,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
|
||||
* for offloading in order to prevent reuse of the same
|
||||
* qos seq counters.
|
||||
*/
|
||||
if (iwl_mvm_tid_queued(tid_data))
|
||||
if (iwl_mvm_tid_queued(mvm, tid_data))
|
||||
continue;
|
||||
|
||||
if (tid_data->state != IWL_AGG_OFF)
|
||||
|
@ -2529,6 +2529,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_tid_data *tid_data;
|
||||
u16 normalized_ssn;
|
||||
int txq_id;
|
||||
int ret;
|
||||
|
||||
@ -2616,7 +2617,15 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
mvmsta->sta_id, tid, txq_id, tid_data->ssn,
|
||||
tid_data->next_reclaimed);
|
||||
|
||||
if (tid_data->ssn == tid_data->next_reclaimed) {
|
||||
/*
|
||||
* In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
|
||||
* to align the wrap around of ssn so we compare relevant values.
|
||||
*/
|
||||
normalized_ssn = tid_data->ssn;
|
||||
if (mvm->trans->cfg->gen2)
|
||||
normalized_ssn &= 0xff;
|
||||
|
||||
if (normalized_ssn == tid_data->next_reclaimed) {
|
||||
tid_data->state = IWL_AGG_STARTING;
|
||||
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||
} else {
|
||||
@ -3540,7 +3549,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
|
||||
return;
|
||||
}
|
||||
|
||||
n_queued = iwl_mvm_tid_queued(tid_data);
|
||||
n_queued = iwl_mvm_tid_queued(mvm, tid_data);
|
||||
if (n_queued > remaining) {
|
||||
more_data = true;
|
||||
remaining = 0;
|
||||
@ -3722,3 +3731,17 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
|
||||
{
|
||||
u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
|
||||
|
||||
/*
|
||||
* In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
|
||||
* to align the wrap around of ssn so we compare relevant values.
|
||||
*/
|
||||
if (mvm->trans->cfg->gen2)
|
||||
sn &= 0xff;
|
||||
|
||||
return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
|
||||
}
|
||||
|
@ -341,12 +341,6 @@ struct iwl_mvm_tid_data {
|
||||
bool is_tid_active;
|
||||
};
|
||||
|
||||
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
|
||||
{
|
||||
return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
|
||||
tid_data->next_reclaimed);
|
||||
}
|
||||
|
||||
struct iwl_mvm_key_pn {
|
||||
struct rcu_head rcu_head;
|
||||
struct {
|
||||
@ -447,6 +441,8 @@ struct iwl_mvm_sta {
|
||||
u8 avg_energy;
|
||||
};
|
||||
|
||||
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
|
||||
|
||||
static inline struct iwl_mvm_sta *
|
||||
iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
|
||||
{
|
||||
|
@ -1129,13 +1129,14 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
|
||||
struct ieee80211_vif *vif = mvmsta->vif;
|
||||
u16 normalized_ssn;
|
||||
|
||||
lockdep_assert_held(&mvmsta->lock);
|
||||
|
||||
if ((tid_data->state == IWL_AGG_ON ||
|
||||
tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
|
||||
iwl_mvm_is_dqa_supported(mvm)) &&
|
||||
iwl_mvm_tid_queued(tid_data) == 0) {
|
||||
iwl_mvm_tid_queued(mvm, tid_data) == 0) {
|
||||
/*
|
||||
* Now that this aggregation or DQA queue is empty tell
|
||||
* mac80211 so it knows we no longer have frames buffered for
|
||||
@ -1144,7 +1145,15 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
|
||||
ieee80211_sta_set_buffered(sta, tid, false);
|
||||
}
|
||||
|
||||
if (tid_data->ssn != tid_data->next_reclaimed)
|
||||
/*
|
||||
* In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
|
||||
* to align the wrap around of ssn so we compare relevant values.
|
||||
*/
|
||||
normalized_ssn = tid_data->ssn;
|
||||
if (mvm->trans->cfg->gen2)
|
||||
normalized_ssn &= 0xff;
|
||||
|
||||
if (normalized_ssn != tid_data->next_reclaimed)
|
||||
return;
|
||||
|
||||
switch (tid_data->state) {
|
||||
@ -1488,7 +1497,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
|
||||
if (mvmsta->sleep_tx_count) {
|
||||
mvmsta->sleep_tx_count--;
|
||||
if (mvmsta->sleep_tx_count &&
|
||||
!iwl_mvm_tid_queued(tid_data)) {
|
||||
!iwl_mvm_tid_queued(mvm, tid_data)) {
|
||||
/*
|
||||
* The number of frames in the queue
|
||||
* dropped to 0 even if we sent less
|
||||
|
@ -1181,7 +1181,7 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
|
||||
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
|
||||
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
|
||||
/* If some TFDs are still queued - don't mark TID as inactive */
|
||||
if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid]))
|
||||
if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
|
||||
tid_bitmap &= ~BIT(tid);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user