From 258afa78661178d16288537ffe8ef863c7e5918a Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Mon, 8 Feb 2021 12:33:56 +0100 Subject: [PATCH 01/13] cfg80211: remove unused callback The ieee80211 class registers a callback which actually does nothing. Given that the callback is optional, and all its accesses are protected by a NULL check, remove it entirely. Signed-off-by: Matteo Croce Link: https://lore.kernel.org/r/20210208113356.4105-1-mcroce@linux.microsoft.com Signed-off-by: Johannes Berg --- net/wireless/sysfs.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 043762354a66..9b959e3b09c6 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -82,12 +82,6 @@ static void wiphy_dev_release(struct device *dev) cfg80211_dev_free(rdev); } -static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - /* TODO, we probably need stuff here */ - return 0; -} - #ifdef CONFIG_PM_SLEEP static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) { @@ -162,7 +156,6 @@ struct class ieee80211_class = { .owner = THIS_MODULE, .dev_release = wiphy_dev_release, .dev_groups = ieee80211_groups, - .dev_uevent = wiphy_uevent, .pm = WIPHY_PM_OPS, .ns_type = &net_ns_type_operations, .namespace = wiphy_namespace, From 10cb8e617560fc050a759a897a2dde07a5fe04cb Mon Sep 17 00:00:00 2001 From: Markus Theil Date: Sat, 6 Feb 2021 12:51:12 +0100 Subject: [PATCH 02/13] mac80211: enable QoS support for nl80211 ctrl port This patch unifies sending control port frames over nl80211 and AF_PACKET sockets a little more. Before this patch, EAPOL frames got QoS prioritization only when using AF_PACKET sockets. __ieee80211_select_queue only selects a QoS-enabled queue for control port frames when the control port protocol is set correctly on the skb. For the AF_PACKET path this works, but the nl80211 path used ETH_P_802_3. Another check for injected frames in wme.c then prevented the QoS TID from being copied into the frame. In order to fix this, get rid of the frame injection marking for nl80211 ctrl port and set the correct ethernet protocol. Please note: An earlier version of this patch tried to prevent frame aggregation for control port frames in order to speed up the initial connection setup a little. This seemed to cause issues on my older Intel dvm-based hardware, and was therefore removed again. Future commits which try to reintroduce this have to check carefully how hw behaves with aggregated and non-aggregated traffic for the same TID.
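For illustration, a minimal sketch of the check this change enables, assuming skb->protocol now carries the real control port ethertype (e.g. ETH_P_PAE) instead of ETH_P_802_3; this is simplified kernel-style C, not the mac80211 implementation itself:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Illustration only: once skb->protocol holds the real ethertype, a plain
 * protocol match is enough to recognize control port frames on both the
 * QoS queue selection and the TX status reporting paths. */
static bool ctrl_port_frame(const struct sk_buff *skb, __be16 control_port_proto)
{
        return skb->protocol == control_port_proto ||
               skb->protocol == cpu_to_be16(ETH_P_PREAUTH);
}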
My NIC: Intel(R) Centrino(R) Ultimate-N 6300 AGN, REV=0x74 Reported-by: kernel test robot Signed-off-by: Markus Theil Link: https://lore.kernel.org/r/20210206115112.567881-1-markus.theil@tu-ilmenau.de Signed-off-by: Johannes Berg --- net/mac80211/status.c | 8 ++------ net/mac80211/tx.c | 25 +++++++++++++++++++------ 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 3485610755ef..9baf185ee4c7 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -628,16 +628,12 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie; struct ieee80211_sub_if_data *sdata; struct ieee80211_hdr *hdr = (void *)skb->data; - __be16 ethertype = 0; - - if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3)) - skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN); rcu_read_lock(); sdata = ieee80211_sdata_from_skb(local, skb); if (sdata) { - if (ethertype == sdata->control_port_protocol || - ethertype == cpu_to_be16(ETH_P_PREAUTH) + if (skb->protocol == sdata->control_port_protocol || + skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) cfg80211_control_port_tx_status(&sdata->wdev, cookie, skb->data, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index d626e6808bef..6ec415f043e1 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1182,9 +1182,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, tx->sta = rcu_dereference(sdata->u.vlan.sta); if (!tx->sta && sdata->wdev.use_4addr) return TX_DROP; - } else if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX | - IEEE80211_TX_CTL_INJECTED) || - tx->sdata->control_port_protocol == tx->skb->protocol) { + } else if (tx->sdata->control_port_protocol == tx->skb->protocol) { tx->sta = sta_info_get_bss(sdata, hdr->addr1); } if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) @@ -5404,6 +5402,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; + struct sta_info *sta; struct sk_buff *skb; struct ethhdr *ehdr; u32 ctrl_flags = 0; @@ -5426,8 +5425,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, if (cookie) ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX | - IEEE80211_TX_CTL_INJECTED; + flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(struct ethhdr) + len); @@ -5444,10 +5442,25 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, ehdr->h_proto = proto; skb->dev = dev; - skb->protocol = htons(ETH_P_802_3); + skb->protocol = proto; skb_reset_network_header(skb); skb_reset_mac_header(skb); + /* update QoS header to prioritize control port frames if possible, + * priorization also happens for control port frames send over + * AF_PACKET */ + rcu_read_lock(); + + if (ieee80211_lookup_ra_sta(sdata, skb, &sta) == 0 && !IS_ERR(sta)) { + u16 queue = __ieee80211_select_queue(sdata, sta, skb); + + skb_set_queue_mapping(skb, queue); + skb_get_hash(skb); + } + + rcu_read_unlock(); + /* mutex lock is only needed for incrementing the cookie counter */ mutex_lock(&local->mtx); From 6194f7e6473be78acdc5d03edd116944bdbb2c4e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 5 Feb 2021 17:53:52 +0000 Subject: [PATCH 03/13] mac80211: fix potential overflow when multiplying two u32 integers The multiplication of the u32 variables
tx_time and estimated_retx is performed using a 32 bit multiplication and the result is stored in a u64 variable. This has a potential u32 overflow issue, so avoid this by casting tx_time to a u64 to force a 64 bit multiply. Addresses-Coverity: ("Unintentional integer overflow") Fixes: 050ac52cbe1f ("mac80211: code for on-demand Hybrid Wireless Mesh Protocol") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20210205175352.208841-1-colin.king@canonical.com Signed-off-by: Johannes Berg --- net/mac80211/mesh_hwmp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 313eee12410e..3db514c4c63a 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -356,7 +356,7 @@ u32 airtime_link_metric_get(struct ieee80211_local *local, */ tx_time = (device_constant + 10 * test_frame_len / rate); estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); - result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); + result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT); return (u32)result; } From 9e6d51265b0aba145ab03b30dcdf3b88902e71f0 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 4 Feb 2021 15:44:39 +0200 Subject: [PATCH 04/13] cfg80211: initialize reg_rule in __freq_reg_info() Sparse started warning on this function because we can potentially return an uninitialized value. The reason is that if the caller passes a min_bw value that is higher than the last value in bws[], we will not go into the loop and reg_rule will remain uninitialized. This cannot happen because the only caller of this function uses either 1 or 20 in min_bw, but the function will be more robust if we pre-initialize the value. Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20210204154439.6c884ea7281c.I257278d03b0c1ae0aa6631672cfa48f1a95d5996@changeid Signed-off-by: Johannes Berg --- net/wireless/reg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 452b698f42be..21536c48deec 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1629,7 +1629,7 @@ __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw) { const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy); static const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20}; - const struct ieee80211_reg_rule *reg_rule; + const struct ieee80211_reg_rule *reg_rule = ERR_PTR(-ERANGE); int i = ARRAY_SIZE(bws) - 1; u32 bw; From a42fa256f66c425021038f40d9255d377a2d1a8d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 27 Jan 2021 06:57:30 +0100 Subject: [PATCH 05/13] mac80211: minstrel_ht: use bitfields to encode rate indexes Get rid of a lot of divisions and modulo operations. This reduces code size and improves performance. Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20210127055735.78599-1-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 126 +++++++++++---------- net/mac80211/rc80211_minstrel_ht.h | 13 +++ net/mac80211/rc80211_minstrel_ht_debugfs.c | 4 +- 3 files changed, 79 insertions(+), 64 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index bfa550068de4..8974c3510489 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -379,13 +379,13 @@ out: static inline struct minstrel_rate_stats * minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) { - return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES]; + return
&mi->groups[MI_RATE_GROUP(index)].rates[MI_RATE_IDX(index)]; } static inline int minstrel_get_duration(int index) { - const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; - unsigned int duration = group->duration[index % MCS_GROUP_RATES]; + const struct mcs_group *group = &minstrel_mcs_groups[MI_RATE_GROUP(index)]; + unsigned int duration = group->duration[MI_RATE_IDX(index)]; return duration << group->shift; } @@ -398,7 +398,7 @@ minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta *mi) if (mi->avg_ampdu_len) return MINSTREL_TRUNC(mi->avg_ampdu_len); - if (minstrel_ht_is_legacy_group(mi->max_tp_rate[0] / MCS_GROUP_RATES)) + if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_tp_rate[0]))) return 1; duration = minstrel_get_duration(mi->max_tp_rate[0]); @@ -465,14 +465,14 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index, int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob; int j = MAX_THR_RATES; - cur_group = index / MCS_GROUP_RATES; - cur_idx = index % MCS_GROUP_RATES; + cur_group = MI_RATE_GROUP(index); + cur_idx = MI_RATE_IDX(index); cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg; cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob); do { - tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; - tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; + tmp_group = MI_RATE_GROUP(tp_list[j - 1]); + tmp_idx = MI_RATE_IDX(tp_list[j - 1]); tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); @@ -504,23 +504,23 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 *dest, u16 index) int max_gpr_group, max_gpr_idx; int max_gpr_tp_avg, max_gpr_prob; - cur_group = index / MCS_GROUP_RATES; - cur_idx = index % MCS_GROUP_RATES; - mg = &mi->groups[index / MCS_GROUP_RATES]; - mrs = &mg->rates[index % MCS_GROUP_RATES]; + cur_group = MI_RATE_GROUP(index); + cur_idx = MI_RATE_IDX(index); + mg = &mi->groups[cur_group]; + mrs = &mg->rates[cur_idx]; - tmp_group = *dest / MCS_GROUP_RATES; - tmp_idx = *dest % MCS_GROUP_RATES; + tmp_group = MI_RATE_GROUP(*dest); + tmp_idx = MI_RATE_IDX(*dest); tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */ - max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES; - max_tp_idx = mi->max_tp_rate[0] % MCS_GROUP_RATES; + max_tp_group = MI_RATE_GROUP(mi->max_tp_rate[0]); + max_tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]); max_tp_prob = mi->groups[max_tp_group].rates[max_tp_idx].prob_avg; - if (minstrel_ht_is_legacy_group(index / MCS_GROUP_RATES) && + if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index)) && !minstrel_ht_is_legacy_group(max_tp_group)) return; @@ -529,8 +529,8 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 *dest, u16 index) mrs->prob_avg < max_tp_prob) return; - max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES; - max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; + max_gpr_group = MI_RATE_GROUP(mg->max_group_prob_rate); + max_gpr_idx = MI_RATE_IDX(mg->max_group_prob_rate); max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg; if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) { @@ -567,13 +567,13 @@ minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi, unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob; int i; - tmp_group = tmp_legacy_tp_rate[0] / MCS_GROUP_RATES; 
- tmp_idx = tmp_legacy_tp_rate[0] % MCS_GROUP_RATES; + tmp_group = MI_RATE_GROUP(tmp_legacy_tp_rate[0]); + tmp_idx = MI_RATE_IDX(tmp_legacy_tp_rate[0]); tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); - tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES; - tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES; + tmp_group = MI_RATE_GROUP(tmp_mcs_tp_rate[0]); + tmp_idx = MI_RATE_IDX(tmp_mcs_tp_rate[0]); tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg; tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob); @@ -600,14 +600,14 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) if (!mi->sta->ht_cap.ht_supported) return; - tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / - MCS_GROUP_RATES].streams; + group = MI_RATE_GROUP(mi->max_tp_rate[0]); + tmp_max_streams = minstrel_mcs_groups[group].streams; for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { mg = &mi->groups[group]; if (!mi->supported[group] || group == MINSTREL_CCK_GROUP) continue; - tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; + tmp_idx = MI_RATE_IDX(mg->max_group_prob_rate); tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg; if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) && @@ -644,8 +644,8 @@ minstrel_ht_find_probe_rates(struct minstrel_ht_sta *mi, u16 *rates, int *n_rate int i, g, max_dur; int tp_idx; - tp_group = &minstrel_mcs_groups[mi->max_tp_rate[0] / MCS_GROUP_RATES]; - tp_idx = mi->max_tp_rate[0] % MCS_GROUP_RATES; + tp_group = &minstrel_mcs_groups[MI_RATE_GROUP(mi->max_tp_rate[0])]; + tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]); max_dur = minstrel_get_duration(mi->max_tp_rate[0]); if (faster_rate) @@ -670,7 +670,7 @@ minstrel_ht_find_probe_rates(struct minstrel_ht_sta *mi, u16 *rates, int *n_rate if ((group->duration[i] << group->shift) > max_dur) continue; - idx = g * MCS_GROUP_RATES + i; + idx = MI_RATE(g, i); if (idx == mi->max_tp_rate[0]) continue; @@ -712,10 +712,10 @@ minstrel_ht_rate_sample_switch(struct minstrel_priv *mp, /* If no suitable rate was found, try to pick the next one in the group */ if (!n_rates) { - int g_idx = mi->max_tp_rate[0] / MCS_GROUP_RATES; + int g_idx = MI_RATE_GROUP(mi->max_tp_rate[0]); u16 supported = mi->supported[g_idx]; - supported >>= mi->max_tp_rate[0] % MCS_GROUP_RATES; + supported >>= MI_RATE_IDX(mi->max_tp_rate[0]); for (i = 0; supported; supported >>= 1, i++) { if (!(supported & 1)) continue; @@ -854,24 +854,27 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, mi->sample_slow = 0; mi->sample_count = 0; - memset(tmp_mcs_tp_rate, 0, sizeof(tmp_mcs_tp_rate)); - memset(tmp_legacy_tp_rate, 0, sizeof(tmp_legacy_tp_rate)); if (mi->supported[MINSTREL_CCK_GROUP]) - for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++) - tmp_legacy_tp_rate[j] = MINSTREL_CCK_GROUP * MCS_GROUP_RATES; + group = MINSTREL_CCK_GROUP; else if (mi->supported[MINSTREL_OFDM_GROUP]) - for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++) - tmp_legacy_tp_rate[j] = MINSTREL_OFDM_GROUP * MCS_GROUP_RATES; + group = MINSTREL_OFDM_GROUP; + else + group = 0; + + index = MI_RATE(group, 0); + for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++) + tmp_legacy_tp_rate[j] = index; if (mi->supported[MINSTREL_VHT_GROUP_0]) - index = MINSTREL_VHT_GROUP_0 * MCS_GROUP_RATES; + group = MINSTREL_VHT_GROUP_0; else if (ht_supported) - index = MINSTREL_HT_GROUP_0 * MCS_GROUP_RATES; + group = MINSTREL_HT_GROUP_0; else if 
(mi->supported[MINSTREL_CCK_GROUP]) - index = MINSTREL_CCK_GROUP * MCS_GROUP_RATES; + group = MINSTREL_CCK_GROUP; else - index = MINSTREL_OFDM_GROUP * MCS_GROUP_RATES; + group = MINSTREL_OFDM_GROUP; + index = MI_RATE(group, 0); tmp_max_prob_rate = index; for (j = 0; j < ARRAY_SIZE(tmp_mcs_tp_rate); j++) tmp_mcs_tp_rate[j] = index; @@ -888,7 +891,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, /* (re)Initialize group rate indexes */ for(j = 0; j < MAX_THR_RATES; j++) - tmp_group_tp_rate[j] = MCS_GROUP_RATES * group; + tmp_group_tp_rate[j] = MI_RATE(group, 0); if (group == MINSTREL_CCK_GROUP && ht_supported) tp_rate = tmp_legacy_tp_rate; @@ -897,7 +900,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, if (!(mi->supported[group] & BIT(i))) continue; - index = MCS_GROUP_RATES * group + i; + index = MI_RATE(group, i); mrs = &mg->rates[i]; mrs->retry_updated = false; @@ -929,13 +932,13 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, continue; mg = &mi->groups[group]; - mg->max_group_prob_rate = MCS_GROUP_RATES * group; + mg->max_group_prob_rate = MI_RATE(group, 0); for (i = 0; i < MCS_GROUP_RATES; i++) { if (!(mi->supported[group] & BIT(i))) continue; - index = MCS_GROUP_RATES * group + i; + index = MI_RATE(group, i); /* Find max probability rate per group and global */ minstrel_ht_set_best_prob_rate(mi, &tmp_max_prob_rate, @@ -1022,7 +1025,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary) { int group, orig_group; - orig_group = group = *idx / MCS_GROUP_RATES; + orig_group = group = MI_RATE_GROUP(*idx); while (group > 0) { group--; @@ -1206,7 +1209,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, ctime += (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); - if (minstrel_ht_is_legacy_group(index / MCS_GROUP_RATES)) { + if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index))) { overhead = mi->overhead_legacy; overhead_rtscts = mi->overhead_legacy_rtscts; } else { @@ -1239,7 +1242,7 @@ static void minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_sta_rates *ratetbl, int offset, int index) { - int group_idx = index / MCS_GROUP_RATES; + int group_idx = MI_RATE_GROUP(index); const struct mcs_group *group = &minstrel_mcs_groups[group_idx]; struct minstrel_rate_stats *mrs; u8 idx; @@ -1259,7 +1262,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts; } - index %= MCS_GROUP_RATES; + index = MI_RATE_IDX(index); if (group_idx == MINSTREL_CCK_GROUP) idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)]; else if (group_idx == MINSTREL_OFDM_GROUP) @@ -1289,17 +1292,17 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, static inline int minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate) { - int group = rate / MCS_GROUP_RATES; - rate %= MCS_GROUP_RATES; + int group = MI_RATE_GROUP(rate); + rate = MI_RATE_IDX(rate); return mi->groups[group].rates[rate].prob_avg; } static int minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi) { - int group = mi->max_prob_rate / MCS_GROUP_RATES; + int group = MI_RATE_GROUP(mi->max_prob_rate); const struct mcs_group *g = &minstrel_mcs_groups[group]; - int rate = mi->max_prob_rate % MCS_GROUP_RATES; + int rate = MI_RATE_IDX(mi->max_prob_rate); unsigned int duration; /* Disable A-MSDU if max_prob_rate is bad */ @@ -1405,7 +1408,7 @@ 
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) return -1; mrs = &mg->rates[sample_idx]; - sample_idx += sample_group * MCS_GROUP_RATES; + sample_idx += MI_RATE(sample_group, 0); tp_rate1 = mi->max_tp_rate[0]; @@ -1455,8 +1458,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) * if the link is working perfectly. */ - cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 / - MCS_GROUP_RATES].streams; + cur_max_tp_streams = minstrel_mcs_groups[MI_RATE_GROUP(tp_rate1)].streams; if (sample_dur >= minstrel_get_duration(tp_rate2) && (cur_max_tp_streams - 1 < minstrel_mcs_groups[sample_group].streams || @@ -1484,7 +1486,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, int sample_idx; if (!(info->flags & IEEE80211_TX_CTL_AMPDU) && - !minstrel_ht_is_legacy_group(mi->max_prob_rate / MCS_GROUP_RATES)) + !minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_prob_rate))) minstrel_aggr_check(sta, txrc->skb); info->flags |= mi->tx_flags; @@ -1512,8 +1514,8 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, if (sample_idx < 0) return; - sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES]; - sample_idx %= MCS_GROUP_RATES; + sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)]; + sample_idx = MI_RATE_IDX(sample_idx); if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] && (sample_idx >= 4) != txrc->short_preamble) @@ -1529,7 +1531,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, int idx = sample_idx % ARRAY_SIZE(mp->ofdm_rates[0]); rate->idx = mp->ofdm_rates[mi->band][idx]; } else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) { - ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES, + ieee80211_rate_set_vht(rate, MI_RATE_IDX(sample_idx), sample_group->streams); } else { rate->idx = sample_idx + (sample_group->streams - 1) * 8; @@ -1898,8 +1900,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta) struct minstrel_ht_sta *mi = priv_sta; int i, j, prob, tp_avg; - i = mi->max_tp_rate[0] / MCS_GROUP_RATES; - j = mi->max_tp_rate[0] % MCS_GROUP_RATES; + i = MI_RATE_GROUP(mi->max_tp_rate[0]); + j = MI_RATE_IDX(mi->max_tp_rate[0]); prob = mi->groups[i].rates[j].prob_avg; /* convert tp_avg from pkt per second in kbps */ diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index 7d6d0b720f6d..5912f7dc5267 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -6,6 +6,8 @@ #ifndef __RC_MINSTREL_HT_H #define __RC_MINSTREL_HT_H +#include + /* number of highest throughput rates to consider*/ #define MAX_THR_RATES 4 #define SAMPLE_COLUMNS 10 /* number of columns in sample table */ @@ -57,6 +59,17 @@ #define MCS_GROUP_RATES 10 +#define MI_RATE_IDX_MASK GENMASK(3, 0) +#define MI_RATE_GROUP_MASK GENMASK(15, 4) + +#define MI_RATE(_group, _idx) \ + (FIELD_PREP(MI_RATE_GROUP_MASK, _group) | \ + FIELD_PREP(MI_RATE_IDX_MASK, _idx)) + +#define MI_RATE_IDX(_rate) FIELD_GET(MI_RATE_IDX_MASK, _rate) +#define MI_RATE_GROUP(_rate) FIELD_GET(MI_RATE_GROUP_MASK, _rate) + + struct minstrel_priv { struct ieee80211_hw *hw; bool has_mrr; diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index 3b7af242cde6..067d10383fa7 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -56,7 +56,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) for (j = 0; j < 
MCS_GROUP_RATES; j++) { struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; - int idx = i * MCS_GROUP_RATES + j; + int idx = MI_RATE(i, j); unsigned int duration; if (!(mi->supported[i] & BIT(j))) continue; @@ -201,7 +201,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) for (j = 0; j < MCS_GROUP_RATES; j++) { struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; - int idx = i * MCS_GROUP_RATES + j; + int idx = MI_RATE(i, j); unsigned int duration; if (!(mi->supported[i] & BIT(j))) From 2012a2f7bcd2aa515430a75f1227471ab4ebd7df Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 27 Jan 2021 06:57:31 +0100 Subject: [PATCH 06/13] mac80211: minstrel_ht: update total packets counter in tx status path Keep the update in one place and prepare for further rework Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20210127055735.78599-2-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 8974c3510489..7846782840a9 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -1092,6 +1092,16 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, info->status.ampdu_len = 1; } + /* wraparound */ + if (mi->total_packets >= ~0 - info->status.ampdu_len) { + mi->total_packets = 0; + mi->sample_packets = 0; + } + + mi->total_packets += info->status.ampdu_len; + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + mi->sample_packets += info->status.ampdu_len; + mi->ampdu_packets++; mi->ampdu_len += info->status.ampdu_len; @@ -1103,9 +1113,6 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, mi->sample_count--; } - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - mi->sample_packets += info->status.ampdu_len; - if (mi->sample_mode != MINSTREL_SAMPLE_IDLE) rate_sample = minstrel_get_ratestats(mi, mi->sample_rate); @@ -1503,14 +1510,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, else sample_idx = minstrel_get_sample_rate(mp, mi); - mi->total_packets++; - - /* wraparound */ - if (mi->total_packets == ~0) { - mi->total_packets = 0; - mi->sample_packets = 0; - } - if (sample_idx < 0) return; From 7aece471a0e6e3cb84a89ce09de075c91f58d357 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 27 Jan 2021 06:57:32 +0100 Subject: [PATCH 07/13] mac80211: minstrel_ht: reduce the need to sample slower rates In order to more gracefully be able to fall back to lower rates without too many throughput fluctuations, initialize all untested rates below tested ones to the maximum probability of higher rates.
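A standalone sketch of that initialization idea, using hypothetical names and a plain array of per-rate statistics instead of the real minstrel_ht structures:

#define PROB_MAX(a, b)  ((a) > (b) ? (a) : (b))

struct rate_stat {
        unsigned int att_hist;  /* historic tx attempts, 0 == never tested */
        unsigned int prob_avg;  /* averaged success probability */
};

/* Walk a group from the fastest rate down to the slowest: untested rates
 * inherit the best success probability seen among the faster rates above. */
static void init_untested_probs(struct rate_stat *rs, int n_rates)
{
        unsigned int last_prob = 0;
        int i;

        for (i = n_rates - 1; i >= 0; i--) {
                if (rs[i].att_hist)
                        last_prob = PROB_MAX(last_prob, rs[i].prob_avg);
                else
                        rs[i].prob_avg = PROB_MAX(last_prob, rs[i].prob_avg);
        }
}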
Usually this leads to untested lower rates getting initialized with a probability value of 100%, making them better candidates for fallback without having to rely on random probing Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20210127055735.78599-3-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 18 ++++++++---------- net/mac80211/rc80211_minstrel_ht.h | 2 -- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 7846782840a9..4d4eb4aa46bd 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -791,14 +791,11 @@ minstrel_ht_calc_rate_stats(struct minstrel_priv *mp, unsigned int cur_prob; if (unlikely(mrs->attempts > 0)) { - mrs->sample_skipped = 0; cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts); minstrel_filter_avg_add(&mrs->prob_avg, &mrs->prob_avg_1, cur_prob); mrs->att_hist += mrs->attempts; mrs->succ_hist += mrs->success; - } else { - mrs->sample_skipped++; } mrs->last_success = mrs->success; @@ -851,7 +848,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, mi->ampdu_packets = 0; } - mi->sample_slow = 0; mi->sample_count = 0; if (mi->supported[MINSTREL_CCK_GROUP]) @@ -882,6 +878,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, /* Find best rate sets within all MCS groups*/ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { u16 *tp_rate = tmp_mcs_tp_rate; + u16 last_prob = 0; mg = &mi->groups[group]; if (!mi->supported[group]) @@ -896,7 +893,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, if (group == MINSTREL_CCK_GROUP && ht_supported) tp_rate = tmp_legacy_tp_rate; - for (i = 0; i < MCS_GROUP_RATES; i++) { + for (i = MCS_GROUP_RATES - 1; i >= 0; i--) { if (!(mi->supported[group] & BIT(i))) continue; @@ -905,6 +902,11 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, mrs = &mg->rates[i]; mrs->retry_updated = false; minstrel_ht_calc_rate_stats(mp, mrs); + + if (mrs->att_hist) + last_prob = max(last_prob, mrs->prob_avg); + else + mrs->prob_avg = max(last_prob, mrs->prob_avg); cur_prob = mrs->prob_avg; if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0) @@ -1469,13 +1471,9 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) if (sample_dur >= minstrel_get_duration(tp_rate2) && (cur_max_tp_streams - 1 < minstrel_mcs_groups[sample_group].streams || - sample_dur >= minstrel_get_duration(mi->max_prob_rate))) { - if (mrs->sample_skipped < 20) + sample_dur >= minstrel_get_duration(mi->max_prob_rate))) return -1; - if (mi->sample_slow++ > 2) - return -1; - } mi->sample_tries--; return sample_idx; diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index 5912f7dc5267..ebb2b88f44d9 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -123,7 +123,6 @@ struct minstrel_rate_stats { u8 retry_count; u8 retry_count_rtscts; - u8 sample_skipped; bool retry_updated; }; @@ -179,7 +178,6 @@ struct minstrel_ht_sta { u8 sample_wait; u8 sample_tries; u8 sample_count; - u8 sample_slow; enum minstrel_sample_mode sample_mode; u16 sample_rate; From 80d55154b2f8f5298f14fb83a0fb99cacb043c07 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 27 Jan 2021 06:57:33 +0100 Subject: [PATCH 08/13] mac80211: minstrel_ht: significantly redesign the rate probing strategy The biggest flaw in 
current minstrel_ht is the fact that it needs way too many probing packets to be able to quickly find the best rate. Depending on the wifi hardware and operating mode, this can significantly reduce throughput when not operating at the highest available data rate. In order to be able to significantly reduce the amount of rate sampling, we need a much smarter selection of probing rates. The new approach introduced by this patch maintains a limited set of available rates to be tested during a statistics window. They are split into distinct categories: - MINSTREL_SAMPLE_TYPE_INC - incremental rate upgrade: Pick the next rate group and find the first rate that is faster than the current max. throughput rate - MINSTREL_SAMPLE_TYPE_JUMP - random testing of higher rates: Pick a random rate from the next group that is faster than the current max throughput rate. This allows faster adaptation when the link changes significantly - MINSTREL_SAMPLE_TYPE_SLOW - test a rate between max_prob, max_tp2 and max_tp in order to reduce the gap between them In order to prioritize sampling, every 6 attempts are split into 3x INC, 2x JUMP, 1x SLOW. Available rates are checked and refilled on every stats window update. With this approach, we finally get a very small delta in throughput when comparing setting the optimal data rate as a fixed rate vs normal rate control operation. Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20210127055735.78599-4-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 550 +++++++++++++++++------------ net/mac80211/rc80211_minstrel_ht.h | 25 +- 2 files changed, 336 insertions(+), 239 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 4d4eb4aa46bd..e29d96474efa 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -266,6 +266,14 @@ const struct mcs_group minstrel_mcs_groups[] = { const s16 minstrel_cck_bitrates[4] = { 10, 20, 55, 110 }; const s16 minstrel_ofdm_bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 }; static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly; +static const u8 minstrel_sample_seq[] = { + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_JUMP, + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_JUMP, + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_SLOW, +}; static void minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi); @@ -620,77 +628,31 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) } } -static bool -minstrel_ht_probe_group(struct minstrel_ht_sta *mi, const struct mcs_group *tp_group, - int tp_idx, const struct mcs_group *group) +static u16 +__minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi, + enum minstrel_sample_type type) { - if (group->bw < tp_group->bw) - return false; + u16 *rates = mi->sample[type].sample_rates; + u16 cur; + int i; - if (group->streams == tp_group->streams) - return true; - - if (tp_idx < 4 && group->streams == tp_group->streams - 1) - return true; - - return group->streams == tp_group->streams + 1; -} - -static void -minstrel_ht_find_probe_rates(struct minstrel_ht_sta *mi, u16 *rates, int *n_rates, - bool faster_rate) -{ - const struct mcs_group *group, *tp_group; - int i, g, max_dur; - int tp_idx; - - tp_group = &minstrel_mcs_groups[MI_RATE_GROUP(mi->max_tp_rate[0])]; - tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]); - - max_dur = minstrel_get_duration(mi->max_tp_rate[0]); - if (faster_rate) - max_dur -= max_dur / 16; - - for (g = 0; g 
< MINSTREL_GROUPS_NB; g++) { - u16 supported = mi->supported[g]; - - if (!supported) + for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) { + if (!rates[i]) continue; - group = &minstrel_mcs_groups[g]; - if (!minstrel_ht_probe_group(mi, tp_group, tp_idx, group)) - continue; - - for (i = 0; supported; supported >>= 1, i++) { - int idx; - - if (!(supported & 1)) - continue; - - if ((group->duration[i] << group->shift) > max_dur) - continue; - - idx = MI_RATE(g, i); - if (idx == mi->max_tp_rate[0]) - continue; - - rates[(*n_rates)++] = idx; - break; - } + cur = rates[i]; + rates[i] = 0; + return cur; } + + return 0; } static void minstrel_ht_rate_sample_switch(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { - struct minstrel_rate_stats *mrs; - u16 rates[MINSTREL_GROUPS_NB]; - int n_rates = 0; - int probe_rate = 0; - bool faster_rate; - int i; - u8 random; + u16 rate; /* * Use rate switching instead of probing packets for devices with @@ -699,43 +661,11 @@ minstrel_ht_rate_sample_switch(struct minstrel_priv *mp, if (mp->hw->max_rates > 1) return; - /* - * If the current EWMA prob is >75%, look for a rate that's 6.25% - * faster than the max tp rate. - * If that fails, look again for a rate that is at least as fast - */ - mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); - faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100); - minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate); - if (!n_rates && faster_rate) - minstrel_ht_find_probe_rates(mi, rates, &n_rates, false); - - /* If no suitable rate was found, try to pick the next one in the group */ - if (!n_rates) { - int g_idx = MI_RATE_GROUP(mi->max_tp_rate[0]); - u16 supported = mi->supported[g_idx]; - - supported >>= MI_RATE_IDX(mi->max_tp_rate[0]); - for (i = 0; supported; supported >>= 1, i++) { - if (!(supported & 1)) - continue; - - probe_rate = mi->max_tp_rate[0] + i; - goto out; - } - + rate = __minstrel_ht_get_sample_rate(mi, MINSTREL_SAMPLE_TYPE_INC); + if (!rate) return; - } - i = 0; - if (n_rates > 1) { - random = prandom_u32(); - i = random % n_rates; - } - probe_rate = rates[i]; - -out: - mi->sample_rate = probe_rate; + mi->sample_rate = rate; mi->sample_mode = MINSTREL_SAMPLE_ACTIVE; } @@ -804,6 +734,274 @@ minstrel_ht_calc_rate_stats(struct minstrel_priv *mp, mrs->attempts = 0; } +static bool +minstrel_ht_find_sample_rate(struct minstrel_ht_sta *mi, int type, int idx) +{ + int i; + + for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) { + u16 cur = mi->sample[type].sample_rates[i]; + + if (cur == idx) + return true; + + if (!cur) + break; + } + + return false; +} + +static int +minstrel_ht_move_sample_rates(struct minstrel_ht_sta *mi, int type, + u32 fast_rate_dur, u32 slow_rate_dur) +{ + u16 *rates = mi->sample[type].sample_rates; + int i, j; + + for (i = 0, j = 0; i < MINSTREL_SAMPLE_RATES; i++) { + u32 duration; + bool valid = false; + u16 cur; + + cur = rates[i]; + if (!cur) + continue; + + duration = minstrel_get_duration(cur); + switch (type) { + case MINSTREL_SAMPLE_TYPE_SLOW: + valid = duration > fast_rate_dur && + duration < slow_rate_dur; + break; + case MINSTREL_SAMPLE_TYPE_INC: + case MINSTREL_SAMPLE_TYPE_JUMP: + valid = duration < fast_rate_dur; + break; + default: + valid = false; + break; + } + + if (!valid) { + rates[i] = 0; + continue; + } + + if (i == j) + continue; + + rates[j++] = cur; + rates[i] = 0; + } + + return j; +} + +static int +minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group, + u32 max_duration) +{ + u16 supported = mi->supported[group]; + int i; + + for (i = 0; i < 
MCS_GROUP_RATES && supported; i++, supported >>= 1) { + if (!(supported & BIT(0))) + continue; + + if (minstrel_get_duration(MI_RATE(group, i)) >= max_duration) + continue; + + return i; + } + + return -1; +} + +/* + * Incremental update rates: + * Flip through groups and pick the first group rate that is faster than the + * highest currently selected rate + */ +static u16 +minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur) +{ + struct minstrel_mcs_group_data *mg; + u8 type = MINSTREL_SAMPLE_TYPE_INC; + int i, index = 0; + u8 group; + + group = mi->sample[type].sample_group; + for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { + group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups); + mg = &mi->groups[group]; + + index = minstrel_ht_group_min_rate_offset(mi, group, + fast_rate_dur); + if (index < 0) + continue; + + index = MI_RATE(group, index & 0xf); + if (!minstrel_ht_find_sample_rate(mi, type, index)) + goto out; + } + index = 0; + +out: + mi->sample[type].sample_group = group; + + return index; +} + +static int +minstrel_ht_next_group_sample_rate(struct minstrel_ht_sta *mi, int group, + u16 supported, int offset) +{ + struct minstrel_mcs_group_data *mg = &mi->groups[group]; + u16 idx; + int i; + + for (i = 0; i < MCS_GROUP_RATES; i++) { + idx = sample_table[mg->column][mg->index]; + if (++mg->index >= MCS_GROUP_RATES) { + mg->index = 0; + if (++mg->column >= ARRAY_SIZE(sample_table)) + mg->column = 0; + } + + if (idx < offset) + continue; + + if (!(supported & BIT(idx))) + continue; + + return MI_RATE(group, idx); + } + + return -1; +} + +/* + * Jump rates: + * Sample random rates, use those that are faster than the highest + * currently selected rate. Rates between the fastest and the slowest + * get sorted into the slow sample bucket, but only if it has room + */ +static u16 +minstrel_ht_next_jump_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur, + u32 slow_rate_dur, int *slow_rate_ofs) +{ + struct minstrel_mcs_group_data *mg; + struct minstrel_rate_stats *mrs; + u32 max_duration = slow_rate_dur; + int i, index, offset; + u16 *slow_rates; + u16 supported; + u32 duration; + u8 group; + + if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES) + max_duration = fast_rate_dur; + + slow_rates = mi->sample[MINSTREL_SAMPLE_TYPE_SLOW].sample_rates; + group = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group; + for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { + u8 type; + + group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups); + mg = &mi->groups[group]; + + supported = mi->supported[group]; + if (!supported) + continue; + + offset = minstrel_ht_group_min_rate_offset(mi, group, + max_duration); + if (offset < 0) + continue; + + index = minstrel_ht_next_group_sample_rate(mi, group, supported, + offset); + if (index < 0) + continue; + + duration = minstrel_get_duration(index); + if (duration < fast_rate_dur) + type = MINSTREL_SAMPLE_TYPE_JUMP; + else + type = MINSTREL_SAMPLE_TYPE_SLOW; + + if (minstrel_ht_find_sample_rate(mi, type, index)) + continue; + + if (type == MINSTREL_SAMPLE_TYPE_JUMP) + goto found; + + if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES) + continue; + + if (duration >= slow_rate_dur) + continue; + + /* skip slow rates with high success probability */ + mrs = minstrel_get_ratestats(mi, index); + if (mrs->prob_avg > MINSTREL_FRAC(95, 100)) + continue; + + slow_rates[(*slow_rate_ofs)++] = index; + if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES) + max_duration = fast_rate_dur; + } + index = 0; + +found: + mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group = 
group; + + return index; +} + +static void +minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi) +{ + u32 prob_dur = minstrel_get_duration(mi->max_prob_rate); + u32 tp_dur = minstrel_get_duration(mi->max_tp_rate[0]); + u32 tp2_dur = minstrel_get_duration(mi->max_tp_rate[1]); + u32 fast_rate_dur = min(min(tp_dur, tp2_dur), prob_dur); + u32 slow_rate_dur = max(max(tp_dur, tp2_dur), prob_dur); + u16 *rates; + int i, j; + + rates = mi->sample[MINSTREL_SAMPLE_TYPE_INC].sample_rates; + i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_INC, + fast_rate_dur, slow_rate_dur); + while (i < MINSTREL_SAMPLE_RATES) { + rates[i] = minstrel_ht_next_inc_rate(mi, tp_dur); + if (!rates[i]) + break; + + i++; + } + + rates = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_rates; + i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_JUMP, + fast_rate_dur, slow_rate_dur); + j = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_SLOW, + fast_rate_dur, slow_rate_dur); + while (i < MINSTREL_SAMPLE_RATES) { + rates[i] = minstrel_ht_next_jump_rate(mi, fast_rate_dur, + slow_rate_dur, &j); + if (!rates[i]) + break; + + i++; + } + + for (i = 0; i < ARRAY_SIZE(mi->sample); i++) + memcpy(mi->sample[i].cur_sample_rates, mi->sample[i].sample_rates, + sizeof(mi->sample[i].cur_sample_rates)); +} + + /* * Update rate statistics and select new primary rates * @@ -848,8 +1046,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, mi->ampdu_packets = 0; } - mi->sample_count = 0; - if (mi->supported[MINSTREL_CCK_GROUP]) group = MINSTREL_CCK_GROUP; else if (mi->supported[MINSTREL_OFDM_GROUP]) @@ -884,8 +1080,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, if (!mi->supported[group]) continue; - mi->sample_count++; - /* (re)Initialize group rate indexes */ for(j = 0; j < MAX_THR_RATES; j++) tmp_group_tp_rate[j] = MI_RATE(group, 0); @@ -952,9 +1146,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, /* Try to increase robustness of max_prob_rate*/ minstrel_ht_prob_rate_reduce_streams(mi); - - /* try to sample half of all available rates during each interval */ - mi->sample_count *= 4; + minstrel_ht_refill_sample_rates(mi); if (sample) minstrel_ht_rate_sample_switch(mp, mi); @@ -971,6 +1163,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, /* Reset update timer */ mi->last_stats_update = jiffies; + mi->sample_time = jiffies; } static bool @@ -1000,28 +1193,6 @@ minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, return false; } -static void -minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi) -{ - struct minstrel_mcs_group_data *mg; - - for (;;) { - mi->sample_group++; - mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); - mg = &mi->groups[mi->sample_group]; - - if (!mi->supported[mi->sample_group]) - continue; - - if (++mg->index >= MCS_GROUP_RATES) { - mg->index = 0; - if (++mg->column >= ARRAY_SIZE(sample_table)) - mg->column = 0; - } - break; - } -} - static void minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary) { @@ -1107,14 +1278,6 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, mi->ampdu_packets++; mi->ampdu_len += info->status.ampdu_len; - if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { - int avg_ampdu_len = minstrel_ht_avg_ampdu_len(mi); - - mi->sample_wait = 16 + 2 * avg_ampdu_len; - mi->sample_tries = 1; - mi->sample_count--; - } - if (mi->sample_mode != 
MINSTREL_SAMPLE_IDLE) rate_sample = minstrel_get_ratestats(mi, mi->sample_rate); @@ -1386,97 +1549,20 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) rate_control_set_rates(mp->hw, mi->sta, rates); } -static int -minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) +static u16 +minstrel_ht_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { - struct minstrel_rate_stats *mrs; - struct minstrel_mcs_group_data *mg; - unsigned int sample_dur, sample_group, cur_max_tp_streams; - int tp_rate1, tp_rate2; - int sample_idx = 0; + u8 seq; - if (mp->hw->max_rates == 1 && mp->sample_switch && - (mi->total_packets_cur >= SAMPLE_SWITCH_THR || - mp->sample_switch == 1)) - return -1; - - if (mi->sample_wait > 0) { - mi->sample_wait--; - return -1; - } - - if (!mi->sample_tries) - return -1; - - sample_group = mi->sample_group; - mg = &mi->groups[sample_group]; - sample_idx = sample_table[mg->column][mg->index]; - minstrel_set_next_sample_idx(mi); - - if (!(mi->supported[sample_group] & BIT(sample_idx))) - return -1; - - mrs = &mg->rates[sample_idx]; - sample_idx += MI_RATE(sample_group, 0); - - tp_rate1 = mi->max_tp_rate[0]; - - /* Set tp_rate2 to the second highest max_tp_rate */ - if (minstrel_get_duration(mi->max_tp_rate[0]) > - minstrel_get_duration(mi->max_tp_rate[1])) { - tp_rate2 = mi->max_tp_rate[0]; + if (mp->hw->max_rates > 1) { + seq = mi->sample_seq; + mi->sample_seq = (seq + 1) % ARRAY_SIZE(minstrel_sample_seq); + seq = minstrel_sample_seq[seq]; } else { - tp_rate2 = mi->max_tp_rate[1]; + seq = MINSTREL_SAMPLE_TYPE_INC; } - /* - * Sampling might add some overhead (RTS, no aggregation) - * to the frame. Hence, don't use sampling for the highest currently - * used highest throughput or probability rate. - */ - if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate) - return -1; - - /* - * Do not sample if the probability is already higher than 95%, - * or if the rate is 3 times slower than the current max probability - * rate, to avoid wasting airtime. - */ - sample_dur = minstrel_get_duration(sample_idx); - if (mrs->prob_avg > MINSTREL_FRAC(95, 100) || - minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur) - return -1; - - - /* - * For devices with no configurable multi-rate retry, skip sampling - * below the per-group max throughput rate, and only use one sampling - * attempt per rate - */ - if (mp->hw->max_rates == 1 && - (minstrel_get_duration(mg->max_group_tp_rate[0]) < sample_dur || - mrs->attempts)) - return -1; - - /* Skip already sampled slow rates */ - if (sample_dur >= minstrel_get_duration(tp_rate1) && mrs->attempts) - return -1; - - /* - * Make sure that lower rates get sampled only occasionally, - * if the link is working perfectly. 
- */ - - cur_max_tp_streams = minstrel_mcs_groups[MI_RATE_GROUP(tp_rate1)].streams; - if (sample_dur >= minstrel_get_duration(tp_rate2) && - (cur_max_tp_streams - 1 < - minstrel_mcs_groups[sample_group].streams || - sample_dur >= minstrel_get_duration(mi->max_prob_rate))) - return -1; - - mi->sample_tries--; - - return sample_idx; + return __minstrel_ht_get_sample_rate(mi, seq); } static void @@ -1488,7 +1574,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate *rate = &info->status.rates[0]; struct minstrel_ht_sta *mi = priv_sta; struct minstrel_priv *mp = priv; - int sample_idx; + u16 sample_idx; if (!(info->flags & IEEE80211_TX_CTL_AMPDU) && !minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_prob_rate))) @@ -1504,11 +1590,19 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, /* Don't use EAPOL frames for sampling on non-mrr hw */ if (mp->hw->max_rates == 1 && (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) - sample_idx = -1; - else - sample_idx = minstrel_get_sample_rate(mp, mi); + return; - if (sample_idx < 0) + if (mp->hw->max_rates == 1 && mp->sample_switch && + (mi->total_packets_cur >= SAMPLE_SWITCH_THR || + mp->sample_switch == 1)) + return; + + if (time_is_before_jiffies(mi->sample_time)) + return; + + mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL; + sample_idx = minstrel_ht_get_sample_rate(mp, mi); + if (!sample_idx) return; sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)]; @@ -1629,16 +1723,6 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); - /* When using MRR, sample more on the first attempt, without delay */ - if (mp->has_mrr) { - mi->sample_count = 16; - mi->sample_wait = 0; - } else { - mi->sample_count = 8; - mi->sample_wait = 8; - } - mi->sample_tries = 4; - if (!use_vht) { stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >> IEEE80211_HT_CAP_RX_STBC_SHIFT; diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index ebb2b88f44d9..0d8c15f83f5d 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -69,6 +69,8 @@ #define MI_RATE_IDX(_rate) FIELD_GET(MI_RATE_IDX_MASK, _rate) #define MI_RATE_GROUP(_rate) FIELD_GET(MI_RATE_GROUP_MASK, _rate) +#define MINSTREL_SAMPLE_RATES 5 /* rates per sample type */ +#define MINSTREL_SAMPLE_INTERVAL (HZ / 50) struct minstrel_priv { struct ieee80211_hw *hw; @@ -126,6 +128,13 @@ struct minstrel_rate_stats { bool retry_updated; }; +enum minstrel_sample_type { + MINSTREL_SAMPLE_TYPE_INC, + MINSTREL_SAMPLE_TYPE_JUMP, + MINSTREL_SAMPLE_TYPE_SLOW, + __MINSTREL_SAMPLE_TYPE_MAX +}; + struct minstrel_mcs_group_data { u8 index; u8 column; @@ -144,6 +153,12 @@ enum minstrel_sample_mode { MINSTREL_SAMPLE_PENDING, }; +struct minstrel_sample_category { + u8 sample_group; + u16 sample_rates[MINSTREL_SAMPLE_RATES]; + u16 cur_sample_rates[MINSTREL_SAMPLE_RATES]; +}; + struct minstrel_ht_sta { struct ieee80211_sta *sta; @@ -175,16 +190,14 @@ struct minstrel_ht_sta { /* tx flags to add for frames for this sta */ u32 tx_flags; - u8 sample_wait; - u8 sample_tries; - u8 sample_count; + unsigned long sample_time; + struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX]; + + u8 sample_seq; enum minstrel_sample_mode sample_mode; u16 sample_rate; - /* current MCS group to be sampled */ - u8 sample_group; - u8 band; /* Bitfield of supported MCS rates of all groups */ From 4a8d0c999fede59b75045ea5ee40c8a6098a45b2 Mon 
Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 27 Jan 2021 06:57:34 +0100 Subject: [PATCH 09/13] mac80211: minstrel_ht: show sampling rates in debugfs This makes it easier to see what rates are going to be tested next Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20210127055735.78599-5-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht_debugfs.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index 067d10383fa7..25b8a67a63a4 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -32,6 +32,18 @@ minstrel_stats_release(struct inode *inode, struct file *file) return 0; } +static bool +minstrel_ht_is_sample_rate(struct minstrel_ht_sta *mi, int idx) +{ + int type, i; + + for (type = 0; type < ARRAY_SIZE(mi->sample); type++) + for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) + if (mi->sample[type].cur_sample_rates[i] == idx) + return true; + return false; +} + static char * minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) { @@ -84,6 +96,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) *(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' '; *(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' '; *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' '; + *(p++) = minstrel_ht_is_sample_rate(mi, idx) ? 'S' : ' '; if (gflags & IEEE80211_TX_RC_MCS) { p += sprintf(p, " MCS%-2u", (mg->streams - 1) * 8 + j); @@ -145,9 +158,9 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) p += sprintf(p, "\n"); p += sprintf(p, - " best ____________rate__________ ____statistics___ _____last____ ______sum-of________\n"); + " best ____________rate__________ ____statistics___ _____last____ ______sum-of________\n"); p += sprintf(p, - "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n"); + "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n"); p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p); for (i = 0; i < MINSTREL_CCK_GROUP; i++) @@ -228,6 +241,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[2]) ? "C" : "")); p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[3]) ? "D" : "")); p += sprintf(p, "%s" ,((idx == mi->max_prob_rate) ? "P" : "")); + p += sprintf(p, "%s", (minstrel_ht_is_sample_rate(mi, idx) ? "S" : "")); if (gflags & IEEE80211_TX_RC_MCS) { p += sprintf(p, ",MCS%-2u,", (mg->streams - 1) * 8 + j); From c0eb09aa7e1cf141f8a623fe46fec8d9a9e74268 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 27 Jan 2021 06:57:35 +0100 Subject: [PATCH 10/13] mac80211: minstrel_ht: remove sample rate switching code for constrained devices This was added to mitigate the effects of too much sampling on devices that use a static global fallback table instead of configurable multi-rate retry. Now that the sampling algorithm is improved, this code path no longer performs any better than the standard probing on affected devices. 
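The improved sampling algorithm referred to here is the one introduced earlier in this series: sampling attempts rotate through a fixed six-entry sequence, which is what produces the 3x INC, 2x JUMP, 1x SLOW split. A standalone sketch of that rotation, with hypothetical names (the real code uses minstrel_sample_seq[]):

enum sample_type { SAMPLE_INC, SAMPLE_JUMP, SAMPLE_SLOW };

static const enum sample_type sample_seq[] = {
        SAMPLE_INC, SAMPLE_JUMP, SAMPLE_INC,
        SAMPLE_JUMP, SAMPLE_INC, SAMPLE_SLOW,
};

/* Round-robin through the sequence; every six sampling attempts yield
 * three incremental, two jump and one slow sample. */
static enum sample_type next_sample_type(unsigned int *seq_pos)
{
        enum sample_type type = sample_seq[*seq_pos];

        *seq_pos = (*seq_pos + 1) % (sizeof(sample_seq) / sizeof(sample_seq[0]));
        return type;
}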
Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20210127055735.78599-6-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/rc80211_minstrel_ht.c | 95 ++---------------------------- net/mac80211/rc80211_minstrel_ht.h | 17 +----- 2 files changed, 9 insertions(+), 103 deletions(-) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index e29d96474efa..2f44f4919789 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -648,27 +648,6 @@ __minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi, return 0; } -static void -minstrel_ht_rate_sample_switch(struct minstrel_priv *mp, - struct minstrel_ht_sta *mi) -{ - u16 rate; - - /* - * Use rate switching instead of probing packets for devices with - * little control over retry fallback behavior - */ - if (mp->hw->max_rates > 1) - return; - - rate = __minstrel_ht_get_sample_rate(mi, MINSTREL_SAMPLE_TYPE_INC); - if (!rate) - return; - - mi->sample_rate = rate; - mi->sample_mode = MINSTREL_SAMPLE_ACTIVE; -} - static inline int minstrel_ewma(int old, int new, int weight) { @@ -1012,8 +991,7 @@ minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi) * higher throughput rates, even if the probablity is a bit lower */ static void -minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, - bool sample) +minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct minstrel_mcs_group_data *mg; struct minstrel_rate_stats *mrs; @@ -1023,18 +1001,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, u16 index; bool ht_supported = mi->sta->ht_cap.ht_supported; - mi->sample_mode = MINSTREL_SAMPLE_IDLE; - - if (sample) { - mi->total_packets_cur = mi->total_packets - - mi->total_packets_last; - mi->total_packets_last = mi->total_packets; - } - if (!mp->sample_switch) - sample = false; - if (mi->total_packets_cur < SAMPLE_SWITCH_THR && mp->sample_switch != 1) - sample = false; - if (mi->ampdu_packets > 0) { if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN)) mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, @@ -1148,16 +1114,12 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, minstrel_ht_prob_rate_reduce_streams(mi); minstrel_ht_refill_sample_rates(mi); - if (sample) - minstrel_ht_rate_sample_switch(mp, mi); - #ifdef CONFIG_MAC80211_DEBUGFS /* use fixed index if set */ if (mp->fixed_rate_idx != -1) { for (i = 0; i < 4; i++) mi->max_tp_rate[i] = mp->fixed_rate_idx; mi->max_prob_rate = mp->fixed_rate_idx; - mi->sample_mode = MINSTREL_SAMPLE_IDLE; } #endif @@ -1247,11 +1209,10 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_tx_info *info = st->info; struct minstrel_ht_sta *mi = priv_sta; struct ieee80211_tx_rate *ar = info->status.rates; - struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL; + struct minstrel_rate_stats *rate, *rate2; struct minstrel_priv *mp = priv; u32 update_interval = mp->update_interval; bool last, update = false; - bool sample_status = false; int i; /* This packet was aggregated but doesn't carry status info */ @@ -1278,49 +1239,18 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, mi->ampdu_packets++; mi->ampdu_len += info->status.ampdu_len; - if (mi->sample_mode != MINSTREL_SAMPLE_IDLE) - rate_sample = minstrel_get_ratestats(mi, mi->sample_rate); - last = !minstrel_ht_txstat_valid(mp, mi, &ar[0]); for (i = 0; !last; i++) { last = (i == 
IEEE80211_TX_MAX_RATES - 1) || !minstrel_ht_txstat_valid(mp, mi, &ar[i + 1]); rate = minstrel_ht_get_stats(mp, mi, &ar[i]); - if (rate == rate_sample) - sample_status = true; - if (last) rate->success += info->status.ampdu_ack_len; rate->attempts += ar[i].count * info->status.ampdu_len; } - switch (mi->sample_mode) { - case MINSTREL_SAMPLE_IDLE: - if (mp->hw->max_rates > 1 || - mi->total_packets_cur < SAMPLE_SWITCH_THR) - update_interval /= 2; - break; - - case MINSTREL_SAMPLE_ACTIVE: - if (!sample_status) - break; - - mi->sample_mode = MINSTREL_SAMPLE_PENDING; - update = true; - break; - - case MINSTREL_SAMPLE_PENDING: - if (sample_status) - break; - - update = true; - minstrel_ht_update_stats(mp, mi, false); - break; - } - - if (mp->hw->max_rates > 1) { /* * check for sudden death of spatial multiplexing, @@ -1343,7 +1273,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, if (time_after(jiffies, mi->last_stats_update + update_interval)) { update = true; - minstrel_ht_update_stats(mp, mi, true); + minstrel_ht_update_stats(mp, mi); } if (update) @@ -1522,18 +1452,14 @@ static void minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct ieee80211_sta_rates *rates; - u16 first_rate = mi->max_tp_rate[0]; int i = 0; - if (mi->sample_mode == MINSTREL_SAMPLE_ACTIVE) - first_rate = mi->sample_rate; - rates = kzalloc(sizeof(*rates), GFP_ATOMIC); if (!rates) return; /* Start with max_tp_rate[0] */ - minstrel_ht_set_rate(mp, mi, rates, i++, first_rate); + minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]); if (mp->hw->max_rates >= 3) { /* At least 3 tx rates supported, use max_tp_rate[1] next */ @@ -1592,11 +1518,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) return; - if (mp->hw->max_rates == 1 && mp->sample_switch && - (mi->total_packets_cur >= SAMPLE_SWITCH_THR || - mp->sample_switch == 1)) - return; - if (time_is_before_jiffies(mi->sample_time)) return; @@ -1810,7 +1731,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, minstrel_ht_update_ofdm(mp, mi, sband, sta); /* create an initial rate table with the lowest supported rates */ - minstrel_ht_update_stats(mp, mi, true); + minstrel_ht_update_stats(mp, mi); minstrel_ht_update_rates(mp, mi); } @@ -1926,8 +1847,6 @@ minstrel_ht_alloc(struct ieee80211_hw *hw) if (!mp) return NULL; - mp->sample_switch = -1; - /* contention window settings * Just an approximation. 
Using the per-queue values would complicate * the calculations and is probably unnecessary */ @@ -1947,7 +1866,7 @@ minstrel_ht_alloc(struct ieee80211_hw *hw) mp->has_mrr = true; mp->hw = hw; - mp->update_interval = HZ / 10; + mp->update_interval = HZ / 20; minstrel_ht_init_cck_rates(mp); for (i = 0; i < ARRAY_SIZE(mp->hw->wiphy->bands); i++) @@ -1965,8 +1884,6 @@ static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv, mp->fixed_rate_idx = (u32) -1; debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx); - debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir, - &mp->sample_switch); } #endif diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index 0d8c15f83f5d..06e7126727ad 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -75,7 +75,6 @@ struct minstrel_priv { struct ieee80211_hw *hw; bool has_mrr; - u32 sample_switch; unsigned int cw_min; unsigned int cw_max; unsigned int max_retry; @@ -147,12 +146,6 @@ struct minstrel_mcs_group_data { struct minstrel_rate_stats rates[MCS_GROUP_RATES]; }; -enum minstrel_sample_mode { - MINSTREL_SAMPLE_IDLE, - MINSTREL_SAMPLE_ACTIVE, - MINSTREL_SAMPLE_PENDING, -}; - struct minstrel_sample_category { u8 sample_group; u16 sample_rates[MINSTREL_SAMPLE_RATES]; @@ -182,23 +175,19 @@ struct minstrel_ht_sta { unsigned int overhead_legacy; unsigned int overhead_legacy_rtscts; - unsigned int total_packets_last; - unsigned int total_packets_cur; unsigned int total_packets; unsigned int sample_packets; /* tx flags to add for frames for this sta */ u32 tx_flags; - unsigned long sample_time; - struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX]; + u8 band; u8 sample_seq; - - enum minstrel_sample_mode sample_mode; u16 sample_rate; - u8 band; + unsigned long sample_time; + struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX]; /* Bitfield of supported MCS rates of all groups */ u16 supported[MINSTREL_GROUPS_NB]; From 549fdd34b5f2dfa63e10855f20796c13a036707b Mon Sep 17 00:00:00 2001 From: Philipp Borgers Date: Mon, 25 Jan 2021 16:07:44 +0100 Subject: [PATCH 11/13] mac80211: add STBC encoding to ieee80211_parse_tx_radiotap This patch adds support for STBC encoding to the radiotap tx parse function. Prior to this change adding the STBC flag to the radiotap header did not encode frames with STBC. Signed-off-by: Philipp Borgers Link: https://lore.kernel.org/r/20210125150744.83065-1-borgers@mi.fu-berlin.de [use u8_get_bits/u32_encode_bits instead of manually shifting] Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 6ec415f043e1..5d06de61047a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2122,6 +2122,15 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC && mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC) info->flags |= IEEE80211_TX_CTL_LDPC; + + if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_STBC) { + u8 stbc = u8_get_bits(mcs_flags, + IEEE80211_RADIOTAP_MCS_STBC_MASK); + + info->flags |= + u32_encode_bits(stbc, + IEEE80211_TX_CTL_STBC); + } break; case IEEE80211_RADIOTAP_VHT: From b6db0f899a16a23f5a9ea6c8b0fafc7bbd38e03d Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Thu, 4 Feb 2021 06:46:10 -0800 Subject: [PATCH 12/13] cfg80211/mac80211: Support disabling HE mode Allow user to disable HE mode, similar to how VHT and HT can be disabled. 
Useful for testing. Signed-off-by: Ben Greear Link: https://lore.kernel.org/r/20210204144610.25971-1-greearb@candelatech.com Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 2 ++ net/mac80211/mlme.c | 3 +++ net/wireless/nl80211.c | 7 +++++++ 4 files changed, 14 insertions(+) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4cdd75449d73..911fae42b0c0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2583,12 +2583,14 @@ struct cfg80211_auth_request { * authentication capability. Drivers can offload authentication to * userspace if this flag is set. Only applicable for cfg80211_connect() * request (connect callback). + * @ASSOC_REQ_DISABLE_HE: Disable HE */ enum cfg80211_assoc_req_flags { ASSOC_REQ_DISABLE_HT = BIT(0), ASSOC_REQ_DISABLE_VHT = BIT(1), ASSOC_REQ_USE_RRM = BIT(2), CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3), + ASSOC_REQ_DISABLE_HE = BIT(4), }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 40832d13c2f1..5188fe581efc 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3045,6 +3045,8 @@ enum nl80211_attrs { NL80211_ATTR_SAR_SPEC, + NL80211_ATTR_DISABLE_HE, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0e4d950cf907..2e33a1263518 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5754,6 +5754,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, if (req->flags & ASSOC_REQ_DISABLE_VHT) ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + if (req->flags & ASSOC_REQ_DISABLE_HE) + ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + err = ieee80211_prep_connection(sdata, req->bss, true, override); if (err) goto err_clear; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 3b45a9593e71..521d36bb0803 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -752,6 +752,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { NL80211_SAE_PWE_BOTH), [NL80211_ATTR_RECONNECT_REQUESTED] = { .type = NLA_REJECT }, [NL80211_ATTR_SAR_SPEC] = NLA_POLICY_NESTED(sar_policy), + [NL80211_ATTR_DISABLE_HE] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -10019,6 +10020,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT])) req.flags |= ASSOC_REQ_DISABLE_VHT; + if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE])) + req.flags |= ASSOC_REQ_DISABLE_HE; + if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&req.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), @@ -10802,6 +10806,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT])) connect.flags |= ASSOC_REQ_DISABLE_VHT; + if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE])) + connect.flags |= ASSOC_REQ_DISABLE_HE; + if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&connect.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), From 735a48481cca453525d9199772f9c3733a47cff4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 12 Feb 2021 10:50:23 +0100 Subject: [PATCH 13/13] nl80211: add documentation for HT/VHT/HE disable attributes These were missed earlier, add the necessary documentation and, while at it, clarify it. 
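For reference, a minimal userspace sketch (libnl-3; not part of this series) showing how the new NL80211_ATTR_DISABLE_HE flag attribute would be attached to a connect request; "sk" and "nl80211_id" stand in for an already set-up socket and the resolved nl80211 family id, and error handling and message cleanup are omitted:

#include <string.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int connect_without_he(struct nl_sock *sk, int nl80211_id,
                              unsigned int ifindex, const char *ssid)
{
        struct nl_msg *msg = nlmsg_alloc();

        if (!msg)
                return -1;

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
                    NL80211_CMD_CONNECT, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
        /* new flag attribute from this series: associate without HE */
        nla_put_flag(msg, NL80211_ATTR_DISABLE_HE);

        return nl_send_auto(sk, msg);
}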
Signed-off-by: Johannes Berg Link: https://lore.kernel.org/r/20210212105023.895c3389f063.I46dea3bfc64385bc6f600c50d294007510994f8f@changeid Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5188fe581efc..ac78da99fccd 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1963,8 +1963,15 @@ enum nl80211_commands { * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire * probe-response frame. The DA field in the 802.11 header is zero-ed out, * to be filled by the FW. - * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable - * this feature. Currently, only supported in mac80211 drivers. + * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable + * this feature during association. This is a flag attribute. + * Currently only supported in mac80211 drivers. + * @NL80211_ATTR_DISABLE_VHT: Force VHT capable interfaces to disable + * this feature during association. This is a flag attribute. + * Currently only supported in mac80211 drivers. + * @NL80211_ATTR_DISABLE_HE: Force HE capable interfaces to disable + * this feature during association. This is a flag attribute. + * Currently only supported in mac80211 drivers. * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the * ATTR_HT_CAPABILITY to which attention should be paid. * Currently, only mac80211 NICs support this feature.