/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
 */

#ifndef __RC_MINSTREL_HT_H
#define __RC_MINSTREL_HT_H

/*
 * The number of streams can be changed to 2 to reduce code
 * size and memory footprint.
 */
#define MINSTREL_MAX_STREAMS		4
#define MINSTREL_HT_STREAM_GROUPS	4 /* BW(=2) * SGI(=2) */
#define MINSTREL_VHT_STREAM_GROUPS	6 /* BW(=3) * SGI(=2) */

#define MINSTREL_HT_GROUPS_NB	(MINSTREL_MAX_STREAMS *		\
				 MINSTREL_HT_STREAM_GROUPS)
#define MINSTREL_VHT_GROUPS_NB	(MINSTREL_MAX_STREAMS *		\
				 MINSTREL_VHT_STREAM_GROUPS)
#define MINSTREL_CCK_GROUPS_NB	1
#define MINSTREL_GROUPS_NB	(MINSTREL_HT_GROUPS_NB +	\
				 MINSTREL_VHT_GROUPS_NB +	\
				 MINSTREL_CCK_GROUPS_NB)
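
/*
 * Worked example with the defaults above: MINSTREL_HT_GROUPS_NB is
 * 4 * 4 = 16, MINSTREL_VHT_GROUPS_NB is 4 * 6 = 24, so MINSTREL_GROUPS_NB is
 * 16 + 24 + 1 = 41 rate groups per station.  With MINSTREL_MAX_STREAMS
 * reduced to 2 this shrinks to 2 * 4 + 2 * 6 + 1 = 21 groups.
 */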

#define MINSTREL_HT_GROUP_0	0
#define MINSTREL_CCK_GROUP	(MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB)
#define MINSTREL_VHT_GROUP_0	(MINSTREL_CCK_GROUP + 1)
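/*
 * With the default MINSTREL_MAX_STREAMS this yields the group index layout
 * 0..15 for HT, 16 for CCK and 17..40 for VHT, for MINSTREL_GROUPS_NB = 41
 * groups in total.
 */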

#define MCS_GROUP_RATES		10
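/*
 * Each group has room for MCS_GROUP_RATES rate entries.  VHT groups can use
 * all ten slots (MCS 0-9); HT groups use eight and the CCK group four, with
 * the exact per-group tables defined in rc80211_minstrel_ht.c.
 */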
|
2010-05-13 14:48:03 +00:00
|
|
|
|
|
|
|
struct mcs_group {
|
2018-10-06 17:35:02 +00:00
|
|
|
u16 flags;
|
|
|
|
u8 streams;
|
|
|
|
u8 shift;
|
mac80211: minstrel_ht: improve rate probing for devices with static fallback
On some devices that only support static rate fallback tables sending rate
control probing packets can be really expensive.
Probing lower rates can already hurt throughput quite a bit. What hurts even
more is the fact that on mt76x0/mt76x2, single probing packets can only be
forced by directing packets at a different internal hardware queue, which
causes some heavy reordering and extra latency.
The reordering issue is mainly problematic while pushing lots of packets to
a particular station. If there is little activity, the overhead of probing is
neglegible.
The static fallback behavior is designed to pretty much only handle rate
control algorithms that use only a very limited set of rates on which the
algorithm switches up/down based on packet error rate.
In order to better support that kind of hardware, this patch implements a
different approach to rate probing where it switches to a slightly higher rate,
waits for tx status feedback, then updates the stats and switches back to
the new max throughput rate. This only triggers above a packet rate of 100
per stats interval (~50ms).
For that kind of probing, the code has to reduce the set of probing rates
a lot more compared to single packet probing, so it uses only one packet
per MCS group which is either slightly faster, or as close as possible to
the max throughput rate.
This allows switching between similar rates with different numbers of
streams. The algorithm assumes that the hardware will work its way lower
within an MCS group in case of retransmissions, so that lower rates don't
have to be probed by the high packets per second rate probing code.
To further reduce the search space, it also does not probe rates with lower
channel bandwidth than the max throughput rate.
At the moment, these changes will only affect mt76x0/mt76x2.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://lore.kernel.org/r/20190820095449.45255-4-nbd@nbd.name
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
2019-08-20 09:54:49 +00:00
|
|
|
u8 bw;
|
2018-10-06 17:35:02 +00:00
|
|
|
u16 duration[MCS_GROUP_RATES];
|
2010-05-13 14:48:03 +00:00
|
|
|
};
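
/*
 * duration[] holds per-rate transmit duration estimates, stored right-shifted
 * by 'shift' bits so that they fit into u16.  A minimal sketch of how the
 * rate selection code in rc80211_minstrel_ht.c would recover the value for
 * one rate of a group (assuming 'rate' is the index within the group):
 *
 *	unsigned int airtime = group->duration[rate] << group->shift;
 *
 * 'streams' and 'bw' record the spatial stream count and bandwidth the group
 * was built for.
 */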

extern const struct mcs_group minstrel_mcs_groups[];

struct minstrel_mcs_group_data {
	u8 index;
	u8 column;

	/* sorted rate set within a MCS group */
	u16 max_group_tp_rate[MAX_THR_RATES];
	u16 max_group_prob_rate;

	/* MCS rate statistics */
	struct minstrel_rate_stats rates[MCS_GROUP_RATES];
};
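
/*
 * Rate indices stored in max_group_tp_rate[]/max_group_prob_rate (and in the
 * overall max_tp_rate[]/max_prob_rate below) are global, i.e.
 * group * MCS_GROUP_RATES + index within the group.  A minimal sketch of how
 * such an index is resolved back to its per-group statistics:
 *
 *	int group = index / MCS_GROUP_RATES;
 *	struct minstrel_rate_stats *mrs =
 *		&mi->groups[group].rates[index % MCS_GROUP_RATES];
 */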
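
/*
 * Rate probing for devices with only static rate fallback tables (at the
 * time of writing mt76x0/mt76x2): instead of mixing single probe frames into
 * the normal traffic, minstrel_ht temporarily switches to a slightly faster
 * rate, waits for tx status feedback, then returns to the max throughput
 * rate.  This is only used above roughly 100 packets per stats interval
 * (~50 ms), considers only one candidate rate per MCS group and skips rates
 * with a lower channel bandwidth than the max throughput rate.  sample_mode
 * below tracks where such a probe currently stands; sample_rate holds the
 * global index of the rate being probed.
 */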
enum minstrel_sample_mode {
	MINSTREL_SAMPLE_IDLE,
	MINSTREL_SAMPLE_ACTIVE,
	MINSTREL_SAMPLE_PENDING,
};

struct minstrel_ht_sta {
	struct ieee80211_sta *sta;

	/* ampdu length (average, per sampling interval) */
	unsigned int ampdu_len;
	unsigned int ampdu_packets;

	/* ampdu length (EWMA) */
	unsigned int avg_ampdu_len;

	/* overall sorted rate set */
	u16 max_tp_rate[MAX_THR_RATES];
	u16 max_prob_rate;

	/* time of last status update */
	unsigned long last_stats_update;

	/* overhead time in usec for each frame */
	unsigned int overhead;
	unsigned int overhead_rtscts;

	unsigned int total_packets_last;
	unsigned int total_packets_cur;
	unsigned int total_packets;
	unsigned int sample_packets;
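
	/*
	 * A note on the counters above: total_packets counts all transmitted
	 * frames and sample_packets those sent while probing rates, while
	 * total_packets_last/total_packets_cur snapshot the per-stats-interval
	 * frame count, which presumably drives the ~100 packets per interval
	 * check for the probing scheme described above.
	 */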

	/* tx flags to add for frames for this sta */
	u32 tx_flags;

	u8 sample_wait;
	u8 sample_tries;
	u8 sample_count;
	u8 sample_slow;

	enum minstrel_sample_mode sample_mode;
	u16 sample_rate;

	/* current MCS group to be sampled */
	u8 sample_group;

	u8 cck_supported;
	u8 cck_supported_short;
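
	/*
	 * cck_supported/cck_supported_short above are bitmasks of the CCK
	 * rates (1, 2, 5.5, 11 Mbit/s) usable for this station, the latter
	 * restricted to rates that may use short preamble; they back the
	 * MINSTREL_CCK_GROUP entry of supported[] below.
	 */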

	/* Bitfield of supported MCS rates of all groups */
	u16 supported[MINSTREL_GROUPS_NB];

	/* MCS rate group info and statistics */
	struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB];
};

struct minstrel_ht_sta_priv {
	union {
		struct minstrel_ht_sta ht;
		struct minstrel_sta_info legacy;
	};
	void *ratelist;
	void *sample_table;
	bool is_ht;
};
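
/*
 * is_ht selects which union member is valid: the HT/VHT state when true, the
 * legacy minstrel state otherwise, with ratelist/sample_table providing
 * backing storage for the legacy rate and sample tables.  A minimal sketch of
 * the expected access pattern (use_ht()/use_legacy() are hypothetical
 * callers, not part of this header):
 *
 *	struct minstrel_ht_sta_priv *msp = priv_sta;
 *
 *	if (msp->is_ht)
 *		use_ht(&msp->ht);
 *	else
 *		use_legacy(&msp->legacy);
 */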

void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
			   int prob_avg);
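
/*
 * minstrel_ht_add_sta_debugfs() registers the per-station rate statistics
 * files; minstrel_ht_get_tp_avg() returns the expected throughput estimate
 * for the rate identified by (group, rate within the group), given the
 * averaged success probability prob_avg.  Exact scaling is defined by the
 * implementation in rc80211_minstrel_ht.c.
 */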

#endif