Merge tag 'wireless-next-2022-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Kalle Valo says:

====================
wireless-next patches for v6.2

Third set of patches for v6.2. mt76 has a new driver for mt7996 Wi-Fi 7
devices and iwlwifi also got initial Wi-Fi 7 support. Otherwise
smaller features and fixes.

Major changes:

ath10k
 - store WLAN firmware version in SMEM image table

mt76
 - mt7996: new driver for MediaTek Wi-Fi 7 (802.11be) devices
 - mt7986, mt7915: enable Wireless Ethernet Dispatch (WED) offload support
 - mt7915: add ack signal support
 - mt7915: enable coredump support
 - mt7921: remain_on_channel support
 - mt7921: channel context support

iwlwifi
 - enable Wi-Fi 7 Extremely High Throughput (EHT) PHY capabilities
 - 320 MHz channels support

* tag 'wireless-next-2022-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (144 commits)
  wifi: ath10k: fix QCOM_SMEM dependency
  wifi: mt76: mt7921e: add pci .shutdown() support
  wifi: mt76: mt7915: mmio: fix naming convention
  wifi: mt76: mt7996: add support to configure spatial reuse parameter set
  wifi: mt76: mt7996: enable ack signal support
  wifi: mt76: mt7996: enable use_cts_prot support
  wifi: mt76: mt7915: rely on band_idx of mt76_phy
  wifi: mt76: mt7915: enable per bandwidth power limit support
  wifi: mt76: mt7915: introduce mt7915_get_power_bound()
  mt76: mt7915: Fix PCI device refcount leak in mt7915_pci_init_hif2()
  wifi: mt76: do not send firmware FW_FEATURE_NON_DL region
  wifi: mt76: mt7921: Add missing __packed annotation of struct mt7921_clc
  wifi: mt76: fix coverity overrun-call in mt76_get_txpower()
  wifi: mt76: mt7996: add driver for MediaTek Wi-Fi 7 (802.11be) devices
  wifi: mt76: mt76x0: remove dead code in mt76x0_phy_get_target_power
  wifi: mt76: mt7915: fix band_idx usage
  wifi: mt76: mt7915: enable .sta_set_txpwr support
  wifi: mt76: mt7915: add baseband Txpower info into debugfs
  wifi: mt76: mt7915: add support to configure spatial reuse parameter set
  wifi: mt76: mt7915: add missing MODULE_PARM_DESC
  ...
====================

Link: https://lore.kernel.org/r/20221202214254.D0D3DC433C1@smtp.kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2022-12-02 20:33:29 -08:00
commit edd4e25a23
157 changed files with 19257 additions and 1673 deletions

View File

@ -44,6 +44,7 @@ config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_SMEM
select QCOM_SCM
select QCOM_QMI_HELPERS
help

View File

@ -1379,7 +1379,7 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_get_tid(hdr, tid, sizeof(tid)),
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
(__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
@ -1844,15 +1844,14 @@ static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
u16 offset,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
hdr = (struct ieee80211_hdr *)(skb->data + offset);
ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
hdr = (struct ieee80211_hdr *)skb->data;
ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
pn = ehdr[0];
@ -1866,19 +1865,17 @@ static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
}
static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
struct sk_buff *skb,
u16 offset)
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + offset);
hdr = (struct ieee80211_hdr *)skb->data;
return !is_multicast_ether_addr(hdr->addr1);
}
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
struct sk_buff *skb,
u16 peer_id,
u16 offset,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_peer *peer;
@ -1893,16 +1890,16 @@ static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
return false;
}
hdr = (struct ieee80211_hdr *)(skb->data + offset);
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr);
else
tid = ATH10K_TXRX_NON_QOS_TID;
last_pn = &peer->frag_tids_last_pn[tid];
new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
if (frag_number == 0) {
last_pn->pn48 = new_pn.pn48;
@ -2059,13 +2056,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
msdu,
peer_id,
0,
enctype);
if (frag)
multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
msdu,
0);
msdu);
if (!frag_pn_check || !multicast_check) {
/* Discard the fragment with invalid PN or multicast DA
@ -2824,7 +2819,7 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
hdr_space = ieee80211_hdrlen(hdr->frame_control);
sc = __le16_to_cpu(hdr->seq_ctrl);
seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
seq = IEEE80211_SEQ_TO_SN(sc);
frag = sc & IEEE80211_SCTL_FRAG;
sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
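
The sequence-number changes in this file swap the open-coded (seq_ctrl & IEEE80211_SCTL_SEQ) >> 4 for mac80211's IEEE80211_SEQ_TO_SN() macro. For reference, a minimal sketch of the relevant definitions, matching mainline include/linux/ieee80211.h at the time of writing, shown only to make the equivalence explicit:

/* sequence-control field helpers from include/linux/ieee80211.h */
#define IEEE80211_SCTL_FRAG		0x000F
#define IEEE80211_SCTL_SEQ		0xFFF0
#define IEEE80211_SEQ_TO_SN(seq)	(((seq) & IEEE80211_SCTL_SEQ) >> 4)

/* example: seq_ctrl = 0x1235 -> fragment 5, sequence number 0x123 */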

View File

@ -3792,18 +3792,22 @@ static struct pci_driver ath10k_pci_driver = {
static int __init ath10k_pci_init(void)
{
int ret;
int ret1, ret2;
ret = pci_register_driver(&ath10k_pci_driver);
if (ret)
ret1 = pci_register_driver(&ath10k_pci_driver);
if (ret1)
printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
ret);
ret1);
ret = ath10k_ahb_init();
if (ret)
printk(KERN_ERR "ahb init failed: %d\n", ret);
ret2 = ath10k_ahb_init();
if (ret2)
printk(KERN_ERR "ahb init failed: %d\n", ret2);
return ret;
if (ret1 && ret2)
return ret1;
/* registered to at least one bus */
return 0;
}
module_init(ath10k_pci_init);

View File

@ -14,6 +14,7 @@
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <linux/string.h>
#include <net/sock.h>
@ -22,6 +23,10 @@
#define ATH10K_QMI_CLIENT_ID 0x4b4e454c
#define ATH10K_QMI_TIMEOUT 30
#define SMEM_IMAGE_VERSION_TABLE 469
#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
#define SMEM_IMAGE_VERSION_ENTRY_SIZE 128
#define SMEM_IMAGE_VERSION_NAME_SIZE 75
static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
struct ath10k_msa_mem_info *mem_info)
@ -536,6 +541,33 @@ int ath10k_qmi_wlan_disable(struct ath10k *ar)
return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
}
static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id)
{
u8 *table_ptr;
size_t smem_item_size;
const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX *
SMEM_IMAGE_VERSION_ENTRY_SIZE;
table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
SMEM_IMAGE_VERSION_TABLE,
&smem_item_size);
if (IS_ERR(table_ptr)) {
ath10k_err(ar, "smem image version table not found\n");
return;
}
if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE >
smem_item_size) {
ath10k_err(ar, "smem block size too small: %zu\n",
smem_item_size);
return;
}
strscpy(table_ptr + smem_img_idx_wlan, fw_build_id,
SMEM_IMAGE_VERSION_NAME_SIZE);
}
static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
{
struct wlfw_cap_resp_msg_v01 *resp;
@ -606,6 +638,9 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
}
if (resp->fw_build_id_valid)
ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id);
kfree(resp);
return 0;
@ -618,7 +653,7 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
struct wlfw_host_cap_resp_msg_v01 resp = {};
struct wlfw_host_cap_req_msg_v01 req = {};
struct qmi_elem_info *req_ei;
const struct qmi_elem_info *req_ei;
struct ath10k *ar = qmi->ar;
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;

View File

@ -7,7 +7,7 @@
#include <linux/types.h>
#include "qmi_wlfw_v01.h"
static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -56,7 +56,7 @@ static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -87,7 +87,7 @@ static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@ -109,7 +109,7 @@ static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -122,7 +122,7 @@ static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -153,7 +153,7 @@ static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -184,7 +184,7 @@ static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -225,7 +225,7 @@ static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -256,7 +256,7 @@ static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -278,7 +278,7 @@ static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -291,7 +291,7 @@ static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -304,7 +304,7 @@ static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
{}
};
static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
static const struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -326,7 +326,7 @@ static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -528,7 +528,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -560,15 +560,15 @@ struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -626,7 +626,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@ -657,7 +657,7 @@ struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -671,7 +671,7 @@ struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -805,7 +805,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -819,11 +819,11 @@ struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -949,7 +949,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@ -1079,7 +1079,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1093,7 +1093,7 @@ struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@ -1133,7 +1133,7 @@ struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1147,7 +1147,7 @@ struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@ -1160,7 +1160,7 @@ struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@ -1272,7 +1272,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1286,7 +1286,7 @@ struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@ -1308,7 +1308,7 @@ struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@ -1330,7 +1330,7 @@ struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1443,7 +1443,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -1465,7 +1465,7 @@ struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1498,11 +1498,11 @@ struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1516,7 +1516,7 @@ struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -1538,7 +1538,7 @@ struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1552,7 +1552,7 @@ struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -1583,7 +1583,7 @@ struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1624,7 +1624,7 @@ struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -1664,7 +1664,7 @@ struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1678,7 +1678,7 @@ struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -1691,7 +1691,7 @@ struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1705,7 +1705,7 @@ struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -1727,7 +1727,7 @@ struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1741,7 +1741,7 @@ struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -1988,7 +1988,7 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -2010,7 +2010,7 @@ struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -2024,7 +2024,7 @@ struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@ -2047,7 +2047,7 @@ struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@ -2070,7 +2070,7 @@ struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -2084,15 +2084,15 @@ struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -2168,11 +2168,11 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -2186,7 +2186,7 @@ struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -2208,7 +2208,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -2258,7 +2258,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -2280,7 +2280,7 @@ struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -2294,7 +2294,7 @@ struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
{}
};
struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,

View File

@ -215,7 +215,7 @@ struct wlfw_ind_register_req_msg_v01 {
};
#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50
extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
struct wlfw_ind_register_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@ -224,21 +224,21 @@ struct wlfw_ind_register_resp_msg_v01 {
};
#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
struct wlfw_fw_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
struct wlfw_msa_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
struct wlfw_pin_connect_result_ind_msg_v01 {
u8 pwr_pin_result_valid;
@ -250,7 +250,7 @@ struct wlfw_pin_connect_result_ind_msg_v01 {
};
#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
struct wlfw_wlan_mode_req_msg_v01 {
enum wlfw_driver_mode_enum_v01 mode;
@ -259,14 +259,14 @@ struct wlfw_wlan_mode_req_msg_v01 {
};
#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
struct wlfw_wlan_mode_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
struct wlfw_wlan_cfg_req_msg_v01 {
u8 host_version_valid;
@ -286,21 +286,21 @@ struct wlfw_wlan_cfg_req_msg_v01 {
};
#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
struct wlfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
struct wlfw_cap_req_msg_v01 {
char placeholder;
};
#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
struct wlfw_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@ -319,7 +319,7 @@ struct wlfw_cap_resp_msg_v01 {
};
#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
struct wlfw_bdf_download_req_msg_v01 {
u8 valid;
@ -339,14 +339,14 @@ struct wlfw_bdf_download_req_msg_v01 {
};
#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
struct wlfw_bdf_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
struct wlfw_cal_report_req_msg_v01 {
u32 meta_data_len;
@ -356,21 +356,21 @@ struct wlfw_cal_report_req_msg_v01 {
};
#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
struct wlfw_cal_report_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
struct wlfw_initiate_cal_download_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
};
#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
struct wlfw_cal_download_req_msg_v01 {
u8 valid;
@ -388,14 +388,14 @@ struct wlfw_cal_download_req_msg_v01 {
};
#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
struct wlfw_cal_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
struct wlfw_initiate_cal_update_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
@ -403,7 +403,7 @@ struct wlfw_initiate_cal_update_ind_msg_v01 {
};
#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
struct wlfw_cal_update_req_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
@ -411,7 +411,7 @@ struct wlfw_cal_update_req_msg_v01 {
};
#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
struct wlfw_cal_update_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@ -429,7 +429,7 @@ struct wlfw_cal_update_resp_msg_v01 {
};
#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
struct wlfw_msa_info_req_msg_v01 {
u64 msa_addr;
@ -437,7 +437,7 @@ struct wlfw_msa_info_req_msg_v01 {
};
#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
struct wlfw_msa_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@ -446,21 +446,21 @@ struct wlfw_msa_info_resp_msg_v01 {
};
#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
struct wlfw_msa_ready_req_msg_v01 {
char placeholder;
};
#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
struct wlfw_msa_ready_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
struct wlfw_ini_req_msg_v01 {
u8 enablefwlog_valid;
@ -468,14 +468,14 @@ struct wlfw_ini_req_msg_v01 {
};
#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
struct wlfw_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
struct wlfw_athdiag_read_req_msg_v01 {
u32 offset;
@ -484,7 +484,7 @@ struct wlfw_athdiag_read_req_msg_v01 {
};
#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
struct wlfw_athdiag_read_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@ -494,7 +494,7 @@ struct wlfw_athdiag_read_resp_msg_v01 {
};
#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
struct wlfw_athdiag_write_req_msg_v01 {
u32 offset;
@ -504,28 +504,28 @@ struct wlfw_athdiag_write_req_msg_v01 {
};
#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
struct wlfw_athdiag_write_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
struct wlfw_vbatt_req_msg_v01 {
u64 voltage_uv;
};
#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
struct wlfw_vbatt_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
struct wlfw_mac_addr_req_msg_v01 {
u8 mac_addr_valid;
@ -533,14 +533,14 @@ struct wlfw_mac_addr_req_msg_v01 {
};
#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
struct wlfw_mac_addr_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
#define QMI_WLFW_MAX_NUM_GPIO_V01 32
struct wlfw_host_cap_req_msg_v01 {
@ -574,15 +574,15 @@ struct wlfw_host_cap_req_msg_v01 {
};
#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
struct wlfw_request_mem_ind_msg_v01 {
u32 mem_seg_len;
@ -590,7 +590,7 @@ struct wlfw_request_mem_ind_msg_v01 {
};
#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
struct wlfw_respond_mem_req_msg_v01 {
u32 mem_seg_len;
@ -598,28 +598,28 @@ struct wlfw_respond_mem_req_msg_v01 {
};
#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
struct wlfw_respond_mem_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
struct wlfw_mem_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
struct wlfw_fw_init_done_ind_msg_v01 {
char placeholder;
};
#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
struct wlfw_rejuvenate_ind_msg_v01 {
u8 cause_for_rejuvenation_valid;
@ -633,21 +633,21 @@ struct wlfw_rejuvenate_ind_msg_v01 {
};
#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
struct wlfw_rejuvenate_ack_req_msg_v01 {
char placeholder;
};
#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
struct wlfw_rejuvenate_ack_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
struct wlfw_dynamic_feature_mask_req_msg_v01 {
u8 mask_valid;
@ -655,7 +655,7 @@ struct wlfw_dynamic_feature_mask_req_msg_v01 {
};
#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
struct wlfw_dynamic_feature_mask_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@ -666,7 +666,7 @@ struct wlfw_dynamic_feature_mask_resp_msg_v01 {
};
#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
struct wlfw_m3_info_req_msg_v01 {
u64 addr;
@ -674,20 +674,20 @@ struct wlfw_m3_info_req_msg_v01 {
};
#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
struct wlfw_m3_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
struct wlfw_xo_cal_ind_msg_v01 {
u8 xo_cal_data;
};
#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
extern const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
#endif

View File

@ -195,6 +195,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
},
{
.name = "qca6390 hw2.0",
@ -277,6 +278,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
},
{
.name = "qcn9074 hw1.0",
@ -356,6 +358,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
},
{
.name = "wcn6855 hw2.0",
@ -438,6 +441,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
},
{
.name = "wcn6855 hw2.1",
@ -519,6 +523,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
},
{
.name = "wcn6750 hw1.0",
@ -597,6 +602,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = false,
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
},
};

View File

@ -219,6 +219,7 @@ struct ath11k_hw_params {
bool tcl_ring_retry;
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
};
struct ath11k_hw_ops {

View File

@ -8010,6 +8010,7 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
int recovery_count;
struct ath11k_vif *arvif;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
@ -8045,6 +8046,12 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
}
}
if (ar->ab->hw_params.support_fw_mac_sequence) {
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
ieee80211_hw_restart_disconnect(arvif->vif);
}
}
}
mutex_unlock(&ar->conf_mutex);

View File

@ -163,7 +163,7 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
u32 ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);

View File

@ -29,7 +29,7 @@ module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644);
MODULE_PARM_DESC(cold_boot_cal,
"Decrease the channel switch time but increase the driver load time (Default: true)");
static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -280,7 +280,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -297,7 +297,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -522,7 +522,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -558,7 +558,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -590,7 +590,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -632,7 +632,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@ -659,7 +659,7 @@ static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -699,7 +699,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@ -726,7 +726,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -744,7 +744,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@ -752,7 +752,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@ -760,7 +760,7 @@ static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -814,7 +814,7 @@ static struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -840,7 +840,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -857,7 +857,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -873,7 +873,7 @@ static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -899,7 +899,7 @@ static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1100,7 +1100,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@ -1235,7 +1235,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1253,7 +1253,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@ -1277,7 +1277,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1294,7 +1294,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -1347,7 +1347,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -1382,7 +1382,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@ -1406,7 +1406,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -1423,7 +1423,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@ -1458,7 +1458,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1476,7 +1476,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -1615,7 +1615,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1632,28 +1632,28 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@ -1679,7 +1679,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@ -1697,7 +1697,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
static struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@ -3090,6 +3090,9 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
.fn = ath11k_qmi_msg_fw_init_done_cb,
},
/* end of list */
{},
};
static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
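
The trailing empty entry added to ath11k_qmi_msg_handlers[] above matters because qmi_handle_init() receives the handler table without a length: the QMI core walks the array until it reaches an all-zero terminator. A rough sketch of that lookup, simplified and not the exact drivers/soc/qcom/qmi_interface.c code:

static const struct qmi_msg_handler *
demo_find_handler(const struct qmi_msg_handler *handlers,
		  unsigned int type, unsigned int msg_id)
{
	const struct qmi_msg_handler *handler;

	/* stops at the all-zero {} sentinel; without it this walk
	 * would run past the end of the array */
	for (handler = handlers; handler->fn; handler++)
		if (handler->type == type && handler->msg_id == msg_id)
			return handler;

	return NULL;
}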

View File

@ -585,7 +585,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 *p_gpm = NULL, mismatch = 0, more_data;
u32 *p_gpm = NULL, more_data;
u32 offset;
u8 recv_type = 0, recv_opcode = 0;
bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
@ -656,7 +656,6 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
} else {
ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
*(p_gpm + 1));
mismatch++;
ar9003_mci_process_gpm_extra(ah, recv_type,
recv_opcode, p_gpm);
}

View File

@ -1678,7 +1678,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf = NULL;
int sent = 0;
int i, ret;
INIT_LIST_HEAD(&bf_q);
@ -1707,7 +1706,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
bf_tail = bf;
nframes--;
sent++;
TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
if (an->sta && skb_queue_empty(&tid->retry_q))

View File

@ -118,10 +118,10 @@ struct carl9170_reg_list {
} __packed;
struct carl9170_write_reg {
struct {
DECLARE_FLEX_ARRAY(struct {
__le32 addr;
__le32 val;
} regs[0] __packed;
} __packed, regs);
} __packed;
struct carl9170_write_reg_byte {
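
The carl9170 conversion above is needed because C does not allow a flexible-array member to be the only member of a struct; DECLARE_FLEX_ARRAY() works around that while keeping regs addressable under the same name. Roughly, simplified from include/linux/stddef.h (the zero-size empty struct keeps the layout unchanged):

/* simplified form of the helper */
#define DECLARE_FLEX_ARRAY(TYPE, NAME)		\
	struct {				\
		struct { } __empty_ ## NAME;	\
		TYPE NAME[];			\
	}

/* cmd->regs[i].addr and cmd->regs[i].val accesses therefore stay exactly
 * as they were, and the struct still has no fixed-size payload header */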

View File

@ -5784,15 +5784,12 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason)
static void b43_print_driverinfo(void)
{
const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
const char *feat_pci = "", *feat_nphy = "",
*feat_leds = "", *feat_sdio = "";
#ifdef CONFIG_B43_PCI_AUTOSELECT
feat_pci = "P";
#endif
#ifdef CONFIG_B43_PCMCIA
feat_pcmcia = "M";
#endif
#ifdef CONFIG_B43_PHY_N
feat_nphy = "N";
#endif
@ -5803,9 +5800,8 @@ static void b43_print_driverinfo(void)
feat_sdio = "S";
#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
"[ Features: %s%s%s%s%s ]\n",
feat_pci, feat_pcmcia, feat_nphy,
feat_leds, feat_sdio);
"[ Features: %s%s%s%s ]\n",
feat_pci, feat_nphy, feat_leds, feat_sdio);
}
static int __init b43_init(void)

View File

@ -101,6 +101,9 @@
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
#define BRCMF_MAX_CHANSPEC_LIST \
(BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1)
struct brcmf_dump_survey {
u32 obss;
u32 ibss;
@ -6840,6 +6843,13 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
total = le32_to_cpu(list->count);
if (total > BRCMF_MAX_CHANSPEC_LIST) {
bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
total);
err = -EINVAL;
goto fail_pbuf;
}
for (i = 0; i < total; i++) {
ch.chspec = (u16)le32_to_cpu(list->element[i]);
cfg->d11inf.decchspec(&ch);
@ -6985,6 +6995,13 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
list = (struct brcmf_chanspec_list *)pbuf;
num_chan = le32_to_cpu(list->count);
if (num_chan > BRCMF_MAX_CHANSPEC_LIST) {
bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
num_chan);
kfree(pbuf);
return -EINVAL;
}
for (i = 0; i < num_chan; i++) {
ch.chspec = (u16)le32_to_cpu(list->element[i]);
cfg->d11inf.decchspec(&ch);
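
For context on the new bound: the chanspec list comes back from firmware in a BRCMF_DCMD_MEDLEN-byte buffer laid out as a __le32 count followed by __le32 entries, so BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1 is the largest count that can actually fit; anything bigger indicates a corrupt or malicious reply. A rough sketch of the layout being validated (field names as used by the code above; the exact definition lives in the driver headers):

struct brcmf_chanspec_list {
	__le32 count;		/* number of chanspecs that follow */
	__le32 element[];	/* chanspec values, one per channel */
};

/* the check rejects any reply whose count claims more entries than the
 * BRCMF_DCMD_MEDLEN buffer can hold after the count word itself */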

View File

@ -110,9 +110,9 @@ static int brcmf_c_download(struct brcmf_if *ifp, u16 flag,
dload_buf->dload_type = cpu_to_le16(DL_TYPE_CLM);
dload_buf->len = cpu_to_le32(len);
dload_buf->crc = cpu_to_le32(0);
len = sizeof(*dload_buf) + len - 1;
err = brcmf_fil_iovar_data_set(ifp, "clmload", dload_buf, len);
err = brcmf_fil_iovar_data_set(ifp, "clmload", dload_buf,
struct_size(dload_buf, data, len));
return err;
}
@ -139,7 +139,8 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
return 0;
}
chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
chunk_buf = kzalloc(struct_size(chunk_buf, data, MAX_CHUNK_LEN),
GFP_KERNEL);
if (!chunk_buf) {
err = -ENOMEM;
goto done;

View File

@ -943,7 +943,7 @@ struct brcmf_dload_data_le {
__le16 dload_type;
__le32 len;
__le32 crc;
u8 data[1];
u8 data[];
};
/**
@ -1049,7 +1049,7 @@ struct brcmf_gscan_config {
u8 count_of_channel_buckets;
u8 retry_threshold;
__le16 lost_ap_window;
struct brcmf_gscan_bucket_config bucket[1];
struct brcmf_gscan_bucket_config bucket[];
};
/**
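
These brcmfmac changes are one pattern applied twice: the one-element arrays data[1] and bucket[1] become true flexible arrays, and the hand-written sizeof(*buf) + len - 1 arithmetic becomes struct_size() from include/linux/overflow.h, which computes the same total but saturates instead of wrapping on overflow. A minimal sketch of the idiom (struct and function names here are illustrative, not the driver's):

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>
#include <linux/types.h>

struct demo_blob {
	__le16 type;
	__le32 len;
	u8 data[];		/* was: u8 data[1]; */
};

static struct demo_blob *demo_blob_alloc(size_t payload)
{
	struct demo_blob *buf;

	/* struct_size(buf, data, payload) == sizeof(*buf) + payload here,
	 * but saturates at SIZE_MAX so an absurd payload fails the
	 * allocation instead of producing a short buffer */
	buf = kzalloc(struct_size(buf, data, payload), GFP_KERNEL);
	if (buf)
		buf->len = cpu_to_le32(payload);

	return buf;
}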

View File

@ -405,7 +405,7 @@ static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
if (n_buckets < 0)
return n_buckets;
gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
gsz = struct_size(gscan_cfg, bucket, n_buckets);
gscan_cfg = kzalloc(gsz, GFP_KERNEL);
if (!gscan_cfg) {
err = -ENOMEM;
@ -434,8 +434,8 @@ static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;
gscan_cfg->count_of_channel_buckets = n_buckets;
memcpy(&gscan_cfg->bucket[0], buckets,
n_buckets * sizeof(*buckets));
memcpy(gscan_cfg->bucket, buckets,
array_size(n_buckets, sizeof(*buckets)));
err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);
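The clmload and gscan hunks above replace the old one-element-array sizing (u8 data[1] plus sizeof(*buf) + len - 1) with C99 flexible array members sized through the overflow-checked struct_size()/array_size() helpers. A rough plain-C sketch of the pattern, modelled loosely on the brcmf_dload_data_le layout rather than the driver's actual code, with offsetof() standing in for what struct_size() computes:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical download record with a trailing flexible array member
 * (the old style used "u8 data[1]" and sized it with sizeof() + len - 1).
 */
struct dload_buf {
	uint16_t dload_type;
	uint32_t len;
	uint32_t crc;
	uint8_t data[];
};

static struct dload_buf *dload_alloc(const uint8_t *payload, uint32_t len)
{
	/* struct_size(buf, data, len) in the kernel additionally checks for
	 * arithmetic overflow; offsetof() + len is the plain-C equivalent.
	 */
	struct dload_buf *buf = calloc(1, offsetof(struct dload_buf, data) + len);

	if (!buf)
		return NULL;
	buf->len = len;
	memcpy(buf->data, payload, len);
	return buf;
}
```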


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@ -398,7 +398,7 @@ struct iwl_he_backoff_conf {
* @IWL_HE_PKT_EXT_64QAM: 64-QAM
* @IWL_HE_PKT_EXT_256QAM: 256-QAM
* @IWL_HE_PKT_EXT_1024QAM: 1024-QAM
* @IWL_HE_PKT_EXT_RESERVED: reserved value
* @IWL_HE_PKT_EXT_4096QAM: 4096-QAM, for EHT only
* @IWL_HE_PKT_EXT_NONE: not defined
*/
enum iwl_he_pkt_ext_constellations {
@ -408,7 +408,7 @@ enum iwl_he_pkt_ext_constellations {
IWL_HE_PKT_EXT_64QAM,
IWL_HE_PKT_EXT_256QAM,
IWL_HE_PKT_EXT_1024QAM,
IWL_HE_PKT_EXT_RESERVED,
IWL_HE_PKT_EXT_4096QAM,
IWL_HE_PKT_EXT_NONE,
};


@ -13,10 +13,12 @@
#define PHY_BAND_6 (2)
/* Supported channel width, vary if there is VHT support */
#define PHY_VHT_CHANNEL_MODE20 (0x0)
#define PHY_VHT_CHANNEL_MODE40 (0x1)
#define PHY_VHT_CHANNEL_MODE80 (0x2)
#define PHY_VHT_CHANNEL_MODE160 (0x3)
#define IWL_PHY_CHANNEL_MODE20 0x0
#define IWL_PHY_CHANNEL_MODE40 0x1
#define IWL_PHY_CHANNEL_MODE80 0x2
#define IWL_PHY_CHANNEL_MODE160 0x3
/* and 320 MHz for EHT */
#define IWL_PHY_CHANNEL_MODE320 0x4
/*
* Control channel position:
@ -24,20 +26,17 @@
* For VHT - bit-2 marks if the control is lower/upper relative to center-freq
* bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
* center_freq
* |
* 40Mhz |_______|_______|
* 80Mhz |_______|_______|_______|_______|
* 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
* code 011 010 001 000 | 100 101 110 111
* For EHT - bit-3 is used for extended distance
* |
* 40Mhz |____|____|
* 80Mhz |____|____|____|____|
* 160Mhz |____|____|____|____|____|____|____|____|
* 320MHz |____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|
* code 1011 1010 1001 1000 0011 0010 0001 0000 0100 0101 0110 0111 1100 1101 1110 1111
*/
#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
#define IWL_PHY_CTRL_POS_ABOVE 0x4
#define IWL_PHY_CTRL_POS_OFFS_EXT 0x8
#define IWL_PHY_CTRL_POS_OFFS_MSK 0x3
/*
* struct iwl_fw_channel_info_v1 - channel information


@ -36,14 +36,14 @@ enum iwl_tlc_mng_cfg_flags {
* @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
* @IWL_TLC_MNG_CH_WIDTH_320MHZ: 320MHZ channel
*/
enum iwl_tlc_mng_cfg_cw {
IWL_TLC_MNG_CH_WIDTH_20MHZ,
IWL_TLC_MNG_CH_WIDTH_40MHZ,
IWL_TLC_MNG_CH_WIDTH_80MHZ,
IWL_TLC_MNG_CH_WIDTH_160MHZ,
IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
IWL_TLC_MNG_CH_WIDTH_320MHZ,
};
/**
@ -64,8 +64,7 @@ enum iwl_tlc_mng_cfg_chains {
* @IWL_TLC_MNG_MODE_HT: enable HT
* @IWL_TLC_MNG_MODE_VHT: enable VHT
* @IWL_TLC_MNG_MODE_HE: enable HE
* @IWL_TLC_MNG_MODE_INVALID: invalid value
* @IWL_TLC_MNG_MODE_NUM: a count of possible modes
* @IWL_TLC_MNG_MODE_EHT: enable EHT
*/
enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_CCK = 0,
@ -74,8 +73,7 @@ enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_HT,
IWL_TLC_MNG_MODE_VHT,
IWL_TLC_MNG_MODE_HE,
IWL_TLC_MNG_MODE_INVALID,
IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
IWL_TLC_MNG_MODE_EHT,
};
/**


@ -218,6 +218,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
type = "HT";
else if (format == RATE_MCS_HE_MSK)
type = "HE";
else if (format == RATE_MCS_EHT_MSK)
type = "EHT";
else
type = "Unknown"; /* shouldn't happen */


@ -1971,3 +1971,6 @@ MODULE_PARM_DESC(remove_when_gone,
module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
S_IRUGO);
MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
module_param_named(disable_11be, iwlwifi_mod_params.disable_11be, bool, 0444);
MODULE_PARM_DESC(disable_11be, "Disable EHT capabilities (default: false)");


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018 Intel Corporation
* Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#ifndef __iwl_eeprom_parse_h__
@ -31,6 +31,7 @@ struct iwl_nvm_data {
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;
bool sku_cap_11be_enable;
u16 radio_cfg_type;
u8 radio_cfg_step;


@ -62,6 +62,7 @@ enum iwl_uapsd_disable {
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
* @enable_ini: enable new FW debug infratructure (INI TLVs)
* @disable_11be: disable EHT capabilities, default = false.
*/
struct iwl_mod_params {
int swcrypto;


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -546,7 +546,7 @@ static const u8 iwl_vendor_caps[] = {
0x00
};
static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
{
.types_mask = BIT(NL80211_IFTYPE_STATION),
.he_cap = {
@ -571,10 +571,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS |
IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
@ -631,6 +627,78 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
*/
.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
.eht_cap = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
.phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
.phy_cap_info[1] =
IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
.phy_cap_info[3] =
IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
.phy_cap_info[4] =
IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI,
.phy_cap_info[5] =
IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
.phy_cap_info[6] =
IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
.phy_cap_info[8] =
IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA |
IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA,
},
/* For all MCS and bandwidth, set 2 NSS for both Tx and
* Rx - note we don't set the only_20mhz, but due to this
* being a union, it gets set correctly anyway.
*/
.eht_mcs_nss_supp = {
.bw._80 = {
.rx_tx_mcs9_max_nss = 0x22,
.rx_tx_mcs11_max_nss = 0x22,
.rx_tx_mcs13_max_nss = 0x22,
},
.bw._160 = {
.rx_tx_mcs9_max_nss = 0x22,
.rx_tx_mcs11_max_nss = 0x22,
.rx_tx_mcs13_max_nss = 0x22,
},
.bw._320 = {
.rx_tx_mcs9_max_nss = 0x22,
.rx_tx_mcs11_max_nss = 0x22,
.rx_tx_mcs13_max_nss = 0x22,
},
},
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
},
{
.types_mask = BIT(NL80211_IFTYPE_AP),
@ -644,9 +712,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
@ -687,6 +752,49 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
*/
.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
.eht_cap = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
.phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI,
.phy_cap_info[5] =
IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
},
/* For all MCS and bandwidth, set 2 NSS for both Tx and
* Rx - note we don't set the only_20mhz, but due to this
* being a union, it gets set correctly anyway.
*/
.eht_mcs_nss_supp = {
.bw._80 = {
.rx_tx_mcs9_max_nss = 0x22,
.rx_tx_mcs11_max_nss = 0x22,
.rx_tx_mcs13_max_nss = 0x22,
},
.bw._160 = {
.rx_tx_mcs9_max_nss = 0x22,
.rx_tx_mcs11_max_nss = 0x22,
.rx_tx_mcs13_max_nss = 0x22,
},
.bw._320 = {
.rx_tx_mcs9_max_nss = 0x22,
.rx_tx_mcs11_max_nss = 0x22,
.rx_tx_mcs13_max_nss = 0x22,
},
},
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
},
};
@ -738,6 +846,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
static void
iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
struct ieee80211_sband_iftype_data *iftype_data,
u8 tx_chains, u8 rx_chains,
@ -745,6 +854,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
{
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
/* Advertise an A-MPDU exponent extension based on
* operating band
*/
@ -755,9 +867,30 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
if (is_ap && iwlwifi_mod_params.nvm_file)
switch (sband->band) {
case NL80211_BAND_2GHZ:
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] |=
u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
break;
case NL80211_BAND_6GHZ:
if (!is_ap || iwlwifi_mod_params.nvm_file)
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |=
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
fallthrough;
case NL80211_BAND_5GHZ:
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
if (!is_ap || iwlwifi_mod_params.nvm_file)
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
break;
default:
WARN_ON(1);
break;
}
if ((tx_chains & rx_chains) == ANT_AB) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
@ -765,19 +898,44 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
if (!is_ap)
if (!is_ap) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_MAX_NC_2;
} else if (!is_ap) {
/* If not 2x2, we need to indicate 1x1 in the
* Midamble RX Max NSTS - but not for AP mode
*/
iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_MAX_NC_1;
if (iftype_data->eht_cap.has_eht) {
/*
* Set the number of sounding dimensions for each
* bandwidth to 1 to indicate the maximal supported
* value of TXVECTOR parameter NUM_STS of 2
*/
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] |= 0x49;
/*
* Set the MAX NC to 1 to indicate sounding feedback of
* 2 supported by the beamformee.
*/
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] |= 0x10;
}
}
} else {
if (iftype_data->eht_cap.has_eht) {
struct ieee80211_eht_mcs_nss_supp *mcs_nss =
&iftype_data->eht_cap.eht_mcs_nss_supp;
memset(mcs_nss, 0x11, sizeof(*mcs_nss));
}
if (!is_ap) {
/* If not 2x2, we need to indicate 1x1 in the
* Midamble RX Max NSTS - but not for AP mode
*/
iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_MAX_NC_1;
}
}
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
@ -792,6 +950,29 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
break;
}
if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iftype_data->eht_cap.has_eht) {
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] &=
~(IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[5] &=
~IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[6] &=
~(IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP);
}
if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BCAST_TWT;
@ -816,8 +997,8 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
if (WARN_ON(sband->iftype_data))
return;
BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_capa));
BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_capa));
BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_eht_capa));
BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_eht_capa));
switch (sband->band) {
case NL80211_BAND_2GHZ:
@ -832,13 +1013,13 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
return;
}
memcpy(iftype_data, iwl_he_capa, sizeof(iwl_he_capa));
memcpy(iftype_data, iwl_he_eht_capa, sizeof(iwl_he_eht_capa));
sband->iftype_data = iftype_data;
sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
sband->n_iftype_data = ARRAY_SIZE(iwl_he_eht_capa);
for (i = 0; i < sband->n_iftype_data; i++)
iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i],
iwl_nvm_fixup_sband_iftd(trans, data, sband, &iftype_data[i],
tx_chains, rx_chains, fw);
iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);


@ -368,6 +368,7 @@ enum {
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
#define PREG_AUX_BUS_WPROT_0 0xA04CC0


@ -1542,5 +1542,6 @@ void iwl_trans_free(struct iwl_trans *trans);
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
#endif /* __iwl_trans_h__ */


@ -354,6 +354,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (mvm->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_AX210) {
/* print these registers regardless of alive fail/success */
IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION));
IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n",
iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION));
IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n",
iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG));
IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n",
iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9));
}
if (ret) {
struct iwl_trans *trans = mvm->trans;
@ -390,7 +404,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
UREG_LMAC2_CURRENT_PC));
}
if (ret == -ETIMEDOUT)
if (ret == -ETIMEDOUT && !mvm->pldr_sync)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
@ -404,7 +418,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
iwl_mei_alive_notif(!ret);
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
if (ret) {
@ -1467,18 +1482,22 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
return ret;
sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
if (!(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK) && iwl_mei_pldr_req())
return ret;
mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK);
if (mvm->pldr_sync && iwl_mei_pldr_req())
return -EBUSY;
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
if (ret != -ERFKILL)
if (ret != -ERFKILL && !mvm->pldr_sync)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}
/* FW loaded successfully */
mvm->pldr_sync = false;
iwl_get_shared_mem_conf(&mvm->fwrt);
ret = iwl_mvm_sf_update(mvm, NULL, false);


@ -1068,6 +1068,16 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
if (!ret)
break;
/*
* In PLDR sync PCI re-enumeration is needed. no point to retry
* mac start before that.
*/
if (mvm->pldr_sync) {
iwl_mei_alive_notif(false);
iwl_trans_pcie_remove(mvm->trans, true);
break;
}
IWL_ERR(mvm, "mac start retry %d\n", retry);
}
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
@ -1825,7 +1835,8 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
bool inheritance)
{
int i;
@ -1851,14 +1862,25 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
bw++) {
ru_index_tmp >>= 1;
if (!(ru_index_tmp & 1))
continue;
/*
* According to the 11be spec, if for a specific BW the PPE Thresholds
* isn't present - it should inherit the thresholds from the last
* BW for which we had PPE Thresholds. In 11ax though, we don't have
* this inheritance - continue in this case
*/
if (!(ru_index_tmp & 1)) {
if (inheritance)
goto set_thresholds;
else
continue;
}
high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
set_thresholds:
pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
}
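The comment above describes the 802.11be rule this change implements: when a bandwidth has no PPE thresholds of its own, it inherits the last thresholds that were present, whereas the HE-only path simply skips that bandwidth. The toy sketch below is a simplification that tracks one value per bandwidth instead of the per-NSS, per-QAM low/high pairs the driver fills in, but it shows the carry-forward behaviour:

```c
#include <stdio.h>

/* A cleared bit in ru_bitmap means "no thresholds for this bandwidth";
 * with inheritance enabled the last seen value is reused instead of the
 * bandwidth being skipped.
 */
static void fill_thresholds(unsigned int ru_bitmap, const int *present,
			    int *out, int n_bw, int inheritance)
{
	int last = -1;	/* sentinel: nothing parsed yet */
	int next = 0;

	for (int bw = 0; bw < n_bw; bw++) {
		if (ru_bitmap & (1u << bw))
			last = present[next++];
		else if (!inheritance)
			continue;	/* HE behaviour: leave out[bw] untouched */
		out[bw] = last;
	}
}

int main(void)
{
	int vals[] = { 7, 3 };	/* thresholds present for bw index 0 and 2 */
	int out[4] = { -1, -1, -1, -1 };

	fill_thresholds(0x5, vals, out, 4, 1);
	for (int bw = 0; bw < 4; bw++)
		printf("bw[%d] = %d\n", bw, out[bw]);	/* 7 7 3 3 */
	return 0;
}
```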
@ -1867,7 +1889,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_he_pkt_ext_v2 *pkt_ext)
struct iwl_he_pkt_ext_v2 *pkt_ext,
bool inheritance)
{
u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
u8 *ppe = &sta->deflink.he_cap.ppe_thres[0];
@ -1877,7 +1900,8 @@ static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
/* Starting after PPE header */
u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit,
inheritance);
}
static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
@ -1888,16 +1912,18 @@ static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *p
int high_th = -1;
int i;
/* all the macros are the same for EHT and HE */
switch (nominal_padding) {
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_NONE;
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
low_th = IWL_HE_PKT_EXT_BPSK;
high_th = IWL_HE_PKT_EXT_NONE;
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_BPSK;
break;
@ -1920,6 +1946,31 @@ static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *p
}
}
static void iwl_mvm_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
u8 nominal_padding)
{
int i;
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
u8 bw;
for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
bw++) {
u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0];
if (nominal_padding >
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
qam_th[1] == IWL_HE_PKT_EXT_NONE)
qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
else if (nominal_padding ==
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
qam_th[0] == IWL_HE_PKT_EXT_NONE &&
qam_th[1] == IWL_HE_PKT_EXT_NONE)
qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
}
}
}
static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, u8 sta_id)
{
@ -1943,6 +1994,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_supported_band *sband;
void *cmd;
u8 nominal_padding;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
ver = 1;
@ -2032,22 +2084,96 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
sizeof(sta_ctxt_cmd.pkt_ext));
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
&sta_ctxt_cmd.pkt_ext);
flags |= STA_CTXT_HE_PACKET_EXT;
/* PPE Thresholds doesn't exist - set the API PPE values
* according to Common Nominal Packet Padding fiels. */
} else {
u8 nominal_padding =
u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9],
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
if (sta->deflink.eht_cap.has_eht) {
nominal_padding =
u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
/* If PPE Thresholds exists, parse them into a FW-familiar format. */
if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5] &
IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
u8 nss = (sta->deflink.eht_cap.eht_ppe_thres[0] &
IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
u8 *ppe = &sta->deflink.eht_cap.eht_ppe_thres[0];
u8 ru_index_bitmap =
u16_get_bits(*ppe,
IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
/* Starting after PPE header */
u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
iwl_mvm_parse_ppe(mvm,
&sta_ctxt_cmd.pkt_ext,
nss, ru_index_bitmap, ppe,
ppe_pos_bit, true);
flags |= STA_CTXT_HE_PACKET_EXT;
/* EHT PPE Thresholds don't exist - set the API according to HE PPE Thresholds */
} else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
struct iwl_he_pkt_ext_v2 *pkt_ext =
&sta_ctxt_cmd.pkt_ext;
/*
* Even though HE Capabilities IE doesn't contain PPE
* Thresholds for BW 320Mhz, thresholds for this BW will
* be filled in with the same values as 160Mhz, due to
* the inheritance, as required.
*/
iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, pkt_ext,
true);
/*
* According to the requirements, for MCSs 12-13 the maximum value between
* HE PPE Threshold and Common Nominal Packet Padding needs to be taken
*/
iwl_mvm_get_optimal_ppe_info(pkt_ext, nominal_padding);
flags |= STA_CTXT_HE_PACKET_EXT;
/*
* if PPE Thresholds doesn't present in both EHT IE and HE IE -
* take the Thresholds from Common Nominal Packet Padding field
*/
} else {
iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
nominal_padding,
&flags);
}
} else if (sta->deflink.he_cap.has_he) {
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
&sta_ctxt_cmd.pkt_ext,
false);
flags |= STA_CTXT_HE_PACKET_EXT;
/*
* PPE Thresholds doesn't exist - set the API PPE values
* according to Common Nominal Packet Padding field.
*/
} else {
nominal_padding =
u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9],
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
nominal_padding,
&flags);
}
}
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
int bw;
for (bw = 0;
bw < ARRAY_SIZE(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i]);
bw++) {
u8 *qam_th =
&sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0];
IWL_DEBUG_HT(mvm,
"PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
i, bw, qam_th[0], qam_th[1]);
}
}
if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
@ -2195,8 +2321,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* interface was added.
*/
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
if ((vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax) ||
(vif->bss_conf.eht_support &&
!iwlwifi_mod_params.disable_11be))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
@ -2204,8 +2332,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
vif->cfg.assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
vif->cfg.assoc &&
((vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax) ||
(vif->bss_conf.eht_support &&
!iwlwifi_mod_params.disable_11be)))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
/*
@ -3218,8 +3349,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
if ((vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax) ||
(vif->bss_conf.eht_support &&
!iwlwifi_mod_params.disable_11be))
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
@ -3777,7 +3910,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
iwl_mvm_phy_band_from_nl80211(channel->band),
PHY_VHT_CHANNEL_MODE20,
IWL_PHY_CHANNEL_MODE20,
0);
/* Set the time and duration */


@ -1105,6 +1105,8 @@ struct iwl_mvm {
unsigned long last_reset_or_resume_time_jiffies;
bool sta_remove_requires_queue_remove;
bool pldr_sync;
};
/* Extract MVM priv from op_mode and _hw */


@ -1077,6 +1077,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
u32 max_agg;
size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
@ -1098,12 +1099,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
max_agg = IEEE80211_MAX_AMPDU_BUF_EHT;
else
max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
hw->max_rx_aggregation_subframes = max_agg;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
hw->max_tx_aggregation_subframes = max_agg;
op_mode = hw->priv;
@ -1882,6 +1888,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (mvm->pldr_sync)
return;
if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
!test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
&mvm->status))


@ -14,16 +14,18 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
return PHY_VHT_CHANNEL_MODE20;
return IWL_PHY_CHANNEL_MODE20;
case NL80211_CHAN_WIDTH_40:
return PHY_VHT_CHANNEL_MODE40;
return IWL_PHY_CHANNEL_MODE40;
case NL80211_CHAN_WIDTH_80:
return PHY_VHT_CHANNEL_MODE80;
return IWL_PHY_CHANNEL_MODE80;
case NL80211_CHAN_WIDTH_160:
return PHY_VHT_CHANNEL_MODE160;
return IWL_PHY_CHANNEL_MODE160;
case NL80211_CHAN_WIDTH_320:
return IWL_PHY_CHANNEL_MODE320;
default:
WARN(1, "Invalid channel width=%u", chandef->width);
return PHY_VHT_CHANNEL_MODE20;
return IWL_PHY_CHANNEL_MODE20;
}
}
@ -33,34 +35,32 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
*/
u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{
switch (chandef->chan->center_freq - chandef->center_freq1) {
case -70:
return PHY_VHT_CTRL_POS_4_BELOW;
case -50:
return PHY_VHT_CTRL_POS_3_BELOW;
case -30:
return PHY_VHT_CTRL_POS_2_BELOW;
case -10:
return PHY_VHT_CTRL_POS_1_BELOW;
case 10:
return PHY_VHT_CTRL_POS_1_ABOVE;
case 30:
return PHY_VHT_CTRL_POS_2_ABOVE;
case 50:
return PHY_VHT_CTRL_POS_3_ABOVE;
case 70:
return PHY_VHT_CTRL_POS_4_ABOVE;
default:
WARN(1, "Invalid channel definition");
fallthrough;
case 0:
int offs = chandef->chan->center_freq - chandef->center_freq1;
int abs_offs = abs(offs);
u8 ret;
if (offs == 0) {
/*
* The FW is expected to check the control channel position only
* when in HT/VHT and the channel width is not 20MHz. Return
* this value as the default one.
*/
return PHY_VHT_CTRL_POS_1_BELOW;
return 0;
}
/* this results in a value 0-7, i.e. fitting into 0b0111 */
ret = (abs_offs - 10) / 20;
/*
* But we need the value to be in 0b1011 because 0b0100 is
* IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
* IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
*/
ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
((ret & BIT(2)) << 1);
/* and add the above bit */
ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;
return ret;
}
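The rewritten iwl_mvm_get_ctrl_pos() computes the control-channel position code from the offset between the control channel and center_freq1 instead of switching over fixed offsets, so the 320 MHz cases fall out of the same arithmetic. A self-contained sketch of that arithmetic, reusing the IWL_PHY_CTRL_POS_* values from the PHY API hunk earlier in this series, prints codes matching the diagram there:

```c
#include <stdio.h>
#include <stdlib.h>

#define IWL_PHY_CTRL_POS_ABOVE		0x4
#define IWL_PHY_CTRL_POS_OFFS_EXT	0x8
#define IWL_PHY_CTRL_POS_OFFS_MSK	0x3

/* offs = control-channel centre frequency minus center_freq1, in MHz */
static unsigned int ctrl_pos(int offs)
{
	unsigned int ret;

	if (offs == 0)
		return 0;

	/* distance index 0..7 (offsets are odd multiples of 10 MHz) */
	ret = (abs(offs) - 10) / 20;
	/* bit 2 of the index moves up into the extended-distance bit,
	 * i.e. IWL_PHY_CTRL_POS_OFFS_EXT
	 */
	ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) | ((ret & 0x4) << 1);
	if (offs > 0)
		ret |= IWL_PHY_CTRL_POS_ABOVE;
	return ret;
}

int main(void)
{
	/* -30 MHz -> 0b0001, -10 MHz -> 0b0000,
	 * +150 MHz (only possible on a 320 MHz channel) -> 0b1111
	 */
	printf("%x %x %x\n", ctrl_pos(-30), ctrl_pos(-10), ctrl_pos(150));
	return 0;
}
```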
/*


@ -9,9 +9,11 @@
#include "iwl-op-mode.h"
#include "mvm.h"
static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
static u8 rs_fw_bw_from_sta_bw(const struct ieee80211_sta *sta)
{
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_320:
return IWL_TLC_MNG_CH_WIDTH_320MHZ;
case IEEE80211_STA_RX_BW_160:
return IWL_TLC_MNG_CH_WIDTH_160MHZ;
case IEEE80211_STA_RX_BW_80:
@ -238,6 +240,122 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
static u8 rs_fw_eht_max_nss(u8 rx_nss, u8 tx_nss)
{
u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
/* the max nss that can be used,
* is the min with our tx capa and the peer rx capa.
*/
return min(tx, rx);
}
#define MAX_NSS_MCS(mcs_num, rx, tx) \
rs_fw_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \
(tx)->rx_tx_mcs ##mcs_num## _max_nss)
static void rs_fw_set_eht_mcs_nss(__le16 ht_rates[][3],
enum IWL_TLC_MCS_PER_BW bw,
u8 max_nss, u16 mcs_msk)
{
if (max_nss >= 2)
ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
if (max_nss >= 1)
ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
}
static const
struct ieee80211_eht_mcs_nss_supp_bw *
rs_fw_rs_mcs2eht_mcs(enum IWL_TLC_MCS_PER_BW bw,
const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
{
switch (bw) {
case IWL_TLC_MCS_PER_BW_80:
return &eht_mcs->bw._80;
case IWL_TLC_MCS_PER_BW_160:
return &eht_mcs->bw._160;
case IWL_TLC_MCS_PER_BW_320:
return &eht_mcs->bw._320;
default:
return NULL;
}
}
static void rs_fw_eht_set_enabled_rates(const struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd_v4 *cmd)
{
/* peer RX mcs capa */
const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
&sta->deflink.eht_cap.eht_mcs_nss_supp;
/* our TX mcs capa */
const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
&sband->iftype_data->eht_cap.eht_mcs_nss_supp;
enum IWL_TLC_MCS_PER_BW bw;
struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
/* peer is 20Mhz only */
if (!(sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
mcs_rx_20 = eht_rx_mcs->only_20mhz;
} else {
mcs_rx_20.rx_tx_mcs7_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_rx_20.rx_tx_mcs9_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_rx_20.rx_tx_mcs11_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
mcs_rx_20.rx_tx_mcs13_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
}
/* nic is 20Mhz only */
if (!(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
mcs_tx_20 = eht_tx_mcs->only_20mhz;
} else {
mcs_tx_20.rx_tx_mcs7_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_tx_20.rx_tx_mcs9_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_tx_20.rx_tx_mcs11_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
mcs_tx_20.rx_tx_mcs13_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
}
/* rates for 20/40/80 bw */
bw = IWL_TLC_MCS_PER_BW_80;
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20), GENMASK(7, 0));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20), GENMASK(9, 8));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20), GENMASK(11, 10));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20), GENMASK(13, 12));
/* rate for 160/320 bw */
for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
rs_fw_rs_mcs2eht_mcs(bw, eht_rx_mcs);
const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
rs_fw_rs_mcs2eht_mcs(bw, eht_tx_mcs);
/* got unsupported index for bw */
if (!mcs_rx || !mcs_tx)
continue;
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(9, mcs_rx, mcs_tx), GENMASK(9, 0));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(11, mcs_rx, mcs_tx), GENMASK(11, 10));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(13, mcs_rx, mcs_tx), GENMASK(13, 12));
}
/* the station support only a single receive chain */
if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC ||
sta->deflink.rx_nss < 2)
memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
}
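rs_fw_eht_max_nss() and the MAX_NSS_MCS() macro combine our TX NSS capability with the peer's RX NSS capability for a given MCS range by taking the minimum of the two. Assuming the usual EHT MCS/NSS field layout (max RX NSS in bits 3:0 and max TX NSS in bits 7:4, which is what the IEEE80211_EHT_MCS_NSS_RX/TX masks select and why the capability tables above use 0x22 for 2x2), a standalone sketch is:

```c
#include <stdio.h>
#include <stdint.h>

static uint8_t eht_max_nss(uint8_t peer_rx_field, uint8_t own_tx_field)
{
	uint8_t rx = peer_rx_field & 0x0f;		/* what the peer can receive */
	uint8_t tx = (own_tx_field >> 4) & 0x0f;	/* what we can transmit */

	return tx < rx ? tx : rx;
}

int main(void)
{
	/* peer advertises 1x1 for MCS 12-13 while we advertise 2x2:
	 * the rate set is limited to a single spatial stream.
	 */
	printf("max nss = %u\n", eht_max_nss(0x11, 0x22));
	return 0;
}
```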
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd_v4 *cmd)
@ -258,7 +376,10 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
/* HT/VHT rates */
if (he_cap->has_he) {
if (sta->deflink.eht_cap.has_eht) {
cmd->mode = IWL_TLC_MNG_MODE_EHT;
rs_fw_eht_set_enabled_rates(sta, sband, cmd);
} else if (he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
rs_fw_he_set_enabled_rates(sta, sband, cmd);
} else if (vht_cap->vht_supported) {


@ -1215,6 +1215,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct sk_buff_head mpdus_skbs;
unsigned int payload_len;
int ret;
struct sk_buff *orig_skb = skb;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@ -1247,8 +1248,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
if (ret) {
/* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
return ret;
/* skb here is not necessarily same as skb that entered this method,
* so free it explicitly.
*/
if (skb == orig_skb)
ieee80211_free_txskb(mvm->hw, skb);
else
kfree_skb(skb);
/* there was error, but we consumed skb one way or another, so return 0 */
return 0;
}
}


@ -2052,6 +2052,7 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
struct iwl_trans_pcie_removal {
struct pci_dev *pdev;
struct work_struct work;
bool rescan;
};
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
@ -2060,18 +2061,61 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
struct pci_bus *bus = pdev->bus;
dev_err(&pdev->dev, "Device gone - attempting removal\n");
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
pci_lock_rescan_remove();
pci_dev_put(pdev);
pci_stop_and_remove_bus_device(pdev);
if (removal->rescan)
pci_rescan_bus(bus->parent);
pci_unlock_rescan_remove();
kfree(removal);
module_put(THIS_MODULE);
}
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
{
struct iwl_trans_pcie_removal *removal;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
IWL_ERR(trans, "Device gone - scheduling removal!\n");
/*
* get a module reference to avoid doing this
* while unloading anyway and to avoid
* scheduling a work with code that's being
* removed.
*/
if (!try_module_get(THIS_MODULE)) {
IWL_ERR(trans,
"Module is being unloaded - abort\n");
return;
}
removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
if (!removal) {
module_put(THIS_MODULE);
return;
}
/*
* we don't need to clear this flag, because
* the trans will be freed and reallocated.
*/
set_bit(STATUS_TRANS_DEAD, &trans->status);
removal->pdev = to_pci_dev(trans->dev);
removal->rescan = rescan;
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
}
EXPORT_SYMBOL(iwl_trans_pcie_remove);
/*
* This version doesn't disable BHs but rather assumes they're
* already disabled.
@ -2131,47 +2175,12 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
iwl_trans_pcie_dump_regs(trans);
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
struct iwl_trans_pcie_removal *removal;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
goto err;
IWL_ERR(trans, "Device gone - scheduling removal!\n");
/*
* get a module reference to avoid doing this
* while unloading anyway and to avoid
* scheduling a work with code that's being
* removed.
*/
if (!try_module_get(THIS_MODULE)) {
IWL_ERR(trans,
"Module is being unloaded - abort\n");
goto err;
}
removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
if (!removal) {
module_put(THIS_MODULE);
goto err;
}
/*
* we don't need to clear this flag, because
* the trans will be freed and reallocated.
*/
set_bit(STATUS_TRANS_DEAD, &trans->status);
removal->pdev = to_pci_dev(trans->dev);
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
} else {
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
iwl_trans_pcie_remove(trans, false);
else
iwl_write32(trans, CSR_RESET,
CSR_RESET_REG_FLAG_FORCE_NMI);
}
err:
spin_unlock(&trans_pcie->reg_lock);
return false;
}


@ -108,10 +108,10 @@ struct pda_country {
} __packed;
struct pda_antenna_gain {
struct {
DECLARE_FLEX_ARRAY(struct {
u8 gain_5GHz; /* 0.25 dBi units */
u8 gain_2GHz; /* 0.25 dBi units */
} __packed antenna[0];
} __packed, antenna);
} __packed;
struct pda_custom_wrapper {


@ -34,3 +34,4 @@ source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7921/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7996/Kconfig"


@ -35,3 +35,4 @@ obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615_COMMON) += mt7615/
obj-$(CONFIG_MT7915E) += mt7915/
obj-$(CONFIG_MT7921_COMMON) += mt7921/
obj-$(CONFIG_MT7996E) += mt7996/


@ -100,23 +100,6 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
}
EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
static int mt76_read_rate_txpower(struct seq_file *s, void *data)
{
struct mt76_dev *dev = dev_get_drvdata(s->private);
mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
ARRAY_SIZE(dev->rate_power.cck));
mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
ARRAY_SIZE(dev->rate_power.ofdm));
mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
ARRAY_SIZE(dev->rate_power.stbc));
mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
ARRAY_SIZE(dev->rate_power.ht));
mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
ARRAY_SIZE(dev->rate_power.vht));
return 0;
}
struct dentry *
mt76_register_debugfs_fops(struct mt76_phy *phy,
const struct file_operations *ops)
@ -137,8 +120,6 @@ mt76_register_debugfs_fops(struct mt76_phy *phy,
debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
mt76_read_rate_txpower);
debugfs_create_devm_seqfile(dev->dev, "rx-queues", dir,
mt76_rx_queues_read);


@ -59,6 +59,19 @@ mt76_alloc_txwi(struct mt76_dev *dev)
return t;
}
static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
if (!t)
return NULL;
t->ptr = NULL;
return t;
}
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
@ -75,6 +88,22 @@ __mt76_get_txwi(struct mt76_dev *dev)
return t;
}
static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = NULL;
spin_lock(&dev->wed_lock);
if (!list_empty(&dev->rxwi_cache)) {
t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
list);
list_del(&t->list);
}
spin_unlock(&dev->wed_lock);
return t;
}
static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
@ -86,6 +115,18 @@ mt76_get_txwi(struct mt76_dev *dev)
return mt76_alloc_txwi(dev);
}
struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
if (t)
return t;
return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@ -98,6 +139,18 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);
void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
if (!t)
return;
spin_lock(&dev->wed_lock);
list_add(&t->list, &dev->rxwi_cache);
spin_unlock(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
@ -112,6 +165,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
local_bh_enable();
}
static void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
local_bh_disable();
while ((t = __mt76_get_rxwi(dev)) != NULL) {
if (t->ptr)
skb_free_frag(t->ptr);
kfree(t);
}
local_bh_enable();
}
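The new rxwi helpers mirror the existing txwi cache: a lock-protected free list of descriptors recycled between the WED RX ring and the driver, with a fresh allocation as the fallback when the list is empty. A stripped-down userspace sketch of that recycling pattern, using a pthread mutex in place of the kernel spinlock and bottom-half handling, might look like:

```c
#include <pthread.h>
#include <stdlib.h>

struct rxwi {
	struct rxwi *next;
	void *ptr;		/* rx buffer currently attached, if any */
};

static struct rxwi *rxwi_cache;
static pthread_mutex_t rxwi_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rxwi *rxwi_get(void)
{
	struct rxwi *t;

	pthread_mutex_lock(&rxwi_lock);
	t = rxwi_cache;
	if (t)
		rxwi_cache = t->next;
	pthread_mutex_unlock(&rxwi_lock);

	/* fall back to a fresh allocation when the cache is empty */
	return t ? t : calloc(1, sizeof(*t));
}

static void rxwi_put(struct rxwi *t)
{
	if (!t)
		return;
	pthread_mutex_lock(&rxwi_lock);
	t->next = rxwi_cache;
	rxwi_cache = t;
	pthread_mutex_unlock(&rxwi_lock);
}
```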
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
@ -148,11 +215,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
u32 ctrl;
int i, idx = -1;
if (txwi) {
q->entry[q->head].txwi = DMA_DUMMY_DATA;
q->entry[q->head].skip_buf0 = true;
}
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
@ -162,28 +224,48 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
desc = &q->desc[idx];
entry = &q->entry[idx];
if (buf[0].skip_unmap)
entry->skip_buf0 = true;
entry->skip_buf1 = i == nbufs - 1;
if ((q->flags & MT_QFLAG_WED) &&
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
struct mt76_txwi_cache *t = txwi;
int rx_token;
entry->dma_addr[0] = buf[0].addr;
entry->dma_len[0] = buf[0].len;
if (!t)
return -ENOMEM;
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
if (i < nbufs - 1) {
entry->dma_addr[1] = buf[1].addr;
entry->dma_len[1] = buf[1].len;
buf1 = buf[1].addr;
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
if (buf[1].skip_unmap)
entry->skip_buf1 = true;
rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
buf[0].addr);
buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
MT_DMA_CTL_TO_HOST;
} else {
if (txwi) {
q->entry[q->head].txwi = DMA_DUMMY_DATA;
q->entry[q->head].skip_buf0 = true;
}
if (buf[0].skip_unmap)
entry->skip_buf0 = true;
entry->skip_buf1 = i == nbufs - 1;
entry->dma_addr[0] = buf[0].addr;
entry->dma_len[0] = buf[0].len;
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
if (i < nbufs - 1) {
entry->dma_addr[1] = buf[1].addr;
entry->dma_len[1] = buf[1].len;
buf1 = buf[1].addr;
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
if (buf[1].skip_unmap)
entry->skip_buf1 = true;
}
if (i == nbufs - 1)
ctrl |= MT_DMA_CTL_LAST_SEC0;
else if (i == nbufs - 2)
ctrl |= MT_DMA_CTL_LAST_SEC1;
}
if (i == nbufs - 1)
ctrl |= MT_DMA_CTL_LAST_SEC0;
else if (i == nbufs - 2)
ctrl |= MT_DMA_CTL_LAST_SEC1;
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->info, cpu_to_le32(info));
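For WED RX queues the buffer is identified by a 16-bit token packed into the upper half of the buf1 descriptor word via MT_DMA_CTL_TOKEN (GENMASK(31, 16), added in the mt76 dma.h hunk further down), rather than by the queue entry itself. A small sketch of that bit packing, with minimal stand-ins for the kernel's FIELD_PREP()/FIELD_GET():

```c
#include <stdint.h>
#include <stdio.h>

#define MT_DMA_CTL_TOKEN	0xffff0000u	/* GENMASK(31, 16) */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val * (mask & -mask)) & mask;	/* shift into the mask */
}

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) / (mask & -mask);	/* shift back out */
}

int main(void)
{
	uint32_t buf1 = field_prep(MT_DMA_CTL_TOKEN, 0x1234);

	printf("buf1 = 0x%08x, token = 0x%x\n",
	       buf1, field_get(MT_DMA_CTL_TOKEN, buf1));
	return 0;
}
```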
@ -272,33 +354,60 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
int *len, u32 *info, bool *more)
int *len, u32 *info, bool *more, bool *drop)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
dma_addr_t buf_addr;
void *buf = e->buf;
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
void *buf;
buf_addr = e->dma_addr[0];
if (len) {
u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
}
if (info)
*info = le32_to_cpu(desc->info);
dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
e->buf = NULL;
if ((q->flags & MT_QFLAG_WED) &&
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
le32_to_cpu(desc->buf1));
struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
if (!t)
return NULL;
dma_unmap_single(dev->dma_dev, t->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size),
DMA_FROM_DEVICE);
buf = t->ptr;
t->dma_addr = 0;
t->ptr = NULL;
mt76_put_rxwi(dev, t);
if (drop) {
u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
MT_DMA_CTL_DROP));
}
} else {
buf = e->buf;
e->buf = NULL;
dma_unmap_single(dev->dma_dev, e->dma_addr[0],
SKB_WITH_OVERHEAD(q->buf_size),
DMA_FROM_DEVICE);
}
return buf;
}
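mt76_rx_token_consume() and mt76_rx_token_release() (declared in the mt76.h hunk further down) map that token to the buffer and its DMA address through an IDR. A rough sketch of such a token table, using a plain fixed-size array instead of the kernel IDR and ignoring locking:

```c
#include <stddef.h>

#define MAX_RX_TOKENS	1024

struct rx_token_entry {
	void *buf;		/* CPU address of the rx buffer */
	unsigned long dma_addr;	/* stand-in for the DMA address */
	int used;
};

static struct rx_token_entry rx_tokens[MAX_RX_TOKENS];

/* Reserve a token for a buffer about to be handed to the hardware;
 * the returned index is what gets packed into the descriptor.
 */
static int rx_token_consume(void *buf, unsigned long dma_addr)
{
	for (int i = 0; i < MAX_RX_TOKENS; i++) {
		if (!rx_tokens[i].used) {
			rx_tokens[i] = (struct rx_token_entry){ buf, dma_addr, 1 };
			return i;
		}
	}
	return -1;	/* table full */
}

/* On completion, the token read back from the descriptor recovers the buffer. */
static struct rx_token_entry *rx_token_release(int token)
{
	if (token < 0 || token >= MAX_RX_TOKENS || !rx_tokens[token].used)
		return NULL;
	rx_tokens[token].used = 0;
	return &rx_tokens[token];
}
```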
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more)
int *len, u32 *info, bool *more, bool *drop)
{
int idx = q->tail;
@ -314,7 +423,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
return mt76_dma_get_buf(dev, q, idx, len, info, more);
return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}
static int
@ -441,14 +550,26 @@ free_skb:
return ret;
}
static struct page_frag_cache *
mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
{
struct page_frag_cache *rx_page = &q->rx_page;
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
if ((q->flags & MT_QFLAG_WED) &&
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
#endif
return rx_page;
}
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
dma_addr_t addr;
void *buf;
int frames = 0;
struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
int frames = 0, offset = q->buf_offset;
dma_addr_t addr;
if (!q->ndesc)
return 0;
@ -456,9 +577,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
struct mt76_txwi_cache *t = NULL;
struct mt76_queue_buf qbuf;
void *buf = NULL;
buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
if ((q->flags & MT_QFLAG_WED) &&
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
t = mt76_get_rxwi(dev);
if (!t)
break;
}
buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
if (!buf)
break;
@ -471,7 +601,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
qbuf.addr = addr + offset;
qbuf.len = len - offset;
qbuf.skip_unmap = false;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
frames++;
}
@ -517,6 +647,11 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
if (!ret)
q->wed_regs = wed->txfree_ring.reg_base;
break;
case MT76_WED_Q_RX:
ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
if (!ret)
q->wed_regs = wed->rx_ring[ring].reg_base;
break;
default:
ret = -EINVAL;
}
@ -574,7 +709,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
if (!buf)
break;
@ -615,7 +750,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
int len, bool more, u32 info)
{
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
@ -635,7 +770,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
q->rx_head = NULL;
if (nr_frags < ARRAY_SIZE(shinfo->frags))
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
else
dev_kfree_skb(skb);
}
@ -656,6 +791,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
}
while (done < budget) {
bool drop = false;
u32 info;
if (check_ddone) {
@ -666,10 +802,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
break;
}
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
&drop);
if (!data)
break;
if (drop)
goto free_frag;
if (q->rx_head)
data_len = q->buf_size;
else
@ -682,7 +822,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
}
if (q->rx_head) {
mt76_add_fragment(dev, q, data, len, more);
mt76_add_fragment(dev, q, data, len, more, info);
continue;
}
@ -706,7 +846,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
continue;
}
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
continue;
free_frag:
@ -803,11 +943,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
mt76_dma_rx_cleanup(dev, q);
}
mt76_free_pending_txwi(dev);
mt76_free_pending_rxwi(dev);
if (mtk_wed_device_active(&dev->mmio.wed))
mtk_wed_device_detach(&dev->mmio.wed);


@ -15,6 +15,14 @@
#define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16)
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
#define MT_DMA_CTL_TO_HOST BIT(8)
#define MT_DMA_CTL_TO_HOST_A BIT(12)
#define MT_DMA_CTL_DROP BIT(14)
#define MT_DMA_CTL_TOKEN GENMASK(31, 16)
#define MT_DMA_PPE_CPU_REASON GENMASK(15, 11)
#define MT_DMA_PPE_ENTRY GENMASK(30, 16)
#define MT_DMA_INFO_PPE_VLD BIT(31)
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4


@ -443,8 +443,12 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
}
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
@ -568,6 +572,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
spin_lock_init(&dev->status_lock);
spin_lock_init(&dev->wed_lock);
mutex_init(&dev->mutex);
init_waitqueue_head(&dev->tx_wait);
@ -590,9 +595,13 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
spin_lock_init(&dev->rx_token_lock);
idr_init(&dev->rx_token);
INIT_LIST_HEAD(&dev->wcid_list);
INIT_LIST_HEAD(&dev->txwi_cache);
INIT_LIST_HEAD(&dev->rxwi_cache);
dev->token_size = dev->drv->token_size;
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
@ -947,14 +956,12 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
static int
mt76_rx_signal(struct mt76_rx_status *status)
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
s8 *chain_signal = status->chain_signal;
int signal = -128;
u8 chains;
for (chains = status->chains; chains; chains >>= 1, chain_signal++) {
for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
int cur, diff;
cur = *chain_signal;
@ -976,6 +983,7 @@ mt76_rx_signal(struct mt76_rx_status *status)
return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
@ -1005,7 +1013,7 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->ampdu_reference = mstat.ampdu_ref;
status->device_timestamp = mstat.timestamp;
status->mactime = mstat.timestamp;
status->signal = mt76_rx_signal(&mstat);
status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
if (status->signal <= -128)
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
@ -1289,7 +1297,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
mt76_check_sta(dev, skb);
mt76_rx_aggr_reorder(skb, &frames);
if (mtk_wed_device_active(&dev->mmio.wed))
__skb_queue_tail(&frames, skb);
else
mt76_rx_aggr_reorder(skb, &frames);
}
mt76_rx_complete(dev, &frames, napi);


@ -35,6 +35,7 @@
FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
struct mt76_dev;
@ -56,6 +57,7 @@ enum mt76_bus_type {
enum mt76_wed_type {
MT76_WED_Q_TX,
MT76_WED_Q_TXFREE,
MT76_WED_Q_RX,
};
struct mt76_bus_ops {
@ -271,9 +273,15 @@ struct mt76_sta_stats {
u64 tx_nss[4]; /* 1, 2, 3, 4 */
u64 tx_mcs[16]; /* mcs idx */
u64 tx_bytes;
/* WED TX */
u32 tx_packets;
u32 tx_retries;
u32 tx_failed;
/* WED RX */
u64 rx_bytes;
u32 rx_packets;
u32 rx_errors;
u32 rx_drops;
};
enum mt76_wcid_flags {
@ -339,7 +347,10 @@ struct mt76_txwi_cache {
struct list_head list;
dma_addr_t dma_addr;
struct sk_buff *skb;
union {
struct sk_buff *skb;
void *ptr;
};
};
struct mt76_rx_tid {
@ -415,6 +426,7 @@ struct mt76_hw_cap {
#define MT_DRV_SW_RX_AIRTIME BIT(2)
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
#define MT_DRV_AMSDU_OFFLOAD BIT(5)
struct mt76_driver_ops {
u32 drv_flags;
@ -438,7 +450,7 @@ struct mt76_driver_ops {
bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
struct sk_buff *skb);
struct sk_buff *skb, u32 *info);
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
@ -470,19 +482,6 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
struct mt76_rate_power {
union {
struct {
s8 cck[4];
s8 ofdm[8];
s8 stbc[10];
s8 ht[16];
s8 vht[10];
};
s8 all[48];
};
};
/* addr req mask */
#define MT_VEND_TYPE_EEPROM BIT(31)
#define MT_VEND_TYPE_CFG BIT(30)
@ -705,6 +704,8 @@ struct mt76_phy {
enum mt76_dfs_state dfs_state;
ktime_t survey_time;
u32 aggr_stats[32];
struct mt76_hw_cap cap;
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
@ -738,6 +739,7 @@ struct mt76_dev {
struct ieee80211_hw *hw;
spinlock_t wed_lock;
spinlock_t lock;
spinlock_t cc_lock;
@ -764,6 +766,7 @@ struct mt76_dev {
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
struct list_head rxwi_cache;
struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
@ -778,6 +781,10 @@ struct mt76_dev {
u16 token_count;
u16 token_size;
spinlock_t rx_token_lock;
struct idr rx_token;
u16 rx_token_size;
wait_queue_head_t tx_wait;
/* spinlock used to protect wcid pktid linked list */
spinlock_t status_lock;
@ -793,8 +800,6 @@ struct mt76_dev {
u32 rev;
u32 aggr_stats[32];
struct tasklet_struct pre_tbtt_tasklet;
int beacon_int;
u8 beacon_mask;
@ -802,8 +807,6 @@ struct mt76_dev {
struct debugfs_blob_wrapper eeprom;
struct debugfs_blob_wrapper otp;
struct mt76_rate_power rate_power;
char alpha2[3];
enum nl80211_dfs_regions region;
@ -1107,8 +1110,9 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
static const u8 nss_delta[4] = { 0, 6, 9, 12 };
u8 idx = nss - 1;
return nss_delta[nss - 1];
return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
}
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
@ -1163,6 +1167,7 @@ void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
@ -1260,6 +1265,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
}
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
@ -1404,6 +1411,9 @@ struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
struct mt76_txwi_cache *r, dma_addr_t phys);
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{


@ -85,7 +85,7 @@ mt7603_ampdu_stat_show(struct seq_file *file, void *data)
bound[i], bound[i + 1]);
seq_puts(file, "\nCount: ");
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
seq_printf(file, "%8d | ", dev->mphy.aggr_stats[i]);
seq_puts(file, "\n");
return 0;


@ -69,7 +69,7 @@ free:
}
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
struct sk_buff *skb, u32 *info)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;


@ -39,7 +39,7 @@ void mt7603_mac_reset_counters(struct mt7603_dev *dev)
for (i = 0; i < 2; i++)
mt76_rr(dev, MT_TX_AGG_CNT(i));
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
memset(dev->mphy.aggr_stats, 0, sizeof(dev->mphy.aggr_stats));
}
void mt7603_mac_set_timing(struct mt7603_dev *dev)
@ -1827,8 +1827,8 @@ void mt7603_mac_work(struct work_struct *work)
for (i = 0, idx = 0; i < 2; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
dev->mt76.aggr_stats[idx++] += val & 0xffff;
dev->mt76.aggr_stats[idx++] += val >> 16;
dev->mphy.aggr_stats[idx++] += val & 0xffff;
dev->mphy.aggr_stats[idx++] += val >> 16;
}
if (dev->mphy.mac_work_count == 10)


@ -244,7 +244,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
struct sk_buff *skb, u32 *info);
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,


@ -278,7 +278,6 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
{
struct mt7615_dev *dev = file->private;
u32 reg = is_mt7663(&dev->mt76) ? MT_MIB_ARNG(0) : MT_AGG_ASRCR0;
bool ext_phy = phy != &dev->phy;
int bound[7], i, range;
if (!phy)
@ -292,7 +291,7 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
for (i = 0; i < 3; i++)
bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
seq_printf(file, "\nPhy %d\n", ext_phy);
seq_printf(file, "\nPhy %d\n", phy != &dev->phy);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@ -300,9 +299,8 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
bound[i], bound[i + 1]);
seq_puts(file, "\nCount: ");
range = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + range]);
seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
seq_puts(file, "\n");
seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);


@ -107,9 +107,9 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
return &sta->vif->sta.wcid;
}
void mt7615_mac_reset_counters(struct mt7615_dev *dev)
void mt7615_mac_reset_counters(struct mt7615_phy *phy)
{
struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
struct mt7615_dev *dev = phy->dev;
int i;
for (i = 0; i < 4; i++) {
@ -117,10 +117,8 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
mt76_rr(dev, MT_TX_AGG_CNT(1, i));
}
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
dev->mt76.phy.survey_time = ktime_get_boottime();
if (mphy_ext)
mphy_ext->survey_time = ktime_get_boottime();
memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
phy->mt76->survey_time = ktime_get_boottime();
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
@ -1177,6 +1175,21 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
u32 addr;
addr = mt7615_mac_wtbl_addr(dev, mvif->sta.wcid.idx) + 3 * 4;
if (enable)
mt76_set(dev, addr, MT_WTBL_W3_RTS);
else
mt76_clear(dev, addr, MT_WTBL_W3_RTS);
}
EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
@ -1653,7 +1666,7 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
EXPORT_SYMBOL_GPL(mt7615_rx_check);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
struct sk_buff *skb, u32 *info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
@ -1999,7 +2012,7 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
int i, aggr;
int i, aggr = 0;
u32 val, val2;
mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
@ -2013,7 +2026,6 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
mib->aggr_per = 1000 * (val - val2) / val;
}
aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
@ -2026,8 +2038,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
val);
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
dev->mt76.aggr_stats[aggr++] += val & 0xffff;
dev->mt76.aggr_stats[aggr++] += val >> 16;
phy->mt76->aggr_stats[aggr++] += val & 0xffff;
phy->mt76->aggr_stats[aggr++] += val >> 16;
}
}


@ -83,7 +83,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
if (!running)
mt7615_mac_reset_counters(dev);
mt7615_mac_reset_counters(phy);
out:
mt7615_mutex_release(dev);
@ -320,7 +320,7 @@ int mt7615_set_channel(struct mt7615_phy *phy)
if (ret)
goto out;
mt7615_mac_reset_counters(dev);
mt7615_mac_reset_counters(phy);
phy->noise = 0;
phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
@ -572,6 +572,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7615_mac_enable_rtscts(dev, vif, info->use_cts_prot);
if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
mt7615_mcu_add_bss_info(phy, vif, NULL, true);
mt7615_mcu_sta_add(phy, vif, NULL, true);


@ -1119,7 +1119,7 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
return mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
enable);
enable, NULL);
}
static inline int


@ -469,10 +469,12 @@ void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
void mt7615_update_channel(struct mt76_phy *mphy);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
void mt7615_mac_reset_counters(struct mt7615_phy *phy);
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable);
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
@ -511,7 +513,7 @@ void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_token_put(struct mt7615_dev *dev);
bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
struct sk_buff *skb, u32 *info);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);


@ -446,6 +446,8 @@ enum mt7615_reg_base {
#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
#define MT_WTBL_W3_RTS BIT(22)
#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
#define MT_WTBL_W5_SHORT_GI_40 BIT(9)


@ -187,6 +187,11 @@ static inline bool is_mt7986(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7986;
}
static inline bool is_mt7996(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7990;
}
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
@ -261,6 +266,17 @@ mt76_connac_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
return (void *)(txwi + MT_TXD_SIZE);
}
static inline u8 mt76_connac_spe_idx(u8 antenna_mask)
{
static const u8 ant_to_spe[] = {0, 0, 1, 0, 3, 2, 4, 0,
9, 8, 6, 10, 16, 12, 18, 0};
if (antenna_mask >= sizeof(ant_to_spe))
return 0;
return ant_to_spe[antenna_mask];
}
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);


@ -417,9 +417,6 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
if (ieee80211_is_beacon(fc)) {
txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
if (!is_mt7921(dev))
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
0x18));
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
@ -550,6 +547,14 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
val |= FIELD_PREP(MT_TXD6_TX_RATE, rate);
txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
if (!is_mt7921(dev)) {
u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask);
if (!spe_idx)
spe_idx = 24 + phy_idx;
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, spe_idx));
}
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
@ -562,7 +567,7 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct mt76_phy *mphy;
struct rate_info rate = {};
bool cck = false;
u32 txrate, txs, mode;
u32 txrate, txs, mode, stbc;
txs = le32_to_cpu(txs_data[0]);
@ -582,6 +587,10 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
stbc = FIELD_GET(MT_TX_RATE_STBC, txrate);
if (stbc && rate.nss > 1)
rate.nss >>= 1;
if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
stats->tx_nss[rate.nss - 1]++;


@ -65,7 +65,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
int cmd;
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000))
(is_mt7921(dev) && addr == 0x900000) ||
(is_mt7996(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@ -744,6 +745,39 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->pkt_ext = 2;
}
static void
mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
struct sta_rec_he_v2 *he;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_V2, sizeof(*he));
he = (struct sta_rec_he_v2 *)tlv;
memcpy(he->he_phy_cap, elem->phy_cap_info, sizeof(he->he_phy_cap));
memcpy(he->he_mac_cap, elem->mac_cap_info, sizeof(he->he_mac_cap));
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
he->max_nss_mcs[CMD_HE_MCS_BW8080] =
he_cap->he_mcs_nss_supp.rx_mcs_80p80;
he->max_nss_mcs[CMD_HE_MCS_BW160] =
he_cap->he_mcs_nss_supp.rx_mcs_160;
fallthrough;
default:
he->max_nss_mcs[CMD_HE_MCS_BW80] =
he_cap->he_mcs_nss_supp.rx_mcs_80;
break;
}
he->pkt_ext = IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US;
}
static u8
mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
enum nl80211_band band, struct ieee80211_sta *sta)
@ -838,6 +872,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
/* starec he */
if (sta->deflink.he_cap.has_he) {
mt76_connac_mcu_sta_he_tlv(skb, sta);
mt76_connac_mcu_sta_he_tlv_v2(skb, sta);
if (band == NL80211_BAND_6GHZ &&
sta_state == MT76_STA_INFO_STATE_ASSOC) {
struct sta_rec_he_6g_capa *he_6g_capa;
@ -1184,6 +1219,16 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
{
if (!mtk_wed_device_active(&dev->mmio.wed))
return 0;
return mtk_wed_device_update_msg(&dev->mmio.wed, WED_WO_STA_REC,
skb->data, skb->len);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_wed_update);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx)
@ -1209,6 +1254,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
ret = mt76_connac_mcu_sta_wed_update(dev, skb);
if (ret)
return ret;
ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
return ret;
@ -1219,6 +1268,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
ret = mt76_connac_mcu_sta_wed_update(dev, skb);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
@ -1313,44 +1366,13 @@ mt76_connac_mcu_uni_bss_he_tlv(struct mt76_phy *phy, struct ieee80211_vif *vif,
he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
}
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
bool enable)
int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
struct ieee80211_chanctx_conf *ctx)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct cfg80211_chan_def *chandef = &phy->chandef;
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
enum nl80211_band band = chandef->chan->band;
struct mt76_dev *mdev = phy->dev;
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt76_connac_bss_basic_tlv basic;
struct mt76_connac_bss_qos_tlv qos;
} basic_req = {
.hdr = {
.bss_idx = mvif->idx,
},
.basic = {
.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
.dtim_period = vif->bss_conf.dtim_period,
.omac_idx = mvif->omac_idx,
.band_idx = mvif->band_idx,
.wmm_idx = mvif->wmm_idx,
.active = true, /* keep bss deactivated */
.phymode = mt76_connac_get_phy_mode(phy, vif, band, NULL),
},
.qos = {
.tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
.len = cpu_to_le16(sizeof(struct mt76_connac_bss_qos_tlv)),
.qos = vif->bss_conf.qos,
},
};
struct {
struct {
u8 bss_idx;
@ -1388,6 +1410,82 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
.band = band,
},
};
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
rlm_req.rlm.bw = CMD_CBW_40MHZ;
break;
case NL80211_CHAN_WIDTH_80:
rlm_req.rlm.bw = CMD_CBW_80MHZ;
break;
case NL80211_CHAN_WIDTH_80P80:
rlm_req.rlm.bw = CMD_CBW_8080MHZ;
break;
case NL80211_CHAN_WIDTH_160:
rlm_req.rlm.bw = CMD_CBW_160MHZ;
break;
case NL80211_CHAN_WIDTH_5:
rlm_req.rlm.bw = CMD_CBW_5MHZ;
break;
case NL80211_CHAN_WIDTH_10:
rlm_req.rlm.bw = CMD_CBW_10MHZ;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
default:
rlm_req.rlm.bw = CMD_CBW_20MHZ;
rlm_req.rlm.ht_op_info = 0;
break;
}
if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
rlm_req.rlm.sco = 1; /* SCA */
else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
rlm_req.rlm.sco = 3; /* SCB */
return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req,
sizeof(rlm_req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_set_chctx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
bool enable,
struct ieee80211_chanctx_conf *ctx)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_dev *mdev = phy->dev;
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt76_connac_bss_basic_tlv basic;
struct mt76_connac_bss_qos_tlv qos;
} basic_req = {
.hdr = {
.bss_idx = mvif->idx,
},
.basic = {
.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
.dtim_period = vif->bss_conf.dtim_period,
.omac_idx = mvif->omac_idx,
.band_idx = mvif->band_idx,
.wmm_idx = mvif->wmm_idx,
.active = true, /* keep bss deactivated */
.phymode = mt76_connac_get_phy_mode(phy, vif, band, NULL),
},
.qos = {
.tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
.len = cpu_to_le16(sizeof(struct mt76_connac_bss_qos_tlv)),
.qos = vif->bss_conf.qos,
},
};
int err, conn_type;
u8 idx, basic_phy;
@ -1474,40 +1572,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
return err;
}
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
rlm_req.rlm.bw = CMD_CBW_40MHZ;
break;
case NL80211_CHAN_WIDTH_80:
rlm_req.rlm.bw = CMD_CBW_80MHZ;
break;
case NL80211_CHAN_WIDTH_80P80:
rlm_req.rlm.bw = CMD_CBW_8080MHZ;
break;
case NL80211_CHAN_WIDTH_160:
rlm_req.rlm.bw = CMD_CBW_160MHZ;
break;
case NL80211_CHAN_WIDTH_5:
rlm_req.rlm.bw = CMD_CBW_5MHZ;
break;
case NL80211_CHAN_WIDTH_10:
rlm_req.rlm.bw = CMD_CBW_10MHZ;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
default:
rlm_req.rlm.bw = CMD_CBW_20MHZ;
rlm_req.rlm.ht_op_info = 0;
break;
}
if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
rlm_req.rlm.sco = 1; /* SCA */
else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
rlm_req.rlm.sco = 3; /* SCB */
return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req,
sizeof(rlm_req), true);
return mt76_connac_mcu_uni_set_chctx(phy, mvif, ctx);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
@ -1525,6 +1590,9 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_connac_hw_scan_req *req;
struct sk_buff *skb;
if (test_bit(MT76_HW_SCANNING, &phy->state))
return -EBUSY;
skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req));
if (!skb)
return -ENOMEM;
@ -2646,6 +2714,10 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
if (ret)
return ret;
ret = mt76_connac_mcu_sta_wed_update(dev, skb);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
@ -2834,6 +2906,9 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
if (region->feature_set & FW_FEATURE_NON_DL)
goto next;
if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
override = addr;
@ -2850,6 +2925,7 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
return err;
}
next:
offset += len;
}


@ -63,7 +63,7 @@ struct mt76_connac2_mcu_txd {
} __packed __aligned(4);
/**
* struct mt76_connac2_mcu_uni_txd - mcu command descriptor for firmware v3
* struct mt76_connac2_mcu_uni_txd - mcu command descriptor for connac2 and connac3
* @txd: hardware descriptor
* @len: total length not including txd
* @cid: command identifier
@ -121,11 +121,13 @@ struct mt76_connac2_mcu_rxd {
u8 eid;
u8 seq;
u8 rsv[2];
u8 option;
u8 rsv;
u8 ext_eid;
u8 rsv1[2];
u8 s2d_index;
u8 tlv[0];
};
struct mt76_connac2_patch_hdr {
@ -354,6 +356,16 @@ struct sta_rec_he {
u8 rsv2[2];
} __packed;
struct sta_rec_he_v2 {
__le16 tag;
__le16 len;
u8 he_mac_cap[6];
u8 he_phy_cap[11];
u8 pkt_ext;
/* 0: BW80, 1: BW160, 2: BW8080 */
__le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
} __packed;
struct sta_rec_amsdu {
__le16 tag;
__le16 len;
@ -391,7 +403,8 @@ struct sta_rec_phy {
u8 ampdu;
u8 rts_policy;
u8 rcpi;
u8 rsv[2];
u8 max_ampdu_len; /* connac3 */
u8 rsv[1];
} __packed;
struct sta_rec_he_6g_capa {
@ -452,8 +465,8 @@ struct sta_rec_bf {
u8 ibf_dbw;
u8 ibf_ncol;
u8 ibf_nrow;
u8 nrow_bw160;
u8 ncol_bw160;
u8 nrow_gt_bw80;
u8 ncol_gt_bw80;
u8 ru_start_idx;
u8 ru_end_idx;
@ -580,7 +593,7 @@ struct sta_rec_ra_fixed {
struct sta_phy phy;
u8 spe_en;
u8 spe_idx;
u8 short_preamble;
u8 is_5g;
u8 mmps_mode;
@ -779,6 +792,9 @@ enum {
STA_REC_BFEE,
STA_REC_PHY = 0x15,
STA_REC_HE_6G = 0x17,
STA_REC_HE_V2 = 0x19,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
};
@ -946,6 +962,9 @@ enum {
DEV_INFO_MAX_NUM
};
#define MCU_UNI_CMD_EVENT BIT(1)
#define MCU_UNI_CMD_UNSOLICITED_EVENT BIT(2)
/* event table */
enum {
MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
@ -981,6 +1000,17 @@ enum {
MCU_EXT_EVENT_MURU_CTRL = 0x9f,
};
/* unified event table */
enum {
MCU_UNI_EVENT_RESULT = 0x01,
MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04,
MCU_UNI_EVENT_IE_COUNTDOWN = 0x09,
MCU_UNI_EVENT_RDD_REPORT = 0x11,
};
#define MCU_UNI_CMD_EVENT BIT(1)
#define MCU_UNI_CMD_UNSOLICITED_EVENT BIT(2)
enum {
MCU_Q_QUERY,
MCU_Q_SET,
@ -1063,10 +1093,11 @@ enum {
#define MCU_CMD_ACK BIT(0)
#define MCU_CMD_UNI BIT(1)
#define MCU_CMD_QUERY BIT(2)
#define MCU_CMD_SET BIT(2)
#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \
MCU_CMD_QUERY)
MCU_CMD_SET)
#define MCU_CMD_UNI_QUERY_ACK (MCU_CMD_ACK | MCU_CMD_UNI)
#define __MCU_CMD_FIELD_ID GENMASK(7, 0)
#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
@ -1074,6 +1105,7 @@ enum {
#define __MCU_CMD_FIELD_UNI BIT(17)
#define __MCU_CMD_FIELD_CE BIT(18)
#define __MCU_CMD_FIELD_WA BIT(19)
#define __MCU_CMD_FIELD_WM BIT(20)
#define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, \
MCU_CMD_##_t)
@ -1095,6 +1127,16 @@ enum {
FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
MCU_WA_PARAM_CMD_##_t))
#define MCU_WM_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \
__MCU_CMD_FIELD_WM)
#define MCU_WM_UNI_CMD_QUERY(_t) (MCU_UNI_CMD(_t) | \
__MCU_CMD_FIELD_QUERY | \
__MCU_CMD_FIELD_WM)
#define MCU_WA_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \
__MCU_CMD_FIELD_WA)
#define MCU_WMWA_UNI_CMD(_t) (MCU_WM_UNI_CMD(_t) | \
__MCU_CMD_FIELD_WA)
enum {
MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
@ -1148,10 +1190,33 @@ enum {
MCU_UNI_CMD_DEV_INFO_UPDATE = 0x01,
MCU_UNI_CMD_BSS_INFO_UPDATE = 0x02,
MCU_UNI_CMD_STA_REC_UPDATE = 0x03,
MCU_UNI_CMD_EDCA_UPDATE = 0x04,
MCU_UNI_CMD_SUSPEND = 0x05,
MCU_UNI_CMD_OFFLOAD = 0x06,
MCU_UNI_CMD_HIF_CTRL = 0x07,
MCU_UNI_CMD_BAND_CONFIG = 0x08,
MCU_UNI_CMD_REPT_MUAR = 0x09,
MCU_UNI_CMD_WSYS_CONFIG = 0x0b,
MCU_UNI_CMD_REG_ACCESS = 0x0d,
MCU_UNI_CMD_POWER_CREL = 0x0f,
MCU_UNI_CMD_RX_HDR_TRANS = 0x12,
MCU_UNI_CMD_SER = 0x13,
MCU_UNI_CMD_TWT = 0x14,
MCU_UNI_CMD_RDD_CTRL = 0x19,
MCU_UNI_CMD_GET_MIB_INFO = 0x22,
MCU_UNI_CMD_SNIFFER = 0x24,
MCU_UNI_CMD_SR = 0x25,
MCU_UNI_CMD_ROC = 0x27,
MCU_UNI_CMD_TXPOWER = 0x2b,
MCU_UNI_CMD_EFUSE_CTRL = 0x2d,
MCU_UNI_CMD_RA = 0x2f,
MCU_UNI_CMD_MURU = 0x31,
MCU_UNI_CMD_BF = 0x33,
MCU_UNI_CMD_CHANNEL_SWITCH = 0x34,
MCU_UNI_CMD_THERMAL = 0x35,
MCU_UNI_CMD_VOW = 0x37,
MCU_UNI_CMD_RRO = 0x57,
MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58,
};
enum {
@ -1201,14 +1266,23 @@ enum {
enum {
UNI_BSS_INFO_BASIC = 0,
UNI_BSS_INFO_RA = 1,
UNI_BSS_INFO_RLM = 2,
UNI_BSS_INFO_BSS_COLOR = 4,
UNI_BSS_INFO_HE_BASIC = 5,
UNI_BSS_INFO_BCN_CONTENT = 7,
UNI_BSS_INFO_BCN_CSA = 8,
UNI_BSS_INFO_BCN_BCC = 9,
UNI_BSS_INFO_BCN_MBSSID = 10,
UNI_BSS_INFO_RATE = 11,
UNI_BSS_INFO_QBSS = 15,
UNI_BSS_INFO_SEC = 16,
UNI_BSS_INFO_TXCMD = 18,
UNI_BSS_INFO_UAPSD = 19,
UNI_BSS_INFO_PS = 21,
UNI_BSS_INFO_BCNFT = 22,
UNI_BSS_INFO_OFFLOAD = 25,
UNI_BSS_INFO_MLD = 26,
};
enum {
@ -1736,10 +1810,14 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy,
struct mt76_vif *vif,
struct ieee80211_chanctx_conf *ctx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
bool enable);
bool enable,
struct ieee80211_chanctx_conf *ctx);
int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info);
void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
@ -1813,6 +1891,7 @@ int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb);
int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
const char *fw_wa);
int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name);


@ -151,7 +151,7 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
struct ieee80211_channel *chan,
struct mt76_rate_power *t)
struct mt76x02_rate_power *t)
{
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
u16 val, addr;
@ -179,31 +179,19 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
/* ht-vht mcs 1ss 0, 1, 2, 3 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
val = mt76x02_eeprom_get(dev, addr);
t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
t->ht[0] = t->ht[1] = s6_to_s8(val);
t->ht[2] = t->ht[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
val = mt76x02_eeprom_get(dev, addr);
t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
val = mt76x02_eeprom_get(dev, addr);
t->stbc[0] = t->stbc[1] = s6_to_s8(val);
t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
val = mt76x02_eeprom_get(dev, addr);
t->stbc[4] = t->stbc[5] = s6_to_s8(val);
t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
t->ht[4] = t->ht[5] = s6_to_s8(val);
t->ht[6] = t->ht[7] = s6_to_s8(val >> 8);
/* vht mcs 8, 9 5GHz */
val = mt76x02_eeprom_get(dev, 0x12c);
t->vht[8] = s6_to_s8(val);
t->vht[9] = s6_to_s8(val >> 8);
t->vht[0] = s6_to_s8(val);
t->vht[1] = s6_to_s8(val >> 8);
delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
mt76x02_add_rate_power_offset(t, delta);
@ -235,7 +223,7 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev,
data = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER);
else
data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
target_power = (data & 0xff) - dev->rate_power.ofdm[7];
*tp = target_power + mt76x0_get_delta(dev);
return;


@ -19,7 +19,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev);
void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
struct ieee80211_channel *chan,
struct mt76_rate_power *t);
struct mt76x02_rate_power *t);
void mt76x0_get_power_info(struct mt76x02_dev *dev,
struct ieee80211_channel *chan, s8 *tp);


@ -217,7 +217,7 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband)
{
struct ieee80211_channel *chan;
struct mt76_rate_power t;
struct mt76x02_rate_power t;
s8 tp;
int i;


@ -595,10 +595,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
case 0:
/* cck rates */
tx_rate = (info[0] & 0x60) >> 5;
if (tx_rate > 3)
return -EINVAL;
*target_power = cur_power + dev->mt76.rate_power.cck[tx_rate];
*target_power = cur_power + dev->rate_power.cck[tx_rate];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, tx_rate);
break;
case 1: {
@ -635,7 +632,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
return -EINVAL;
}
*target_power = cur_power + dev->mt76.rate_power.ofdm[index];
*target_power = cur_power + dev->rate_power.ofdm[index];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, index + 4);
break;
}
@ -645,7 +642,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
if (tx_rate > 9)
return -EINVAL;
*target_power = cur_power + dev->mt76.rate_power.vht[tx_rate];
*target_power = cur_power + dev->rate_power.vht[tx_rate];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
break;
default:
@ -654,7 +651,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
if (tx_rate > 9)
return -EINVAL;
*target_power = cur_power + dev->mt76.rate_power.ht[tx_rate];
*target_power = cur_power + dev->rate_power.ht[tx_rate];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
break;
}
@ -841,7 +838,7 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
{
struct mt76_rate_power *t = &dev->mt76.rate_power;
struct mt76x02_rate_power *t = &dev->rate_power;
s8 info;
mt76x0_get_tx_power_per_rate(dev, dev->mphy.chandef.chan, t);


@ -72,6 +72,18 @@ struct mt76x02_beacon_ops {
#define mt76x02_pre_tbtt_enable(dev, enable) \
(dev)->beacon_ops->pre_tbtt_enable(dev, enable)
struct mt76x02_rate_power {
union {
struct {
s8 cck[4];
s8 ofdm[8];
s8 ht[16];
s8 vht[2];
};
s8 all[30];
};
};
struct mt76x02_dev {
union { /* must be first */
struct mt76_dev mt76;
@ -107,6 +119,8 @@ struct mt76x02_dev {
u8 beacon_hang_check;
u8 mcu_timeout;
struct mt76x02_rate_power rate_power;
struct mt76x02_calibration cal;
int txpower_conf;
@ -174,7 +188,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
struct sk_buff *skb, u32 *info);
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,


@ -20,7 +20,7 @@ mt76x02_ampdu_stat_show(struct seq_file *file, void *data)
seq_puts(file, "Count: ");
for (j = 0; j < 8; j++)
seq_printf(file, "%8d | ",
dev->mt76.aggr_stats[i * 8 + j]);
dev->mphy.aggr_stats[i * 8 + j]);
seq_puts(file, "\n");
seq_puts(file, "--------");
for (j = 0; j < 8; j++)
@ -114,6 +114,21 @@ mt76_edcca_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
"%lld\n");
static int mt76x02_read_rate_txpower(struct seq_file *s, void *data)
{
struct mt76x02_dev *dev = dev_get_drvdata(s->private);
mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
ARRAY_SIZE(dev->rate_power.cck));
mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
ARRAY_SIZE(dev->rate_power.ofdm));
mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
ARRAY_SIZE(dev->rate_power.ht));
mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
ARRAY_SIZE(dev->rate_power.vht));
return 0;
}
void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
@ -133,6 +148,8 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
debugfs_create_devm_seqfile(dev->mt76.dev, "rate_txpower", dir,
mt76x02_read_rate_txpower);
debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset);


@ -62,8 +62,6 @@ enum mt76x02_eeprom_field {
MT_EE_TX_POWER_HT_MCS4 = 0x0a8,
MT_EE_TX_POWER_HT_MCS8 = 0x0aa,
MT_EE_TX_POWER_HT_MCS12 = 0x0ac,
MT_EE_TX_POWER_VHT_MCS0 = 0x0ba,
MT_EE_TX_POWER_VHT_MCS4 = 0x0bc,
MT_EE_TX_POWER_VHT_MCS8 = 0x0be,
MT_EE_2G_TARGET_POWER = 0x0d0,


@ -25,7 +25,7 @@ void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
for (i = 0; i < 16; i++)
mt76_rr(dev, MT_TX_STAT_FIFO);
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
memset(dev->mphy.aggr_stats, 0, sizeof(dev->mphy.aggr_stats));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
@ -1191,8 +1191,8 @@ void mt76x02_mac_work(struct work_struct *work)
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
dev->mt76.aggr_stats[idx++] += val & 0xffff;
dev->mt76.aggr_stats[idx++] += val >> 16;
dev->mphy.aggr_stats[idx++] += val & 0xffff;
dev->mphy.aggr_stats[idx++] += val >> 16;
}
mt76x02_check_mac_err(dev);


@ -59,7 +59,7 @@ mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
return val;
}
int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
int mt76x02_get_max_rate_power(struct mt76x02_rate_power *r)
{
s8 ret = 0;
int i;
@ -71,7 +71,7 @@ int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
}
EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
void mt76x02_limit_rate_power(struct mt76x02_rate_power *r, int limit)
{
int i;
@ -81,7 +81,7 @@ void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
}
EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
void mt76x02_add_rate_power_offset(struct mt76x02_rate_power *r, int offset)
{
int i;
@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
{
struct mt76_rate_power *t = &dev->mt76.rate_power;
struct mt76x02_rate_power *t = &dev->rate_power;
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
@ -107,17 +107,17 @@ void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
t->ht[10]));
mt76_wr(dev, MT_TX_PWR_CFG_3,
mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
t->stbc[2]));
mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->ht[0],
t->ht[2]));
mt76_wr(dev, MT_TX_PWR_CFG_4,
mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
mt76x02_tx_power_mask(t->ht[4], t->ht[6], 0, 0));
mt76_wr(dev, MT_TX_PWR_CFG_7,
mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
t->vht[9]));
mt76x02_tx_power_mask(t->ofdm[7], t->vht[0], t->ht[7],
t->vht[1]));
mt76_wr(dev, MT_TX_PWR_CFG_8,
mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
mt76x02_tx_power_mask(t->ht[14], 0, t->vht[0], t->vht[1]));
mt76_wr(dev, MT_TX_PWR_CFG_9,
mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
mt76x02_tx_power_mask(t->ht[7], 0, t->vht[0], t->vht[1]));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);


@ -34,10 +34,10 @@ mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
}
}
void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
void mt76x02_add_rate_power_offset(struct mt76x02_rate_power *r, int offset);
void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
void mt76x02_limit_rate_power(struct mt76x02_rate_power *r, int limit);
int mt76x02_get_max_rate_power(struct mt76x02_rate_power *r);
void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);


@ -33,7 +33,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
EXPORT_SYMBOL_GPL(mt76x02_tx);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
struct sk_buff *skb, u32 *info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
void *rxwi = skb->data;
@ -62,23 +62,23 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
if (mcs == 8 || mcs == 9) {
max_txpwr = dev->mt76.rate_power.vht[8];
max_txpwr = dev->rate_power.vht[0];
} else {
u8 nss, idx;
nss = ieee80211_rate_get_vht_nss(rate);
idx = ((nss - 1) << 3) + mcs;
max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
max_txpwr = dev->rate_power.ht[idx & 0xf];
}
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
} else {
enum nl80211_band band = dev->mphy.chandef.chan->band;
if (band == NL80211_BAND_2GHZ) {
const struct ieee80211_rate *r;
struct wiphy *wiphy = dev->mt76.hw->wiphy;
struct mt76_rate_power *rp = &dev->mt76.rate_power;
struct mt76x02_rate_power *rp = &dev->rate_power;
r = &wiphy->bands[band]->bitrates[rate->idx];
if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
@ -86,7 +86,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
else
max_txpwr = rp->ofdm[r->hw_value & 0x7];
} else {
max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
}
}
@ -112,7 +112,7 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
s8 txpwr_adj;
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
dev->mt76.rate_power.ofdm[4]);
dev->rate_power.ofdm[4]);
mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,


@ -280,7 +280,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76x02_rate_power *t,
struct ieee80211_channel *chan)
{
bool is_5ghz;
@ -324,22 +324,10 @@ void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
if (!is_5ghz)
val >>= 8;
t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8);
t->stbc[8] = t->vht[8];
t->stbc[9] = t->vht[9];
t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val >> 8);
}
EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);


@ -40,7 +40,7 @@ struct mt76x2_temp_comp {
unsigned int low_slope; /* J / dB */
};
void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76x02_rate_power *t,
struct ieee80211_channel *chan);
void mt76x2_get_power_info(struct mt76x02_dev *dev,
struct mt76x2_tx_power_info *t,


@ -182,7 +182,7 @@ void mt76x2_init_txpower(struct mt76x02_dev *dev,
{
struct ieee80211_channel *chan;
struct mt76x2_tx_power_info txp;
struct mt76_rate_power t = {};
struct mt76x02_rate_power t = {};
int i;
for (i = 0; i < sband->n_channels; i++) {


@ -116,7 +116,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
static int
mt76x2_get_min_rate_power(struct mt76_rate_power *r)
mt76x2_get_min_rate_power(struct mt76x02_rate_power *r)
{
int i;
s8 ret = 0;
@ -140,7 +140,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
int txp_0, txp_1, delta = 0;
struct mt76_rate_power t = {};
struct mt76x02_rate_power t = {};
int base_power, gain;
mt76x2_get_power_info(dev, &txp, chan);
@ -175,7 +175,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
dev->target_power = txp.target_power;
dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
dev->mt76.rate_power = t;
dev->rate_power = t;
mt76x02_phy_set_txpower(dev, txp_0, txp_1);
}


@ -2,6 +2,7 @@
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
select MT76_CONNAC_LIB
select WANT_DEV_COREDUMP
depends on MAC80211
depends on PCI
select RELAY


@ -7,3 +7,4 @@ mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7915e-$(CONFIG_MT7986_WMAC) += soc.o
mt7915e-$(CONFIG_DEV_COREDUMP) += coredump.o


@ -0,0 +1,410 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2022 MediaTek Inc. */
#include <linux/devcoredump.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/utsname.h>
#include "coredump.h"
static bool coredump_memdump;
module_param(coredump_memdump, bool, 0644);
MODULE_PARM_DESC(coredump_memdump, "Optional ability to dump firmware memory");
static const struct mt7915_mem_region mt7915_mem_regions[] = {
{
.start = 0xe003b400,
.len = 0x00003bff,
.name = "CRAM",
},
};
static const struct mt7915_mem_region mt7916_mem_regions[] = {
{
.start = 0x00800000,
.len = 0x0005ffff,
.name = "ROM",
},
{
.start = 0x00900000,
.len = 0x00013fff,
.name = "ULM1",
},
{
.start = 0x02200000,
.len = 0x0004ffff,
.name = "ULM2",
},
{
.start = 0x02300000,
.len = 0x0004ffff,
.name = "ULM3",
},
{
.start = 0x00400000,
.len = 0x00027fff,
.name = "SRAM",
},
{
.start = 0xe0000000,
.len = 0x00157fff,
.name = "CRAM",
},
};
static const struct mt7915_mem_region mt7986_mem_regions[] = {
{
.start = 0x00800000,
.len = 0x0005ffff,
.name = "ROM",
},
{
.start = 0x00900000,
.len = 0x0000ffff,
.name = "ULM1",
},
{
.start = 0x02200000,
.len = 0x0004ffff,
.name = "ULM2",
},
{
.start = 0x02300000,
.len = 0x0004ffff,
.name = "ULM3",
},
{
.start = 0x00400000,
.len = 0x00017fff,
.name = "SRAM",
},
{
.start = 0xe0000000,
.len = 0x00113fff,
.name = "CRAM",
},
};
const struct mt7915_mem_region*
mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num)
{
switch (mt76_chip(&dev->mt76)) {
case 0x7915:
*num = ARRAY_SIZE(mt7915_mem_regions);
return &mt7915_mem_regions[0];
case 0x7986:
*num = ARRAY_SIZE(mt7986_mem_regions);
return &mt7986_mem_regions[0];
case 0x7916:
*num = ARRAY_SIZE(mt7916_mem_regions);
return &mt7916_mem_regions[0];
default:
return NULL;
}
}
static int mt7915_coredump_get_mem_size(struct mt7915_dev *dev)
{
const struct mt7915_mem_region *mem_region;
size_t size = 0;
u32 num;
int i;
mem_region = mt7915_coredump_get_mem_layout(dev, &num);
if (!mem_region)
return 0;
for (i = 0; i < num; i++) {
size += mem_region->len;
mem_region++;
}
/* reserve space for the headers */
size += num * sizeof(struct mt7915_mem_hdr);
/* make sure it is aligned to 4 bytes for debug message printout */
size = ALIGN(size, 4);
return size;
}
struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev)
{
struct mt7915_crash_data *crash_data = dev->coredump.crash_data;
lockdep_assert_held(&dev->dump_mutex);
guid_gen(&crash_data->guid);
ktime_get_real_ts64(&crash_data->timestamp);
return crash_data;
}
static void
mt7915_coredump_fw_state(struct mt7915_dev *dev, struct mt7915_coredump *dump,
bool *exception)
{
u32 state, count, type;
type = (u32)mt76_get_field(dev, MT_FW_EXCEPT_TYPE, GENMASK(7, 0));
state = (u32)mt76_get_field(dev, MT_FW_ASSERT_STAT, GENMASK(7, 0));
count = is_mt7915(&dev->mt76) ?
(u32)mt76_get_field(dev, MT_FW_EXCEPT_COUNT, GENMASK(15, 8)) :
(u32)mt76_get_field(dev, MT_FW_EXCEPT_COUNT, GENMASK(7, 0));
/* normal mode: driver can manually trigger assert for detailed info */
if (!count)
strscpy(dump->fw_state, "normal", sizeof(dump->fw_state));
else if (state > 1 && (count == 1) && type == 5)
strscpy(dump->fw_state, "assert", sizeof(dump->fw_state));
else if ((state > 1 && count == 1) || count > 1)
strscpy(dump->fw_state, "exception", sizeof(dump->fw_state));
*exception = !!count;
}
static void
mt7915_coredump_fw_trace(struct mt7915_dev *dev, struct mt7915_coredump *dump,
bool exception)
{
u32 n, irq, sch, base = MT_FW_EINT_INFO;
/* trap or run? */
dump->last_msg_id = mt76_rr(dev, MT_FW_LAST_MSG_ID);
n = is_mt7915(&dev->mt76) ?
(u32)mt76_get_field(dev, base, GENMASK(7, 0)) :
(u32)mt76_get_field(dev, base, GENMASK(15, 8));
dump->eint_info_idx = n;
irq = mt76_rr(dev, base + 0x8);
n = is_mt7915(&dev->mt76) ?
FIELD_GET(GENMASK(7, 0), irq) : FIELD_GET(GENMASK(23, 16), irq);
dump->irq_info_idx = n;
sch = mt76_rr(dev, MT_FW_SCHED_INFO);
n = is_mt7915(&dev->mt76) ?
FIELD_GET(GENMASK(7, 0), sch) : FIELD_GET(GENMASK(15, 8), sch);
dump->sched_info_idx = n;
if (exception) {
u32 i, y;
/* sched trace */
n = is_mt7915(&dev->mt76) ?
FIELD_GET(GENMASK(15, 8), sch) : FIELD_GET(GENMASK(7, 0), sch);
n = n > 60 ? 60 : n;
strscpy(dump->trace_sched, "(sched_info) id, time",
sizeof(dump->trace_sched));
for (y = dump->sched_info_idx, i = 0; i < n; i++, y++) {
mt7915_memcpy_fromio(dev, dump->sched, base + 0xc + y * 12,
sizeof(dump->sched));
y = y >= n ? 0 : y;
}
/* irq trace */
n = is_mt7915(&dev->mt76) ?
FIELD_GET(GENMASK(15, 8), irq) : FIELD_GET(GENMASK(7, 0), irq);
n = n > 60 ? 60 : n;
strscpy(dump->trace_irq, "(irq_info) id, time",
sizeof(dump->trace_irq));
for (y = dump->irq_info_idx, i = 0; i < n; i++, y++) {
mt7915_memcpy_fromio(dev, dump->irq, base + 0x4 + y * 16,
sizeof(dump->irq));
y = y >= n ? 0 : y;
}
}
}
static void
mt7915_coredump_fw_stack(struct mt7915_dev *dev, struct mt7915_coredump *dump,
bool exception)
{
u32 oldest, i, idx;
/* stop call stack record */
if (!exception)
mt76_clear(dev, 0x89050200, BIT(0));
oldest = (u32)mt76_get_field(dev, 0x89050200, GENMASK(20, 16)) + 2;
for (i = 0; i < 16; i++) {
idx = ((oldest + 2 * i + 1) % 32);
dump->call_stack[i] = mt76_rr(dev, 0x89050204 + idx * 4);
}
/* start call stack record */
if (!exception)
mt76_set(dev, 0x89050200, BIT(0));
}
static void
mt7915_coredump_fw_task(struct mt7915_dev *dev, struct mt7915_coredump *dump)
{
u32 offs = is_mt7915(&dev->mt76) ? 0xe0 : 0x170;
strscpy(dump->task_qid, "(task queue id) read, write",
sizeof(dump->task_qid));
dump->taskq[0].read = mt76_rr(dev, MT_FW_TASK_QID1);
dump->taskq[0].write = mt76_rr(dev, MT_FW_TASK_QID1 - 4);
dump->taskq[1].read = mt76_rr(dev, MT_FW_TASK_QID2);
dump->taskq[1].write = mt76_rr(dev, MT_FW_TASK_QID2 - 4);
strscpy(dump->task_info, "(task stack) start, end, size",
sizeof(dump->task_info));
dump->taski[0].start = mt76_rr(dev, MT_FW_TASK_START);
dump->taski[0].end = mt76_rr(dev, MT_FW_TASK_END);
dump->taski[0].size = mt76_rr(dev, MT_FW_TASK_SIZE);
dump->taski[1].start = mt76_rr(dev, MT_FW_TASK_START + offs);
dump->taski[1].end = mt76_rr(dev, MT_FW_TASK_END + offs);
dump->taski[1].size = mt76_rr(dev, MT_FW_TASK_SIZE + offs);
}
static void
mt7915_coredump_fw_context(struct mt7915_dev *dev, struct mt7915_coredump *dump)
{
u32 count, idx, id;
count = mt76_rr(dev, MT_FW_CIRQ_COUNT);
/* current context */
if (!count) {
strscpy(dump->fw_context, "(context) interrupt",
sizeof(dump->fw_context));
idx = is_mt7915(&dev->mt76) ?
(u32)mt76_get_field(dev, MT_FW_CIRQ_IDX, GENMASK(31, 16)) :
(u32)mt76_get_field(dev, MT_FW_CIRQ_IDX, GENMASK(15, 0));
dump->context.idx = idx;
dump->context.handler = mt76_rr(dev, MT_FW_CIRQ_LISR);
} else {
idx = mt76_rr(dev, MT_FW_TASK_IDX);
id = mt76_rr(dev, MT_FW_TASK_ID);
if (!id && idx == 3) {
strscpy(dump->fw_context, "(context) idle",
sizeof(dump->fw_context));
} else if (id && idx != 3) {
strscpy(dump->fw_context, "(context) task",
sizeof(dump->fw_context));
dump->context.idx = idx;
dump->context.handler = id;
}
}
}
static struct mt7915_coredump *mt7915_coredump_build(struct mt7915_dev *dev)
{
struct mt7915_crash_data *crash_data = dev->coredump.crash_data;
struct mt7915_coredump *dump;
struct mt7915_coredump_mem *dump_mem;
size_t len, sofar = 0, hdr_len = sizeof(*dump);
unsigned char *buf;
bool exception;
len = hdr_len;
if (coredump_memdump && crash_data->memdump_buf_len)
len += sizeof(*dump_mem) + crash_data->memdump_buf_len;
sofar += hdr_len;
/* this is going to get big when we start dumping memory and such,
* so go ahead and use vmalloc.
*/
buf = vzalloc(len);
if (!buf)
return NULL;
mutex_lock(&dev->dump_mutex);
dump = (struct mt7915_coredump *)(buf);
dump->len = len;
/* plain text */
strscpy(dump->magic, "mt76-crash-dump", sizeof(dump->magic));
strscpy(dump->kernel, init_utsname()->release, sizeof(dump->kernel));
strscpy(dump->fw_ver, dev->mt76.hw->wiphy->fw_version,
sizeof(dump->fw_ver));
guid_copy(&dump->guid, &crash_data->guid);
dump->tv_sec = crash_data->timestamp.tv_sec;
dump->tv_nsec = crash_data->timestamp.tv_nsec;
dump->device_id = mt76_chip(&dev->mt76);
mt7915_coredump_fw_state(dev, dump, &exception);
mt7915_coredump_fw_trace(dev, dump, exception);
mt7915_coredump_fw_task(dev, dump);
mt7915_coredump_fw_context(dev, dump);
mt7915_coredump_fw_stack(dev, dump, exception);
/* gather memory content */
dump_mem = (struct mt7915_coredump_mem *)(buf + sofar);
dump_mem->len = crash_data->memdump_buf_len;
if (coredump_memdump && crash_data->memdump_buf_len)
memcpy(dump_mem->data, crash_data->memdump_buf,
crash_data->memdump_buf_len);
mutex_unlock(&dev->dump_mutex);
return dump;
}
int mt7915_coredump_submit(struct mt7915_dev *dev)
{
struct mt7915_coredump *dump;
dump = mt7915_coredump_build(dev);
if (!dump) {
dev_warn(dev->mt76.dev, "no crash dump data found\n");
return -ENODATA;
}
dev_coredumpv(dev->mt76.dev, dump, dump->len, GFP_KERNEL);
return 0;
}
int mt7915_coredump_register(struct mt7915_dev *dev)
{
struct mt7915_crash_data *crash_data;
crash_data = vzalloc(sizeof(*dev->coredump.crash_data));
if (!crash_data)
return -ENOMEM;
dev->coredump.crash_data = crash_data;
if (coredump_memdump) {
crash_data->memdump_buf_len = mt7915_coredump_get_mem_size(dev);
if (!crash_data->memdump_buf_len)
/* no memory content */
return 0;
crash_data->memdump_buf = vzalloc(crash_data->memdump_buf_len);
if (!crash_data->memdump_buf) {
vfree(crash_data);
return -ENOMEM;
}
}
return 0;
}
void mt7915_coredump_unregister(struct mt7915_dev *dev)
{
if (dev->coredump.crash_data->memdump_buf) {
vfree(dev->coredump.crash_data->memdump_buf);
dev->coredump.crash_data->memdump_buf = NULL;
dev->coredump.crash_data->memdump_buf_len = 0;
}
vfree(dev->coredump.crash_data);
dev->coredump.crash_data = NULL;
}


@ -0,0 +1,136 @@
/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2022 MediaTek Inc. */
#ifndef _COREDUMP_H_
#define _COREDUMP_H_
#include "mt7915.h"
struct trace {
u32 id;
u32 timestamp;
};
struct mt7915_coredump {
char magic[16];
u32 len;
guid_t guid;
/* time-of-day stamp */
u64 tv_sec;
/* time-of-day stamp, nano-seconds */
u64 tv_nsec;
/* kernel version */
char kernel[64];
/* firmware version */
char fw_ver[ETHTOOL_FWVERS_LEN];
u32 device_id;
/* exception state */
char fw_state[12];
u32 last_msg_id;
u32 eint_info_idx;
u32 irq_info_idx;
u32 sched_info_idx;
/* schedule info */
char trace_sched[32];
struct {
struct trace t;
u32 pc;
} sched[60];
/* irq info */
char trace_irq[32];
struct trace irq[60];
/* task queue status */
char task_qid[32];
struct {
u32 read;
u32 write;
} taskq[2];
/* task stack info */
char task_info[32];
struct {
u32 start;
u32 end;
u32 size;
} taski[2];
/* firmware context */
char fw_context[24];
struct {
u32 idx;
u32 handler;
} context;
/* link registers calltrace */
u32 call_stack[16];
/* memory content */
u8 data[];
} __packed;
struct mt7915_coredump_mem {
u32 len;
u8 data[];
} __packed;
struct mt7915_mem_hdr {
u32 start;
u32 len;
u8 data[];
};
struct mt7915_mem_region {
u32 start;
size_t len;
const char *name;
};
#ifdef CONFIG_DEV_COREDUMP
const struct mt7915_mem_region *
mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num);
struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev);
int mt7915_coredump_submit(struct mt7915_dev *dev);
int mt7915_coredump_register(struct mt7915_dev *dev);
void mt7915_coredump_unregister(struct mt7915_dev *dev);
#else /* CONFIG_DEV_COREDUMP */
static inline const struct mt7915_mem_region *
mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num)
{
return NULL;
}
static inline int mt7915_coredump_submit(struct mt7915_dev *dev)
{
return 0;
}
static inline struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev)
{
return NULL;
}
static inline int mt7915_coredump_register(struct mt7915_dev *dev)
{
return 0;
}
static inline void mt7915_coredump_unregister(struct mt7915_dev *dev)
{
}
#endif /* CONFIG_DEV_COREDUMP */
#endif /* _COREDUMP_H_ */


@ -46,12 +46,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get,
/* test knob of system error recovery */
static ssize_t
mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
mt7915_sys_recovery_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
bool band = phy->mt76->band_idx;
char buf[16];
int ret = 0;
u16 val;
@ -71,9 +71,19 @@ mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
return -EINVAL;
switch (val) {
/*
* 0: grab firmware current SER state.
* 1: trigger & enable system error L1 recovery.
* 2: trigger & enable system error L2 recovery.
* 3: trigger & enable system error L3 rx abort.
* 4: trigger & enable system error L3 tx abort.
* 5: trigger & enable system error L3 tx disable.
* 6: trigger & enable system error L3 bf recovery.
* 7: trigger & enable system error full recovery.
* 8: trigger firmware crash.
*/
case SER_QUERY:
/* grab firmware SER stats */
ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy);
ret = mt7915_mcu_set_ser(dev, 0, 0, band);
break;
case SER_SET_RECOVER_L1:
case SER_SET_RECOVER_L2:
@ -81,11 +91,28 @@ mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
case SER_SET_RECOVER_L3_TX_ABORT:
case SER_SET_RECOVER_L3_TX_DISABLE:
case SER_SET_RECOVER_L3_BF:
ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy);
ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), band);
if (ret)
return ret;
ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy);
ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, band);
break;
/* enable full chip reset */
case SER_SET_RECOVER_FULL:
mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
ret = mt7915_mcu_set_ser(dev, 1, 3, band);
if (ret)
return ret;
dev->recovery.state |= MT_MCU_CMD_WDT_MASK;
mt7915_reset(dev);
break;
/* WARNING: trigger firmware crash */
case SER_SET_SYSTEM_ASSERT:
mt76_wr(dev, MT_MCU_WM_CIRQ_EINT_MASK_CLR_ADDR, BIT(18));
mt76_wr(dev, MT_MCU_WM_CIRQ_EINT_SOFT_ADDR, BIT(18));
break;
default:
break;
@ -95,20 +122,45 @@ mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
}
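The recovery levels documented in the comment above are driven by writing a single decimal value to the debugfs node registered further down in this file. A minimal user-space sketch (not part of the patch; the debugfs path is an assumption based on the usual mt76 layout):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumed location of the node created by mt7915_init_debugfs() */
	const char *node = "/sys/kernel/debug/ieee80211/phy0/mt76/sys_recovery";
	const char *level = "1\n";	/* 1: trigger & enable L1 recovery */
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, level, strlen(level)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Reading the same node back returns the help text plus the SER statistics assembled in mt7915_sys_recovery_get().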
static ssize_t
mt7915_fw_ser_get(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
mt7915_sys_recovery_get(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
char *buff;
int desc = 0;
ssize_t ret;
static const size_t bufsz = 400;
static const size_t bufsz = 1024;
buff = kmalloc(bufsz, GFP_KERNEL);
if (!buff)
return -ENOMEM;
/* HELP */
desc += scnprintf(buff + desc, bufsz - desc,
"Please echo the correct value ...\n");
desc += scnprintf(buff + desc, bufsz - desc,
"0: grab firmware transient SER state\n");
desc += scnprintf(buff + desc, bufsz - desc,
"1: trigger system error L1 recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
"2: trigger system error L2 recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
"3: trigger system error L3 rx abort\n");
desc += scnprintf(buff + desc, bufsz - desc,
"4: trigger system error L3 tx abort\n");
desc += scnprintf(buff + desc, bufsz - desc,
"5: trigger system error L3 tx disable\n");
desc += scnprintf(buff + desc, bufsz - desc,
"6: trigger system error L3 bf recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
"7: trigger system error full recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
"8: trigger firmware crash\n");
/* SER statistics */
desc += scnprintf(buff + desc, bufsz - desc,
"\nlet's dump firmware SER statistics...\n");
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_STATUS = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_SER_STATS));
@ -139,15 +191,19 @@ mt7915_fw_ser_get(struct file *file, char __user *user_buf,
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_LMAC_WISR7_B1 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN1_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"\nSYS_RESET_COUNT: WM %d, WA %d\n",
dev->recovery.wm_reset_count,
dev->recovery.wa_reset_count);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
return ret;
}
static const struct file_operations mt7915_fw_ser_ops = {
.write = mt7915_fw_ser_set,
.read = mt7915_fw_ser_get,
static const struct file_operations mt7915_sys_recovery_ops = {
.write = mt7915_sys_recovery_set,
.read = mt7915_sys_recovery_get,
.open = simple_open,
.llseek = default_llseek,
};
@ -598,10 +654,6 @@ mt7915_fw_util_wm_show(struct seq_file *file, void *data)
struct mt7915_dev *dev = file->private;
seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WM_MCU_PC));
seq_printf(file, "Exception state: 0x%x\n",
is_mt7915(&dev->mt76) ?
(u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(15, 8)) :
(u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(7, 0)));
if (dev->fw.debug_wm) {
seq_printf(file, "Busy: %u%% Peak busy: %u%%\n",
@ -639,16 +691,17 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
{
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int bound[15], range[4], i, n;
int bound[15], range[4], i;
u8 band = phy->mt76->band_idx;
/* Tx ampdu stat */
for (i = 0; i < ARRAY_SIZE(range); i++)
range[i] = mt76_rr(dev, MT_MIB_ARNG(phy->band_idx, i));
range[i] = mt76_rr(dev, MT_MIB_ARNG(band, i));
for (i = 0; i < ARRAY_SIZE(bound); i++)
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, phy->band_idx);
seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, band);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@ -656,9 +709,8 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
bound[i] + 1, bound[i + 1]);
seq_puts(file, "\nCount: ");
n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
seq_puts(file, "\n");
seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
@ -906,35 +958,199 @@ mt7915_xmit_queues_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_xmit_queues);
static int
mt7915_rate_txpower_show(struct seq_file *file, void *data)
#define mt7915_txpower_puts(prefix, rate) \
({ \
len += scnprintf(buf + len, sz - len, "%-16s:", #prefix " (tmac)"); \
for (i = 0; i < mt7915_sku_group_len[rate]; i++, offs++) \
len += scnprintf(buf + len, sz - len, " %6d", txpwr[offs]); \
len += scnprintf(buf + len, sz - len, "\n"); \
})
#define mt7915_txpower_sets(rate, pwr, flag) \
({ \
offs += len; \
len = mt7915_sku_group_len[rate]; \
if (mode == flag) { \
for (i = 0; i < len; i++) \
req.txpower_sku[offs + i] = pwr; \
} \
})
static ssize_t
mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
static const char * const sku_group_name[] = {
"CCK", "OFDM", "HT20", "HT40",
"VHT20", "VHT40", "VHT80", "VHT160",
"RU26", "RU52", "RU106", "RU242/SU20",
"RU484/SU40", "RU996/SU80", "RU2x996/SU160"
};
struct mt7915_phy *phy = file->private;
s8 txpower[MT7915_SKU_RATE_NUM], *buf;
int i;
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
s8 txpwr[MT7915_SKU_RATE_NUM];
static const size_t sz = 2048;
u8 band = phy->mt76->band_idx;
int i, offs = 0, len = 0;
ssize_t ret;
char *buf;
u32 reg;
seq_printf(file, "\nBand %d\n", phy != &phy->dev->phy);
mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
u8 mcs_num = mt7915_sku_group_len[i];
buf = kzalloc(sz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160)
mcs_num = 10;
ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr));
if (ret)
return ret;
mt76_seq_puts_array(file, sku_group_name[i], buf, mcs_num);
buf += mt7915_sku_group_len[i];
}
/* Txpower propagation path: TMAC -> TXV -> BBP */
len += scnprintf(buf + len, sz - len,
"\nPhy%d Tx power table (channel %d)\n",
phy != &dev->phy, phy->mt76->chandef.chan->hw_value);
len += scnprintf(buf + len, sz - len, "%-16s %6s %6s %6s %6s\n",
" ", "1m", "2m", "5m", "11m");
mt7915_txpower_puts(CCK, SKU_CCK);
return 0;
len += scnprintf(buf + len, sz - len,
"%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "6m", "9m", "12m", "18m", "24m", "36m", "48m",
"54m");
mt7915_txpower_puts(OFDM, SKU_OFDM);
len += scnprintf(buf + len, sz - len,
"%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4",
"mcs5", "mcs6", "mcs7");
mt7915_txpower_puts(HT20, SKU_HT_BW20);
len += scnprintf(buf + len, sz - len,
"%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
"mcs6", "mcs7", "mcs32");
mt7915_txpower_puts(HT40, SKU_HT_BW40);
len += scnprintf(buf + len, sz - len,
"%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
"mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11");
mt7915_txpower_puts(VHT20, SKU_VHT_BW20);
mt7915_txpower_puts(VHT40, SKU_VHT_BW40);
mt7915_txpower_puts(VHT80, SKU_VHT_BW80);
mt7915_txpower_puts(VHT160, SKU_VHT_BW160);
mt7915_txpower_puts(HE26, SKU_HE_RU26);
mt7915_txpower_puts(HE52, SKU_HE_RU52);
mt7915_txpower_puts(HE106, SKU_HE_RU106);
mt7915_txpower_puts(HE242, SKU_HE_RU242);
mt7915_txpower_puts(HE484, SKU_HE_RU484);
mt7915_txpower_puts(HE996, SKU_HE_RU996);
mt7915_txpower_puts(HE996x2, SKU_HE_RU2x996);
reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_TPC_CTRL_STAT(band) :
MT_WF_PHY_TPC_CTRL_STAT_MT7916(band);
len += scnprintf(buf + len, sz - len, "\nTx power (bbp) : %6ld\n",
mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER));
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
DEFINE_SHOW_ATTRIBUTE(mt7915_rate_txpower);
static ssize_t
mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct mt7915_mcu_txpower_sku req = {
.format_id = TX_POWER_LIMIT_TABLE,
.band_idx = phy->mt76->band_idx,
};
char buf[100];
int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
enum mac80211_rx_encoding mode;
u32 offs = 0, len = 0;
if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (count && buf[count - 1] == '\n')
buf[count - 1] = '\0';
else
buf[count] = '\0';
if (sscanf(buf, "%u %u %u %u %u",
&mode, &pwr160, &pwr80, &pwr40, &pwr20) != 5) {
dev_warn(dev->mt76.dev,
"per bandwidth power limit: Mode BW160 BW80 BW40 BW20");
return -EINVAL;
}
if (mode > RX_ENC_HE)
return -EINVAL;
if (pwr160)
pwr160 = mt7915_get_power_bound(phy, pwr160);
if (pwr80)
pwr80 = mt7915_get_power_bound(phy, pwr80);
if (pwr40)
pwr40 = mt7915_get_power_bound(phy, pwr40);
if (pwr20)
pwr20 = mt7915_get_power_bound(phy, pwr20);
if (pwr160 < 0 || pwr80 < 0 || pwr40 < 0 || pwr20 < 0)
return -EINVAL;
mutex_lock(&dev->mt76.mutex);
ret = mt7915_mcu_get_txpower_sku(phy, req.txpower_sku,
sizeof(req.txpower_sku));
if (ret)
goto out;
mt7915_txpower_sets(SKU_CCK, pwr20, RX_ENC_LEGACY);
mt7915_txpower_sets(SKU_OFDM, pwr20, RX_ENC_LEGACY);
if (mode == RX_ENC_LEGACY)
goto skip;
mt7915_txpower_sets(SKU_HT_BW20, pwr20, RX_ENC_HT);
mt7915_txpower_sets(SKU_HT_BW40, pwr40, RX_ENC_HT);
if (mode == RX_ENC_HT)
goto skip;
mt7915_txpower_sets(SKU_VHT_BW20, pwr20, RX_ENC_VHT);
mt7915_txpower_sets(SKU_VHT_BW40, pwr40, RX_ENC_VHT);
mt7915_txpower_sets(SKU_VHT_BW80, pwr80, RX_ENC_VHT);
mt7915_txpower_sets(SKU_VHT_BW160, pwr160, RX_ENC_VHT);
if (mode == RX_ENC_VHT)
goto skip;
mt7915_txpower_sets(SKU_HE_RU26, pwr20, RX_ENC_HE + 1);
mt7915_txpower_sets(SKU_HE_RU52, pwr20, RX_ENC_HE + 1);
mt7915_txpower_sets(SKU_HE_RU106, pwr20, RX_ENC_HE + 1);
mt7915_txpower_sets(SKU_HE_RU242, pwr20, RX_ENC_HE);
mt7915_txpower_sets(SKU_HE_RU484, pwr40, RX_ENC_HE);
mt7915_txpower_sets(SKU_HE_RU996, pwr80, RX_ENC_HE);
mt7915_txpower_sets(SKU_HE_RU2x996, pwr160, RX_ENC_HE);
skip:
ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
&req, sizeof(req), true);
if (ret)
goto out;
mphy->txpower_cur = max(mphy->txpower_cur,
max(pwr160, max(pwr80, max(pwr40, pwr20))));
out:
mutex_unlock(&dev->mt76.mutex);
return ret ? ret : count;
}
static const struct file_operations mt7915_rate_txpower_fops = {
.write = mt7915_rate_txpower_set,
.read = mt7915_rate_txpower_get,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
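The write handler above accepts "mode BW160 BW80 BW40 BW20", clamps every non-zero value through mt7915_get_power_bound() and then patches the firmware SKU table per rate group. A stand-alone sketch of that clamp-then-fill idea (the 0.5 dB unit and the ceiling are assumptions; the real bound helper is defined elsewhere in the driver and may differ):

#include <stdio.h>

/* illustrative stand-in for mt7915_get_power_bound(): scale dBm to 0.5 dB
 * steps and clamp to a per-phy ceiling (assumed semantics, not the driver's
 * exact implementation)
 */
static int power_bound(int dbm, int ceiling_half_db)
{
	int half_db = dbm * 2;

	return half_db > ceiling_half_db ? ceiling_half_db : half_db;
}

int main(void)
{
	int bw_dbm[4] = { 20, 20, 18, 16 };	/* BW160 BW80 BW40 BW20 */
	int ceiling = 2 * 23;			/* e.g. a 23 dBm cap */
	int i;

	for (i = 0; i < 4; i++)
		printf("bw[%d] -> %d (0.5 dB units)\n", i,
		       power_bound(bw_dbm[i], ceiling));
	return 0;
}

In the hunk the clamped values are copied into req.txpower_sku[] by mt7915_txpower_sets() and pushed with MCU_EXT_CMD(TX_POWER_FEATURE_CTRL).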
static int
mt7915_twt_stats(struct seq_file *s, void *data)
@ -963,7 +1179,7 @@ mt7915_twt_stats(struct seq_file *s, void *data)
}
/* The index of RF registers uses the generic regidx, combined with two parts:
* WF selection [31:28] and offset [27:0].
* WF selection [31:24] and offset [23:0].
*/
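A small sketch of how a combined regidx of this shape can be packed and unpacked (plain shifts standing in for the kernel's FIELD_PREP/FIELD_GET; the field widths follow the updated comment above):

#include <stdint.h>
#include <stdio.h>

#define WF_SEL_SHIFT	24
#define WF_SEL_MASK	(0xffu << WF_SEL_SHIFT)	/* regidx[31:24] */
#define WF_OFFS_MASK	0x00ffffffu		/* regidx[23:0]  */

static uint32_t regidx_pack(uint32_t wf_sel, uint32_t offset)
{
	return ((wf_sel << WF_SEL_SHIFT) & WF_SEL_MASK) |
	       (offset & WF_OFFS_MASK);
}

int main(void)
{
	uint32_t regidx = regidx_pack(1, 0x1234);

	printf("wf=%u offset=0x%x\n",
	       (unsigned)((regidx & WF_SEL_MASK) >> WF_SEL_SHIFT),
	       (unsigned)(regidx & WF_OFFS_MASK));
	return 0;
}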
static int
mt7915_rf_regval_get(void *data, u64 *val)
@ -1010,7 +1226,8 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("xmit-queues", 0400, dir, phy,
&mt7915_xmit_queues_fops);
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
debugfs_create_file("fw_ser", 0600, dir, phy, &mt7915_fw_ser_ops);
debugfs_create_file("sys_recovery", 0600, dir, phy,
&mt7915_sys_recovery_ops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
@ -1026,7 +1243,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
mt7915_twt_stats);
debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
if (!dev->dbdc_support || phy->band_idx) {
if (!dev->dbdc_support || phy->mt76->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
debugfs_create_file("radar_trigger", 0200, dir, dev,


@ -11,7 +11,11 @@ mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base
struct mt7915_dev *dev = phy->dev;
if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
ring_base = MT_WED_TX_RING_BASE;
if (is_mt7986(&dev->mt76))
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
else
ring_base = MT_WED_TX_RING_BASE;
idx -= MT_TXQ_ID(0);
}
@ -46,29 +50,65 @@ static void mt7915_dma_config(struct mt7915_dev *dev)
#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
if (is_mt7915(&dev->mt76)) {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
MT7915_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
MT7915_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
MT7915_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
MT7915_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
MT7915_RXQ_MCU_WA_EXT);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
MT7915_RXQ_MCU_WA);
TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
MT7915_TXQ_FWDL);
} else {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
MT7916_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
MT7916_RXQ_MCU_WA_EXT);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
MT7915_TXQ_FWDL);
if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
MT7916_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
MT7916_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_WED_RX_DONE_BAND1_MT7916,
MT7916_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
MT7916_RXQ_MCU_WA_MAIN);
TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
MT7915_TXQ_BAND1);
} else {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
MT7916_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
MT7916_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
MT7916_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
MT7916_RXQ_MCU_WA_MAIN);
TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
MT7915_TXQ_BAND1);
}
}
}
@ -313,17 +353,26 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
MT_INT_TX_DONE_MCU |
MT_INT_MCU_CMD;
if (!dev->phy.band_idx)
if (!dev->phy.mt76->band_idx)
irq_mask |= MT_INT_BAND0_RX_DONE;
if (dev->dbdc_support || dev->phy.band_idx)
if (dev->dbdc_support || dev->phy.mt76->band_idx)
irq_mask |= MT_INT_BAND1_RX_DONE;
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
u32 wed_irq_mask = irq_mask;
int ret;
wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
if (!is_mt7986(&dev->mt76))
mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
else
mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
ret = mt7915_mcu_wed_enable_rx_stats(dev);
if (ret)
return ret;
mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
}
@ -348,20 +397,28 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
mt7915_dma_disable(dev, true);
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
if (mtk_wed_device_active(&mdev->mmio.wed)) {
if (!is_mt7986(mdev)) {
u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;
mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
MT_WFDMA_HOST_CONFIG_WED);
mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
wed_control_rx1));
if (is_mt7915(mdev))
mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
MT_WFDMA0_EXT0_RXWB_KEEP);
}
} else {
mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
}
/* init tx queue */
ret = mt7915_init_tx_queues(&dev->phy,
MT_TXQ_ID(dev->phy.band_idx),
MT_TXQ_ID(dev->phy.mt76->band_idx),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(0));
if (ret)
@ -369,7 +426,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (phy2) {
ret = mt7915_init_tx_queues(phy2,
MT_TXQ_ID(phy2->band_idx),
MT_TXQ_ID(phy2->mt76->band_idx),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(1));
if (ret)
@ -410,7 +467,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return ret;
/* event from WA */
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA;
dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
@ -425,7 +482,14 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return ret;
/* rx data queue for band0 */
if (!dev->phy.band_idx) {
if (!dev->phy.mt76->band_idx) {
if (mtk_wed_device_active(&mdev->mmio.wed) &&
mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
dev->mt76.q_rx[MT_RXQ_MAIN].flags =
MT_WED_Q_RX(MT7915_RXQ_BAND0);
dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
}
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT_RXQ_ID(MT_RXQ_MAIN),
MT7915_RX_RING_SIZE,
@ -437,16 +501,32 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
/* tx free notify event from WA for band0 */
if (!is_mt7915(mdev)) {
wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);
if (mtk_wed_device_active(&mdev->mmio.wed)) {
mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
if (is_mt7916(mdev)) {
wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA;
}
}
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
MT_RXQ_ID(MT_RXQ_MAIN_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE, wa_rx_base);
if (ret)
return ret;
}
if (dev->dbdc_support || dev->phy.band_idx) {
if (dev->dbdc_support || dev->phy.mt76->band_idx) {
if (mtk_wed_device_active(&mdev->mmio.wed) &&
mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
dev->mt76.q_rx[MT_RXQ_BAND1].flags =
MT_WED_Q_RX(MT7915_RXQ_BAND1);
dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
}
/* rx data queue for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
MT_RXQ_ID(MT_RXQ_BAND1),
@ -479,6 +559,53 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return 0;
}
int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
int i;
/* clean up hw queues */
for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
if (mphy_ext)
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
}
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
/* reset wfsys */
if (force)
mt7915_wfsys_reset(dev);
mt7915_dma_disable(dev, force);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
mt76_queue_reset(dev, dev->mphy.q_tx[i]);
if (mphy_ext)
mt76_queue_reset(dev, mphy_ext->q_tx[i]);
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
mt76_tx_status_check(&dev->mt76, true);
mt7915_dma_enable(dev);
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
return 0;
}
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
mt7915_dma_disable(dev, true);


@ -131,9 +131,10 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
u8 *eeprom = dev->mt76.eeprom.data;
u8 band = phy->mt76->band_idx;
u32 val;
val = eeprom[MT_EE_WIFI_CONF + phy->band_idx];
val = eeprom[MT_EE_WIFI_CONF + band];
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
if (!is_mt7915(&dev->mt76)) {
@ -153,7 +154,7 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
return;
}
} else if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support) {
val = phy->band_idx ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
val = band ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
}
switch (val) {
@ -173,60 +174,51 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
struct mt7915_phy *phy)
{
u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
u8 path, nss, nss_max = 4, *eeprom = dev->mt76.eeprom.data;
struct mt76_phy *mphy = phy->mt76;
bool ext_phy = phy != &dev->phy;
u8 band = phy->mt76->band_idx;
mt7915_eeprom_parse_band_config(phy);
/* read tx/rx mask from eeprom */
/* read tx/rx path from eeprom */
if (is_mt7915(&dev->mt76)) {
nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
eeprom[MT_EE_WIFI_CONF]);
path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
eeprom[MT_EE_WIFI_CONF]);
} else {
nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
eeprom[MT_EE_WIFI_CONF + band]);
}
if (!nss || nss > 4)
nss = 4;
if (!path || path > 4)
path = 4;
/* read tx/rx stream */
nss_band = nss;
nss = path;
if (dev->dbdc_support) {
if (is_mt7915(&dev->mt76)) {
nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
eeprom[MT_EE_WIFI_CONF + 3]);
if (phy->band_idx)
nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
eeprom[MT_EE_WIFI_CONF + 3]);
path = min_t(u8, path, 2);
nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
eeprom[MT_EE_WIFI_CONF + 3]);
if (band)
nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
eeprom[MT_EE_WIFI_CONF + 3]);
} else {
nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
nss = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
eeprom[MT_EE_WIFI_CONF + 2 + band]);
}
nss_band_max = is_mt7986(&dev->mt76) ?
MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
} else {
nss_band_max = is_mt7986(&dev->mt76) ?
MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
if (!is_mt7986(&dev->mt76))
nss_max = 2;
}
if (!nss_band || nss_band > nss_band_max)
nss_band = nss_band_max;
if (!nss)
nss = nss_max;
nss = min_t(u8, min_t(u8, nss_max, nss), path);
if (nss_band > nss) {
dev_warn(dev->mt76.dev,
"nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
nss, nss_band, phy->band_idx, ext_phy);
nss = nss_band;
}
mphy->chainmask = BIT(nss) - 1;
if (ext_phy)
mphy->chainmask = BIT(path) - 1;
if (band)
mphy->chainmask <<= dev->chainshift;
mphy->antenna_mask = BIT(nss_band) - 1;
mphy->antenna_mask = BIT(nss) - 1;
dev->chainmask |= mphy->chainmask;
dev->chainshift = hweight8(dev->mphy.chainmask);
}
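To make the path/stream split above easier to follow, a stand-alone sketch of the resulting masks; the shift and counts here are illustrative, in the driver they come from the EEPROM fields and dev->chainshift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int path = 2, nss = 2;		/* per-band tx/rx path and streams */
	unsigned int band = 1, chainshift = 2;	/* e.g. a DBDC layout */
	uint32_t chainmask = (1u << path) - 1;	/* chains owned by this phy */
	uint32_t antenna_mask = (1u << nss) - 1;/* spatial streams of this phy */

	if (band)
		chainmask <<= chainshift;	/* band1 chains sit above band0 */

	printf("chainmask=0x%x antenna_mask=0x%x\n",
	       (unsigned)chainmask, (unsigned)antenna_mask);
	return 0;
}

dev->chainmask then accumulates both phys and dev->chainshift is recomputed from hweight8(dev->mphy.chainmask), matching the last two lines of the hunk.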


@ -58,11 +58,6 @@ enum mt7915_eeprom_field {
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
#define MT_EE_NSS_MAX_MA7915 4
#define MT_EE_NSS_MAX_DBDC_MA7915 2
#define MT_EE_NSS_MAX_MA7986 4
#define MT_EE_NSS_MAX_DBDC_MA7986 4
enum mt7915_adie_sku {
MT7976_ONE_ADIE_DBDC = 0x7,
MT7975_ONE_ADIE = 0x8,


@ -8,6 +8,7 @@
#include "mt7915.h"
#include "mac.h"
#include "mcu.h"
#include "coredump.h"
#include "eeprom.h"
static const struct ieee80211_iface_limit if_limits[] = {
@ -262,9 +263,8 @@ static void mt7915_led_set_brightness(struct led_classdev *led_cdev,
mt7915_led_set_config(led_cdev, 0xff, 0);
}
static void
mt7915_init_txpower(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
void mt7915_init_txpower(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask);
int nss_delta = mt76_tx_power_nss_delta(n_chains);
@ -353,6 +353,10 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
if (!is_mt7915(&dev->mt76))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
if (!mdev->dev->of_node ||
!of_property_read_bool(mdev->dev->of_node,
@ -444,9 +448,32 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
/* mt7915: disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
/* clear estimated value of EIFS for Rx duration & OBSS time */
mt76_wr(dev, MT_WF_RMAC_RSVD0(band), MT_WF_RMAC_RSVD0_EIFS_CLR);
/* clear backoff time for Rx duration */
mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME1(band),
MT_WF_RMAC_MIB_NONQOSD_BACKOFF);
mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME3(band),
MT_WF_RMAC_MIB_QOS01_BACKOFF);
mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
MT_WF_RMAC_MIB_QOS23_BACKOFF);
/* clear backoff time and set software compensation for OBSS time */
mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
FIELD_PREP(MT_WF_RMAC_MIB_ED_OFFSET, 4);
mt76_rmw(dev, MT_WF_RMAC_MIB_AIRTIME0(band), mask, set);
/* filter out non-resp frames and get instantaneous signal reporting */
mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM;
set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
static void mt7915_mac_init(struct mt7915_dev *dev)
void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
@ -476,7 +503,7 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
}
}
static int mt7915_txbf_init(struct mt7915_dev *dev)
int mt7915_txbf_init(struct mt7915_dev *dev)
{
int ret;
@ -513,7 +540,7 @@ mt7915_alloc_ext_phy(struct mt7915_dev *dev)
phy->mt76 = mphy;
/* Bind main phy to band0 and ext_phy to band1 for dbdc case */
phy->band_idx = 1;
phy->mt76->band_idx = 1;
return phy;
}
@ -633,7 +660,7 @@ static bool mt7915_band_config(struct mt7915_dev *dev)
{
bool ret = true;
dev->phy.band_idx = 0;
dev->phy.mt76->band_idx = 0;
if (is_mt7986(&dev->mt76)) {
u32 sku = mt7915_check_adie(dev, true);
@ -644,7 +671,7 @@ static bool mt7915_band_config(struct mt7915_dev *dev)
* dbdc is disabled.
*/
if (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE) {
dev->phy.band_idx = 1;
dev->phy.mt76->band_idx = 1;
ret = false;
}
} else {
@ -700,45 +727,49 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
{
int nss;
int sts;
u32 *cap;
if (!phy->mt76->cap.has_5ghz)
return;
nss = hweight8(phy->mt76->chainmask);
sts = hweight8(phy->mt76->chainmask);
cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
(3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
sts - 1);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
if (nss < 2)
if (sts < 2)
return;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE |
FIELD_PREP(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
nss - 1);
sts - 1);
}
static void
mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
struct ieee80211_sta_he_cap *he_cap,
int vif, int nss)
mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
struct ieee80211_sta_he_cap *he_cap, int vif)
{
struct mt7915_dev *dev = phy->dev;
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
u8 c, nss_160;
int sts = hweight8(phy->mt76->chainmask);
u8 c, sts_160 = sts;
/* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
nss_160 = nss / 2;
else
nss_160 = nss;
/* Can do 1/2 of STS in 160Mhz mode for mt7915 */
if (is_mt7915(&dev->mt76)) {
if (!dev->dbdc_support)
sts_160 /= 2;
else
sts_160 = 0;
}
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@ -748,8 +779,9 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
if (sts_160)
c |= IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
elem->phy_cap_info[5] &= ~c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
@ -765,8 +797,9 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
if (sts_160)
c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
elem->phy_cap_info[4] |= c;
/* do not support NG16 due to spec D4.0 changes subcarrier idx */
@ -778,11 +811,11 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[6] |= c;
if (nss < 2)
if (sts < 2)
return;
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
if (vif != NL80211_IFTYPE_AP)
return;
@ -791,12 +824,13 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
/* num_snd_dim
* for mt7915, max supported nss is 2 for bw > 80MHz
* for mt7915, max supported sts is 2 for bw > 80MHz and 0 if dbdc
*/
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
nss - 1) |
FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
nss_160 - 1);
sts - 1);
if (sts_160)
c |= FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
sts_160 - 1);
elem->phy_cap_info[5] |= c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
@ -836,16 +870,19 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data)
{
struct mt7915_dev *dev = phy->dev;
int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
u8 nss_160;
/* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
if (!is_mt7915(&dev->mt76))
nss_160 = nss;
else if (!dev->dbdc_support)
/* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
nss_160 = nss / 2;
else
nss_160 = nss;
/* Can't do 160MHz with mt7915 dbdc */
nss_160 = 0;
for (i = 0; i < 8; i++) {
if (i < nss)
@ -891,11 +928,14 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
else
else if (nss_160)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
else
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
he_cap_elem->phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
@ -949,9 +989,11 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
if (nss_160)
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
@ -969,7 +1011,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
mt7915_set_stream_he_txbf_caps(dev, he_cap, i, nss);
mt7915_set_stream_he_txbf_caps(phy, he_cap, i);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
@ -1078,6 +1120,8 @@ int mt7915_register_device(struct mt7915_dev *dev)
init_waitqueue_head(&dev->reset_wait);
INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
INIT_WORK(&dev->dump_work, mt7915_mac_dump_work);
mutex_init(&dev->dump_mutex);
dev->dbdc_support = mt7915_band_config(dev);
@ -1118,7 +1162,15 @@ int mt7915_register_device(struct mt7915_dev *dev)
goto unreg_thermal;
}
mt7915_init_debugfs(&dev->phy);
dev->recovery.hw_init_done = true;
ret = mt7915_init_debugfs(&dev->phy);
if (ret)
goto unreg_thermal;
ret = mt7915_coredump_register(dev);
if (ret)
goto unreg_thermal;
return 0;
@ -1137,6 +1189,7 @@ free_phy2:
void mt7915_unregister_device(struct mt7915_dev *dev)
{
mt7915_unregister_ext_phy(dev);
mt7915_coredump_unregister(dev);
mt7915_unregister_thermal(&dev->phy);
mt76_unregister_device(&dev->mt76);
mt7915_stop_hardware(dev);

File diff suppressed because it is too large.


@ -20,45 +20,45 @@ static bool mt7915_dev_running(struct mt7915_dev *dev)
return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
}
static int mt7915_start(struct ieee80211_hw *hw)
int mt7915_run(struct ieee80211_hw *hw)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool running;
int ret;
flush_work(&dev->init_work);
mutex_lock(&dev->mt76.mutex);
running = mt7915_dev_running(dev);
if (!running) {
ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
ret = mt76_connac_mcu_set_pm(&dev->mt76,
dev->phy.mt76->band_idx, 0);
if (ret)
goto out;
ret = mt7915_mcu_set_mac(dev, 0, true, true);
ret = mt7915_mcu_set_mac(dev, dev->phy.mt76->band_idx,
true, true);
if (ret)
goto out;
mt7915_mac_enable_nf(dev, 0);
mt7915_mac_enable_nf(dev, dev->phy.mt76->band_idx);
}
if (phy != &dev->phy || phy->band_idx) {
ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
if (phy != &dev->phy) {
ret = mt76_connac_mcu_set_pm(&dev->mt76,
phy->mt76->band_idx, 0);
if (ret)
goto out;
ret = mt7915_mcu_set_mac(dev, 1, true, true);
ret = mt7915_mcu_set_mac(dev, phy->mt76->band_idx,
true, true);
if (ret)
goto out;
mt7915_mac_enable_nf(dev, 1);
mt7915_mac_enable_nf(dev, phy->mt76->band_idx);
}
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
phy != &dev->phy);
phy->mt76->band_idx);
if (ret)
goto out;
@ -80,6 +80,18 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_reset_counters(phy);
out:
return ret;
}
static int mt7915_start(struct ieee80211_hw *hw)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
int ret;
flush_work(&dev->init_work);
mutex_lock(&dev->mt76.mutex);
ret = mt7915_run(hw);
mutex_unlock(&dev->mt76.mutex);
return ret;
@ -99,13 +111,13 @@ static void mt7915_stop(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (phy != &dev->phy) {
mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
mt7915_mcu_set_mac(dev, 1, false, false);
mt76_connac_mcu_set_pm(&dev->mt76, phy->mt76->band_idx, 1);
mt7915_mcu_set_mac(dev, phy->mt76->band_idx, false, false);
}
if (!mt7915_dev_running(dev)) {
mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
mt7915_mcu_set_mac(dev, 0, false, false);
mt76_connac_mcu_set_pm(&dev->mt76, dev->phy.mt76->band_idx, 1);
mt7915_mcu_set_mac(dev, dev->phy.mt76->band_idx, false, false);
}
mutex_unlock(&dev->mt76.mutex);
@ -209,7 +221,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
}
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
mvif->mt76.band_idx = phy->band_idx;
mvif->mt76.band_idx = phy->mt76->band_idx;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
if (ext_phy)
@ -432,7 +444,6 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool band = phy != &dev->phy;
int ret;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@ -460,6 +471,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
bool band = phy->mt76->band_idx;
if (!enabled)
phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
@ -498,7 +510,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool band = phy != &dev->phy;
bool band = phy->mt76->band_idx;
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@ -593,10 +605,11 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_mcu_add_sta(dev, vif, NULL, join);
}
if (changed & BSS_CHANGED_ASSOC) {
if (changed & BSS_CHANGED_ASSOC)
mt7915_mcu_add_bss_info(phy, vif, vif->cfg.assoc);
mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
@ -617,7 +630,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_mcu_set_tx(dev, vif);
if (changed & BSS_CHANGED_HE_OBSS_PD)
mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
mt7915_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR)
mt7915_update_bss_color(hw, vif, &info->he_bss_color);
@ -665,6 +678,8 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
ewma_avg_signal_init(&msta->avg_ack_signal);
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@ -732,7 +747,8 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
int ret;
mutex_lock(&dev->mt76.mutex);
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val,
phy->mt76->band_idx);
mutex_unlock(&dev->mt76.mutex);
return ret;
@ -835,7 +851,7 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool band = phy != &dev->phy;
bool band = phy->mt76->band_idx;
union {
u64 t64;
u32 t32[2];
@ -880,7 +896,7 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool band = phy != &dev->phy;
bool band = phy->mt76->band_idx;
union {
u64 t64;
u32 t32[2];
@ -911,7 +927,7 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool band = phy != &dev->phy;
bool band = phy->mt76->band_idx;
union {
u64 t64;
u32 t32[2];
@ -953,22 +969,21 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
int max_nss = hweight8(hw->wiphy->available_antennas_tx);
bool ext_phy = phy != &dev->phy;
u8 chainshift = dev->chainshift;
u8 band = phy->mt76->band_idx;
if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
return -EINVAL;
if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
tx_ant = BIT(ffs(tx_ant) - 1) - 1;
mutex_lock(&dev->mt76.mutex);
phy->mt76->antenna_mask = tx_ant;
if (ext_phy)
tx_ant <<= dev->chainshift;
phy->mt76->chainmask = tx_ant;
/* handle a variant of mt7916 which has 3T3R but nss2 on 5 GHz band */
if (is_mt7916(&dev->mt76) && band && hweight8(tx_ant) == max_nss)
phy->mt76->chainmask = (dev->chainmask >> chainshift) << chainshift;
else
phy->mt76->chainmask = tx_ant << (chainshift * band);
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
@ -1026,7 +1041,21 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
sinfo->tx_retries = msta->wcid.stats.tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
if (mtk_wed_get_rx_capa(&phy->dev->mt76.mmio.wed)) {
sinfo->rx_bytes = msta->wcid.stats.rx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_packets = msta->wcid.stats.rx_packets;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
}
}
sinfo->ack_signal = (s8)msta->ack_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
@ -1111,6 +1140,39 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static int mt7915_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = mt7915_hw_dev(hw);
s16 txpower = sta->deflink.txpwr.power;
int ret;
if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC)
txpower = 0;
mutex_lock(&dev->mt76.mutex);
/* NOTE: temporarily use 0 as minimum limit, which is a
* global setting and will be applied to all stations.
*/
ret = mt7915_mcu_set_txpower_frame_min(phy, 0);
if (ret)
goto out;
/* This only applies to data frames while pushing traffic,
* whereas the management frames or other packets that are
* using fixed rate can be configured via TxD.
*/
ret = mt7915_mcu_set_txpower_frame(phy, vif, sta, txpower);
out:
mutex_unlock(&dev->mt76.mutex);
return ret;
}
static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_ampdu_cnt",
"tx_stop_q_empty_cnt",
@ -1258,7 +1320,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
int i, n, ei = 0;
int i, ei = 0;
mutex_lock(&dev->mt76.mutex);
@ -1274,9 +1336,8 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
data[ei++] = mib->tx_pkt_ibf_cnt;
/* Tx ampdu stat */
n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
data[ei++] = dev->mt76.aggr_stats[i + n];
data[ei++] = phy->mt76->aggr_stats[i];
data[ei++] = phy->mib.ba_miss_cnt;
@ -1431,7 +1492,7 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
path->dev = ctx->dev;
path->mtk_wdma.wdma_idx = wed->wdma_idx;
path->mtk_wdma.bss = mvif->mt76.idx;
path->mtk_wdma.wcid = msta->wcid.idx;
path->mtk_wdma.wcid = is_mt7915(&dev->mt76) ? msta->wcid.idx : 0x3ff;
path->mtk_wdma.queue = phy != &dev->phy;
ctx->dev = NULL;
@ -1477,6 +1538,7 @@ const struct ieee80211_ops mt7915_ops = {
.set_bitrate_mask = mt7915_set_bitrate_mask,
.set_coverage_class = mt7915_set_coverage_class,
.sta_statistics = mt7915_sta_statistics,
.sta_set_txpwr = mt7915_sta_set_txpwr,
.sta_set_4addr = mt7915_sta_set_4addr,
.sta_set_decap_offload = mt7915_sta_set_decap_offload,
.add_twt_setup = mt7915_mac_add_twt_setup,


@ -32,6 +32,10 @@
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
static bool sr_scene_detect = true;
module_param(sr_scene_detect, bool, 0644);
MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm");
static u8
mt7915_mcu_get_sta_nss(u16 mcs_map)
{
@ -228,7 +232,8 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
if ((c->band_idx && !dev->phy.mt76->band_idx) &&
dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@ -247,7 +252,8 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
if ((t->ctrl.band_idx && !dev->phy.mt76->band_idx) &&
dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
phy = (struct mt7915_phy *)mphy->priv;
@ -262,7 +268,8 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
if ((r->band_idx && !dev->phy.mt76->band_idx) &&
dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
if (r->band_idx == MT_RX_SEL2)
@ -319,7 +326,7 @@ mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
b = (struct mt7915_mcu_bcc_notify *)skb->data;
if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
if ((b->band_idx && !dev->phy.mt76->band_idx) && dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@ -485,7 +492,7 @@ static void
mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct mt7915_phy *phy)
{
int max_nss = hweight8(phy->mt76->chainmask);
int max_nss = hweight8(phy->mt76->antenna_mask);
struct bss_info_ra *ra;
struct tlv *tlv;
@ -595,7 +602,7 @@ mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif,
.mode = !!mask || enable,
.entry_count = 1,
.write = 1,
.band = phy != &dev->phy,
.band = phy->mt76->band_idx,
.index = idx * 2 + bssid,
};
@ -1131,7 +1138,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
bf->ncol_bw160 = nss_mcs;
bf->ncol_gt_bw80 = nss_mcs;
}
if (pe->phy_cap_info[0] &
@ -1139,10 +1146,10 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
if (bf->ncol_bw160)
bf->ncol_bw160 = min_t(u8, bf->ncol_bw160, nss_mcs);
if (bf->ncol_gt_bw80)
bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs);
else
bf->ncol_bw160 = nss_mcs;
bf->ncol_gt_bw80 = nss_mcs;
}
snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
@ -1150,7 +1157,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
pe->phy_cap_info[4]);
bf->nrow_bw160 = min_t(int, snd_dim, sts);
bf->nrow_gt_bw80 = min_t(int, snd_dim, sts);
}
static void
@ -1306,6 +1313,9 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
case RATE_PARAM_MMPS_UPDATE:
ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
break;
case RATE_PARAM_SPE_UPDATE:
ra->spe_idx = *(u8 *)data;
break;
default:
break;
}
@ -1348,6 +1358,18 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
RATE_PARAM_MMPS_UPDATE);
}
static int
mt7915_mcu_set_spe_idx(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt76_phy *mphy = mvif->phy->mt76;
u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask);
return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &spe_idx,
RATE_PARAM_SPE_UPDATE);
}
static int
mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
@ -1435,7 +1457,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
return ret;
}
return 0;
return mt7915_mcu_set_spe_idx(dev, vif, sta);
}
static void
@ -1662,10 +1684,32 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return ret;
}
out:
ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
}
int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct {
__le32 args[2];
} req = {
.args[0] = cpu_to_le32(1),
.args[1] = cpu_to_le32(6),
};
return mtk_wed_device_update_msg(wed, MTK_WED_WO_CMD_RXCNT_CTRL,
&req, sizeof(req));
#else
return 0;
#endif
}
int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, bool enable)
{
@ -1674,7 +1718,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
struct {
struct req_hdr {
u8 omac_idx;
u8 dbdc_idx;
u8 band_idx;
__le16 tlv_num;
u8 is_tlv_append;
u8 rsv[3];
@ -1683,13 +1727,13 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
__le16 tag;
__le16 len;
u8 active;
u8 dbdc_idx;
u8 band_idx;
u8 omac_addr[ETH_ALEN];
} __packed tlv;
} data = {
.hdr = {
.omac_idx = mvif->mt76.omac_idx,
.dbdc_idx = mvif->mt76.band_idx,
.band_idx = mvif->mt76.band_idx,
.tlv_num = cpu_to_le16(1),
.is_tlv_append = 1,
},
@ -1697,7 +1741,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
.dbdc_idx = mvif->mt76.band_idx,
.band_idx = mvif->mt76.band_idx,
},
};
@ -2151,7 +2195,7 @@ int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms)
u8 band_idx;
} req = {
.cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
.band_idx = phy->band_idx,
.band_idx = phy->mt76->band_idx,
};
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
@ -2234,18 +2278,10 @@ mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
sizeof(req), true);
}
int mt7915_mcu_init(struct mt7915_dev *dev)
int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
.mcu_restart = mt76_connac_mcu_restart,
};
int ret;
dev->mt76.mcu_ops = &mt7915_mcu_ops;
/* force firmware operation mode into normal state,
* which should be set before firmware download stage.
*/
@ -2274,7 +2310,7 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
if (ret)
return ret;
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
if (mtk_wed_device_active(&dev->mt76.mmio.wed) && is_mt7915(&dev->mt76))
mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
ret = mt7915_mcu_set_mwds(dev, 1);
@ -2294,6 +2330,20 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
MCU_WA_PARAM_RED, 0, 0);
}
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
.mcu_restart = mt76_connac_mcu_restart,
};
dev->mt76.mcu_ops = &mt7915_mcu_ops;
return mt7915_mcu_init_firmware(dev);
}
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
@ -2538,7 +2588,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
req.monitor_central_chan =
ieee80211_frequency_to_channel(chandef->center_freq1);
req.monitor_bw = mt76_connac_chan_bw(chandef);
req.band_idx = phy != &dev->phy;
req.band_idx = phy->mt76->band_idx;
req.scan_mode = 1;
break;
}
@ -2546,7 +2596,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
req.monitor_chan = chandef->chan->hw_value;
req.monitor_central_chan =
ieee80211_frequency_to_channel(chandef->center_freq1);
req.band_idx = phy != &dev->phy;
req.band_idx = phy->mt76->band_idx;
req.scan_mode = 2;
break;
case CH_SWITCH_BACKGROUND_SCAN_STOP:
@ -2613,12 +2663,13 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1;
u8 band = phy->mt76->band_idx;
struct {
u8 control_ch;
u8 center_ch;
u8 bw;
u8 tx_streams_num;
u8 rx_streams; /* mask or num */
u8 tx_path_num;
u8 rx_path; /* mask or num */
u8 switch_reason;
u8 band_idx;
u8 center_ch2; /* for 80+80 only */
@ -2634,25 +2685,23 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
.bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = phy->band_idx,
.tx_path_num = hweight16(phy->mt76->chainmask),
.rx_path = phy->mt76->chainmask >> (dev->chainshift * band),
.band_idx = band,
.channel_band = ch_band[chandef->chan->band],
};
#ifdef CONFIG_NL80211_TESTMODE
if (phy->mt76->test.tx_antenna_mask &&
(phy->mt76->test.state == MT76_TM_STATE_TX_FRAMES ||
phy->mt76->test.state == MT76_TM_STATE_RX_FRAMES ||
phy->mt76->test.state == MT76_TM_STATE_TX_CONT)) {
req.tx_streams_num = fls(phy->mt76->test.tx_antenna_mask);
req.rx_streams = phy->mt76->test.tx_antenna_mask;
if (phy != &dev->phy)
req.rx_streams >>= dev->chainshift;
mt76_testmode_enabled(phy->mt76)) {
req.tx_path_num = fls(phy->mt76->test.tx_antenna_mask);
req.rx_path = phy->mt76->test.tx_antenna_mask;
}
#endif
if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
req.tx_path_num = fls(phy->mt76->antenna_mask);
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
@ -2665,7 +2714,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
req.switch_reason = CH_SWITCH_NORMAL;
if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
req.rx_streams = hweight8(req.rx_streams);
req.rx_path = hweight8(req.rx_path);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
int freq2 = chandef->center_freq2;
@ -2927,25 +2976,36 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
/* strict order */
static const u32 offs[] = {
MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME,
MIB_BUSY_TIME_V2, MIB_TX_TIME_V2, MIB_RX_TIME_V2,
MIB_NON_WIFI_TIME,
MIB_TX_TIME,
MIB_RX_TIME,
MIB_OBSS_AIRTIME,
MIB_TXOP_INIT_COUNT,
/* v2 */
MIB_NON_WIFI_TIME_V2,
MIB_TX_TIME_V2,
MIB_RX_TIME_V2,
MIB_OBSS_AIRTIME_V2
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_mib *res, req[4];
struct mt7915_mcu_mib *res, req[5];
struct sk_buff *skb;
int i, ret, start = 0, ofs = 20;
u64 cc_tx;
if (!is_mt7915(&dev->mt76)) {
start = 4;
start = 5;
ofs = 0;
}
for (i = 0; i < 4; i++) {
req[i].band = cpu_to_le32(phy != &dev->phy);
for (i = 0; i < 5; i++) {
req[i].band = cpu_to_le32(phy->mt76->band_idx);
req[i].offs = cpu_to_le32(offs[i + start]);
if (!is_mt7915(&dev->mt76) && i == 3)
break;
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
@ -2955,20 +3015,24 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
res = (struct mt7915_mcu_mib *)(skb->data + ofs);
#define __res_u64(s) le64_to_cpu(res[s].data)
/* subtract Tx backoff time from Tx duration */
cc_tx = is_mt7915(&dev->mt76) ? __res_u64(1) - __res_u64(4) : __res_u64(1);
if (chan_switch)
goto out;
#define __res_u64(s) le64_to_cpu(res[s].data)
state->cc_busy += __res_u64(0) - state_ts->cc_busy;
state->cc_tx += __res_u64(1) - state_ts->cc_tx;
state->cc_tx += cc_tx - state_ts->cc_tx;
state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
state->cc_busy += __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3) -
state_ts->cc_busy;
out:
state_ts->cc_busy = __res_u64(0);
state_ts->cc_tx = __res_u64(1);
state_ts->cc_tx = cc_tx;
state_ts->cc_bss_rx = __res_u64(2);
state_ts->cc_rx = __res_u64(2) + __res_u64(3);
state_ts->cc_busy = __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3);
#undef __res_u64
dev_kfree_skb(skb);
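For reference, the way the response slots map onto the accumulated channel-state counters on the mt7915 (v1) path can be read off the offs[] table and the arithmetic above; the summary below is inferred from that, not stated explicitly in the patch:
/* Inferred res[] layout for mt7915 (request order == response order assumed):
 *   res[0] = MIB_NON_WIFI_TIME     non-Wi-Fi airtime
 *   res[1] = MIB_TX_TIME           Tx airtime, including backoff
 *   res[2] = MIB_RX_TIME           Rx airtime
 *   res[3] = MIB_OBSS_AIRTIME      OBSS airtime
 *   res[4] = MIB_TXOP_INIT_COUNT   Tx backoff, only requested on mt7915
 *
 * cc_tx   = res[1] - res[4]                      (backoff excluded)
 * cc_rx   = res[2] + res[3]
 * cc_busy = res[0] + cc_tx + res[2] + res[3]
 */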
@ -2982,11 +3046,11 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
struct {
u8 ctrl_id;
u8 action;
u8 dbdc_idx;
u8 band_idx;
u8 rsv[5];
} req = {
.ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
.dbdc_idx = phy != &dev->phy,
.band_idx = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
@ -3005,7 +3069,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
u8 rsv[2];
} __packed req = {
.ctrl = {
.band_idx = phy->band_idx,
.band_idx = phy->mt76->band_idx,
},
};
int level;
@ -3045,28 +3109,103 @@ out:
&req, sizeof(req), false);
}
int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower)
{
struct mt7915_dev *dev = phy->dev;
struct {
u8 format_id;
u8 rsv;
u8 band_idx;
s8 txpower_min;
} __packed req = {
.format_id = TX_POWER_LIMIT_FRAME_MIN,
.band_idx = phy->mt76->band_idx,
.txpower_min = txpower * 2, /* in 0.5 dB units */
};
return mt76_mcu_send_msg(&dev->mt76,
MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
sizeof(req), true);
}
int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, s8 txpower)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct {
u8 format_id;
u8 rsv[3];
u8 band_idx;
s8 txpower_max;
__le16 wcid;
s8 txpower_offs[48];
} __packed req = {
.format_id = TX_POWER_LIMIT_FRAME,
.band_idx = phy->mt76->band_idx,
.txpower_max = DIV_ROUND_UP(mphy->txpower_cur, 2),
.wcid = cpu_to_le16(msta->wcid.idx),
};
int ret;
s8 txpower_sku[MT7915_SKU_RATE_NUM];
ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku));
if (ret)
return ret;
txpower = mt7915_get_power_bound(phy, txpower);
if (txpower > mphy->txpower_cur || txpower < 0)
return -EINVAL;
if (txpower) {
u32 offs, len, i;
if (sta->deflink.ht_cap.ht_supported) {
const u8 *sku_len = mt7915_sku_group_len;
offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM];
len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40];
if (sta->deflink.vht_cap.vht_supported) {
offs += len;
len = sku_len[SKU_VHT_BW20] * 4;
if (sta->deflink.he_cap.has_he) {
offs += len + sku_len[SKU_HE_RU26] * 3;
len = sku_len[SKU_HE_RU242] * 4;
}
}
} else {
return -EINVAL;
}
for (i = 0; i < len; i++, offs++)
req.txpower_offs[i] =
DIV_ROUND_UP(txpower - txpower_sku[offs], 2);
}
return mt76_mcu_send_msg(&dev->mt76,
MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
sizeof(req), true);
}
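In words, the per-frame power offsets are taken from the slice of the SKU table that matches the peer's highest supported PHY mode; a condensed view of the index arithmetic above (the equal-sized-group reading of the *3/*4 multipliers is an assumption about mt7915_sku_group_len, which is defined elsewhere):
/* offs selects where the relevant rate group starts in txpower_sku[]:
 *   HT-only peer: skip CCK+OFDM,            cover HT BW20 + HT BW40
 *   VHT peer:     additionally skip HT,     cover 4 x VHT BW20-sized groups
 *   HE peer:      additionally skip VHT and 3 x HE RU26-sized groups,
 *                 cover 4 x HE RU242-sized groups
 *   non-HT peer:  rejected with -EINVAL
 * per entry: txpower_offs[i] = DIV_ROUND_UP(txpower - txpower_sku[offs + i], 2)
 */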
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
struct mt7915_sku_val {
u8 format_id;
u8 limit_type;
u8 dbdc_idx;
s8 val[MT7915_SKU_RATE_NUM];
} __packed req = {
.format_id = 4,
.dbdc_idx = phy != &dev->phy,
struct mt7915_mcu_txpower_sku req = {
.format_id = TX_POWER_LIMIT_TABLE,
.band_idx = phy->mt76->band_idx,
};
struct mt76_power_limits limits_array;
s8 *la = (s8 *)&limits_array;
int i, idx, n_chains = hweight8(mphy->antenna_mask);
int tx_power = hw->conf.power_level * 2;
int i, idx;
int tx_power;
tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan,
tx_power);
tx_power -= mt76_tx_power_nss_delta(n_chains);
tx_power = mt7915_get_power_bound(phy, hw->conf.power_level);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits_array, tx_power);
mphy->txpower_cur = tx_power;
@ -3085,7 +3224,7 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
}
for (j = 0; j < min_t(u8, mcs_num, len); j++)
req.val[idx + j] = la[j];
req.txpower_sku[idx + j] = la[j];
la += mcs_num;
idx += len;
@ -3103,14 +3242,14 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
struct {
u8 format_id;
u8 category;
u8 band;
u8 band_idx;
u8 _rsv;
} __packed req = {
.format_id = 7,
.format_id = TX_POWER_LIMIT_INFO,
.category = RATE_POWER_INFO,
.band = phy != &dev->phy,
.band_idx = phy->mt76->band_idx,
};
s8 res[MT7915_SKU_RATE_NUM][2];
s8 txpower_sku[MT7915_SKU_RATE_NUM][2];
struct sk_buff *skb;
int ret, i;
@ -3120,9 +3259,9 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
if (ret)
return ret;
memcpy(res, skb->data + 4, sizeof(res));
memcpy(txpower_sku, skb->data + 4, sizeof(txpower_sku));
for (i = 0; i < len; i++)
txpower[i] = res[i][req.band];
txpower[i] = txpower_sku[i][req.band_idx];
dev_kfree_skb(skb);
@ -3157,11 +3296,11 @@ int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
struct mt7915_sku {
u8 format_id;
u8 sku_enable;
u8 dbdc_idx;
u8 band_idx;
u8 rsv;
} __packed req = {
.format_id = 0,
.dbdc_idx = phy != &dev->phy,
.format_id = TX_POWER_LIMIT_ENABLE,
.band_idx = phy->mt76->band_idx,
.sku_enable = enable,
};
@ -3236,31 +3375,193 @@ int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action)
sizeof(req), true);
}
int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bool enable)
static int
mt7915_mcu_enable_obss_spr(struct mt7915_phy *phy, u8 action, u8 val)
{
#define MT_SPR_ENABLE 1
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct {
u8 action;
u8 arg_num;
u8 band_idx;
u8 status;
u8 drop_tx_idx;
u8 sta_idx; /* 256 sta */
u8 rsv[2];
__le32 val;
} __packed req = {
.action = MT_SPR_ENABLE,
.arg_num = 1,
.band_idx = mvif->mt76.band_idx,
.val = cpu_to_le32(enable),
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_sr_ctrl req = {
.action = action,
.argnum = 1,
.band_idx = phy->mt76->band_idx,
.val = cpu_to_le32(val),
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
sizeof(req), true);
}
static int
mt7915_mcu_set_obss_spr_pd(struct mt7915_phy *phy,
struct ieee80211_he_obss_pd *he_obss_pd)
{
struct mt7915_dev *dev = phy->dev;
struct {
struct mt7915_mcu_sr_ctrl ctrl;
struct {
u8 pd_th_non_srg;
u8 pd_th_srg;
u8 period_offs;
u8 rcpi_src;
__le16 obss_pd_min;
__le16 obss_pd_min_srg;
u8 resp_txpwr_mode;
u8 txpwr_restrict_mode;
u8 txpwr_ref;
u8 rsv[3];
} __packed param;
} __packed req = {
.ctrl = {
.action = SPR_SET_PARAM,
.argnum = 9,
.band_idx = phy->mt76->band_idx,
},
};
int ret;
u8 max_th = 82, non_srg_max_th = 62;
/* disable firmware dynamic PD adjustment */
ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_DPD, false);
if (ret)
return ret;
if (he_obss_pd->sr_ctrl &
IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED)
req.param.pd_th_non_srg = max_th;
else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
req.param.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset;
else
req.param.pd_th_non_srg = non_srg_max_th;
if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
req.param.pd_th_srg = max_th - he_obss_pd->max_offset;
req.param.obss_pd_min = cpu_to_le16(82);
req.param.obss_pd_min_srg = cpu_to_le16(82);
req.param.txpwr_restrict_mode = 2;
req.param.txpwr_ref = 21;
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
sizeof(req), true);
}
static int
mt7915_mcu_set_obss_spr_siga(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_he_obss_pd *he_obss_pd)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_dev *dev = phy->dev;
u8 omac = mvif->mt76.omac_idx;
struct {
struct mt7915_mcu_sr_ctrl ctrl;
struct {
u8 omac;
u8 rsv[3];
u8 flag[20];
} __packed siga;
} __packed req = {
.ctrl = {
.action = SPR_SET_SIGA,
.argnum = 1,
.band_idx = phy->mt76->band_idx,
},
.siga = {
.omac = omac > HW_BSSID_MAX ? omac - 12 : omac,
},
};
int ret;
if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED)
req.siga.flag[req.siga.omac] = 0xf;
else
return 0;
/* switch to normal AP mode */
ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_MODE, 0);
if (ret)
return ret;
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
sizeof(req), true);
}
static int
mt7915_mcu_set_obss_spr_bitmap(struct mt7915_phy *phy,
struct ieee80211_he_obss_pd *he_obss_pd)
{
struct mt7915_dev *dev = phy->dev;
struct {
struct mt7915_mcu_sr_ctrl ctrl;
struct {
__le32 color_l[2];
__le32 color_h[2];
__le32 bssid_l[2];
__le32 bssid_h[2];
} __packed bitmap;
} __packed req = {
.ctrl = {
.action = SPR_SET_SRG_BITMAP,
.argnum = 4,
.band_idx = phy->mt76->band_idx,
},
};
u32 bitmap;
memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
req.bitmap.color_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap));
req.bitmap.color_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
req.bitmap.bssid_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap));
req.bitmap.bssid_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
sizeof(req), true);
}
int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_he_obss_pd *he_obss_pd)
{
int ret;
/* enable firmware scene detection algorithms */
ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_SD, sr_scene_detect);
if (ret)
return ret;
/* firmware dynamically adjusts PD threshold so skip manual control */
if (sr_scene_detect && !he_obss_pd->enable)
return 0;
/* enable spatial reuse */
ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE, he_obss_pd->enable);
if (ret)
return ret;
if (sr_scene_detect || !he_obss_pd->enable)
return 0;
ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_TX, true);
if (ret)
return ret;
/* set SRG/non-SRG OBSS PD threshold */
ret = mt7915_mcu_set_obss_spr_pd(phy, he_obss_pd);
if (ret)
return ret;
/* Set SR prohibit */
ret = mt7915_mcu_set_obss_spr_siga(phy, vif, he_obss_pd);
if (ret)
return ret;
/* set SRG BSS color/BSSID bitmap */
return mt7915_mcu_set_obss_spr_bitmap(phy, he_obss_pd);
}
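Read top to bottom, the new entry point chains the SET_SPR sub-commands in a fixed order; a compact summary of the flow as written above (sr_scene_detect is a knob defined elsewhere in this series):
/* mt7915_mcu_add_obss_spr() sequence; manual PD control only runs when
 * scene detection is off and OBSS PD is enabled:
 *   SPR_ENABLE_SD  <- sr_scene_detect
 *   SPR_ENABLE     <- he_obss_pd->enable
 *   SPR_ENABLE_TX  <- true
 *   SPR_ENABLE_DPD = 0, then SPR_SET_PARAM   (SRG/non-SRG PD thresholds)
 *   SPR_ENABLE_MODE = 0, then SPR_SET_SIGA   (only if HESIGA_SR_VAL15_ALLOWED)
 *   SPR_SET_SRG_BITMAP                       (BSS color / partial BSSID bitmaps)
 */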
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate)
{
@ -3447,8 +3748,8 @@ int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set)
__le32 ofs;
__le32 data;
} __packed req = {
.idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 28))),
.ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(27, 0))),
.idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 24))),
.ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(23, 0))),
.data = set ? cpu_to_le32(*val) : 0,
};
struct sk_buff *skb;


@ -129,6 +129,17 @@ struct mt7915_mcu_background_chain_ctrl {
u8 rsv[2];
} __packed;
struct mt7915_mcu_sr_ctrl {
u8 action;
u8 argnum;
u8 band_idx;
u8 status;
u8 drop_ta_idx;
u8 sta_idx; /* 256 sta */
u8 rsv[2];
__le32 val;
} __packed;
struct mt7915_mcu_eeprom {
u8 buffer_mode;
u8 format;
@ -160,17 +171,26 @@ struct mt7915_mcu_mib {
enum mt7915_chan_mib_offs {
/* mt7915 */
MIB_BUSY_TIME = 14,
MIB_TX_TIME = 81,
MIB_RX_TIME,
MIB_OBSS_AIRTIME = 86,
MIB_NON_WIFI_TIME,
MIB_TXOP_INIT_COUNT,
/* mt7916 */
MIB_BUSY_TIME_V2 = 0,
MIB_TX_TIME_V2 = 6,
MIB_RX_TIME_V2 = 8,
MIB_OBSS_AIRTIME_V2 = 490
MIB_OBSS_AIRTIME_V2 = 490,
MIB_NON_WIFI_TIME_V2
};
struct mt7915_mcu_txpower_sku {
u8 format_id;
u8 limit_type;
u8 band_idx;
s8 txpower_sku[MT7915_SKU_RATE_NUM];
} __packed;
struct edca {
u8 queue;
u8 set;
@ -394,6 +414,7 @@ enum {
RATE_PARAM_FIXED_MCS,
RATE_PARAM_FIXED_GI = 11,
RATE_PARAM_AUTO = 20,
RATE_PARAM_SPE_UPDATE = 22,
};
#define RATE_CFG_MCS GENMASK(3, 0)
@ -405,6 +426,25 @@ enum {
#define RATE_CFG_PHY_TYPE GENMASK(27, 24)
#define RATE_CFG_HE_LTF GENMASK(31, 28)
enum {
TX_POWER_LIMIT_ENABLE,
TX_POWER_LIMIT_TABLE = 0x4,
TX_POWER_LIMIT_INFO = 0x7,
TX_POWER_LIMIT_FRAME = 0x11,
TX_POWER_LIMIT_FRAME_MIN = 0x12,
};
enum {
SPR_ENABLE = 0x1,
SPR_ENABLE_SD = 0x3,
SPR_ENABLE_MODE = 0x5,
SPR_ENABLE_DPD = 0x23,
SPR_ENABLE_TX = 0x25,
SPR_SET_SRG_BITMAP = 0x80,
SPR_SET_PARAM = 0xc2,
SPR_SET_SIGA = 0xdc,
};
enum {
THERMAL_PROTECT_PARAMETER_CTRL,
THERMAL_PROTECT_BASIC_INFO,
@ -447,6 +487,8 @@ enum {
SER_SET_RECOVER_L3_TX_ABORT,
SER_SET_RECOVER_L3_TX_DISABLE,
SER_SET_RECOVER_L3_BF,
SER_SET_RECOVER_FULL,
SER_SET_SYSTEM_ASSERT,
/* action */
SER_ENABLE = 2,
SER_RECOVER
@ -474,4 +516,16 @@ enum {
sizeof(struct bss_info_bcn_cont) + \
sizeof(struct bss_info_inband_discovery))
static inline s8
mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
{
struct mt76_phy *mphy = phy->mt76;
int n_chains = hweight8(mphy->antenna_mask);
txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
txpower -= mt76_tx_power_nss_delta(n_chains);
return txpower;
}
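As a rough worked example of the unit handling in this helper (the 0.5 dB firmware unit and the mt76_tx_power_nss_delta() return values are assumptions about the mt76 core, not spelled out in this patch):
/* Hypothetical input: requested txpower = 20 dBm, 2 Tx chains, no SAR cap.
 *   txpower * 2                 -> 40   (0.5 dB units)
 *   mt76_get_sar_power(...)     -> 40   (unchanged, nothing to cap)
 *   mt76_tx_power_nss_delta(2)  -> 6    (assumed per-chain delta)
 *   returned bound              -> 34   i.e. 17 dBm in 0.5 dB units
 */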
#endif


@ -9,53 +9,112 @@
#include "mt7915.h"
#include "mac.h"
#include "../trace.h"
#include "../dma.h"
static bool wed_enable;
module_param(wed_enable, bool, 0644);
MODULE_PARM_DESC(wed_enable, "Enable Wireless Ethernet Dispatch support");
static const u32 mt7915_reg[] = {
[INT_SOURCE_CSR] = 0xd7010,
[INT_MASK_CSR] = 0xd7014,
[INT1_SOURCE_CSR] = 0xd7088,
[INT1_MASK_CSR] = 0xd708c,
[INT_MCU_CMD_SOURCE] = 0xd51f0,
[INT_MCU_CMD_EVENT] = 0x3108,
[WFDMA0_ADDR] = 0xd4000,
[WFDMA0_PCIE1_ADDR] = 0xd8000,
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x77ffffff,
[INFRA_MCU_ADDR_END] = 0x7c3fffff,
[FW_EXCEPTION_ADDR] = 0x219848,
[SWDEF_BASE_ADDR] = 0x41f200,
[INT_SOURCE_CSR] = 0xd7010,
[INT_MASK_CSR] = 0xd7014,
[INT1_SOURCE_CSR] = 0xd7088,
[INT1_MASK_CSR] = 0xd708c,
[INT_MCU_CMD_SOURCE] = 0xd51f0,
[INT_MCU_CMD_EVENT] = 0x3108,
[WFDMA0_ADDR] = 0xd4000,
[WFDMA0_PCIE1_ADDR] = 0xd8000,
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x77ffffff,
[INFRA_MCU_ADDR_END] = 0x7c3fffff,
[FW_ASSERT_STAT_ADDR] = 0x219848,
[FW_EXCEPT_TYPE_ADDR] = 0x21987c,
[FW_EXCEPT_COUNT_ADDR] = 0x219848,
[FW_CIRQ_COUNT_ADDR] = 0x216f94,
[FW_CIRQ_IDX_ADDR] = 0x216ef8,
[FW_CIRQ_LISR_ADDR] = 0x2170ac,
[FW_TASK_ID_ADDR] = 0x216f90,
[FW_TASK_IDX_ADDR] = 0x216f9c,
[FW_TASK_QID1_ADDR] = 0x219680,
[FW_TASK_QID2_ADDR] = 0x219760,
[FW_TASK_START_ADDR] = 0x219558,
[FW_TASK_END_ADDR] = 0x219554,
[FW_TASK_SIZE_ADDR] = 0x219560,
[FW_LAST_MSG_ID_ADDR] = 0x216f70,
[FW_EINT_INFO_ADDR] = 0x219818,
[FW_SCHED_INFO_ADDR] = 0x219828,
[SWDEF_BASE_ADDR] = 0x41f200,
[TXQ_WED_RING_BASE] = 0xd7300,
[RXQ_WED_RING_BASE] = 0xd7410,
[RXQ_WED_DATA_RING_BASE] = 0xd4500,
};
static const u32 mt7916_reg[] = {
[INT_SOURCE_CSR] = 0xd4200,
[INT_MASK_CSR] = 0xd4204,
[INT1_SOURCE_CSR] = 0xd8200,
[INT1_MASK_CSR] = 0xd8204,
[INT_MCU_CMD_SOURCE] = 0xd41f0,
[INT_MCU_CMD_EVENT] = 0x2108,
[WFDMA0_ADDR] = 0xd4000,
[WFDMA0_PCIE1_ADDR] = 0xd8000,
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
[FW_EXCEPTION_ADDR] = 0x022050bc,
[SWDEF_BASE_ADDR] = 0x411400,
[INT_SOURCE_CSR] = 0xd4200,
[INT_MASK_CSR] = 0xd4204,
[INT1_SOURCE_CSR] = 0xd8200,
[INT1_MASK_CSR] = 0xd8204,
[INT_MCU_CMD_SOURCE] = 0xd41f0,
[INT_MCU_CMD_EVENT] = 0x2108,
[WFDMA0_ADDR] = 0xd4000,
[WFDMA0_PCIE1_ADDR] = 0xd8000,
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
[FW_ASSERT_STAT_ADDR] = 0x02204c14,
[FW_EXCEPT_TYPE_ADDR] = 0x022051a4,
[FW_EXCEPT_COUNT_ADDR] = 0x022050bc,
[FW_CIRQ_COUNT_ADDR] = 0x022001ac,
[FW_CIRQ_IDX_ADDR] = 0x02204f84,
[FW_CIRQ_LISR_ADDR] = 0x022050d0,
[FW_TASK_ID_ADDR] = 0x0220406c,
[FW_TASK_IDX_ADDR] = 0x0220500c,
[FW_TASK_QID1_ADDR] = 0x022028c8,
[FW_TASK_QID2_ADDR] = 0x02202a38,
[FW_TASK_START_ADDR] = 0x0220286c,
[FW_TASK_END_ADDR] = 0x02202870,
[FW_TASK_SIZE_ADDR] = 0x02202878,
[FW_LAST_MSG_ID_ADDR] = 0x02204fe8,
[FW_EINT_INFO_ADDR] = 0x0220525c,
[FW_SCHED_INFO_ADDR] = 0x0220516c,
[SWDEF_BASE_ADDR] = 0x411400,
[TXQ_WED_RING_BASE] = 0xd7300,
[RXQ_WED_RING_BASE] = 0xd7410,
[RXQ_WED_DATA_RING_BASE] = 0xd4540,
};
static const u32 mt7986_reg[] = {
[INT_SOURCE_CSR] = 0x24200,
[INT_MASK_CSR] = 0x24204,
[INT1_SOURCE_CSR] = 0x28200,
[INT1_MASK_CSR] = 0x28204,
[INT_MCU_CMD_SOURCE] = 0x241f0,
[INT_MCU_CMD_EVENT] = 0x54000108,
[WFDMA0_ADDR] = 0x24000,
[WFDMA0_PCIE1_ADDR] = 0x28000,
[WFDMA_EXT_CSR_ADDR] = 0x27000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
[FW_EXCEPTION_ADDR] = 0x02204ffc,
[SWDEF_BASE_ADDR] = 0x411400,
[INT_SOURCE_CSR] = 0x24200,
[INT_MASK_CSR] = 0x24204,
[INT1_SOURCE_CSR] = 0x28200,
[INT1_MASK_CSR] = 0x28204,
[INT_MCU_CMD_SOURCE] = 0x241f0,
[INT_MCU_CMD_EVENT] = 0x54000108,
[WFDMA0_ADDR] = 0x24000,
[WFDMA0_PCIE1_ADDR] = 0x28000,
[WFDMA_EXT_CSR_ADDR] = 0x27000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
[FW_ASSERT_STAT_ADDR] = 0x02204b54,
[FW_EXCEPT_TYPE_ADDR] = 0x022050dc,
[FW_EXCEPT_COUNT_ADDR] = 0x02204ffc,
[FW_CIRQ_COUNT_ADDR] = 0x022001ac,
[FW_CIRQ_IDX_ADDR] = 0x02204ec4,
[FW_CIRQ_LISR_ADDR] = 0x02205010,
[FW_TASK_ID_ADDR] = 0x02204fac,
[FW_TASK_IDX_ADDR] = 0x02204f4c,
[FW_TASK_QID1_ADDR] = 0x02202814,
[FW_TASK_QID2_ADDR] = 0x02202984,
[FW_TASK_START_ADDR] = 0x022027b8,
[FW_TASK_END_ADDR] = 0x022027bc,
[FW_TASK_SIZE_ADDR] = 0x022027c4,
[FW_LAST_MSG_ID_ADDR] = 0x02204f28,
[FW_EINT_INFO_ADDR] = 0x02205194,
[FW_SCHED_INFO_ADDR] = 0x022051a4,
[SWDEF_BASE_ADDR] = 0x411400,
[TXQ_WED_RING_BASE] = 0x24420,
[RXQ_WED_RING_BASE] = 0x24520,
[RXQ_WED_DATA_RING_BASE] = 0x24540,
};
static const u32 mt7915_offs[] = {
@ -448,6 +507,14 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
return mt7915_reg_map_l2(dev, addr);
}
void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
size_t len)
{
u32 addr = __mt7915_reg_addr(dev, offset);
memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
}
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
@ -472,6 +539,257 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
struct mt7915_phy *phy;
int ret;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = wed->wlan.token_start;
spin_unlock_bh(&dev->mt76.token_lock);
ret = wait_event_timeout(dev->mt76.tx_wait,
!dev->mt76.wed_token_count, HZ);
if (!ret)
return -EAGAIN;
phy = &dev->phy;
mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H);
phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (phy)
mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx),
MT_AGG_ACR_PPDU_TXS2H);
return 0;
}
static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
struct mt7915_phy *phy;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = MT7915_TOKEN_SIZE;
spin_unlock_bh(&dev->mt76.token_lock);
/* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
* MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
*/
phy = &dev->phy;
mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H);
phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (phy)
mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx),
MT_AGG_ACR_PPDU_TXS2H);
}
static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
struct page *page;
int i;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
for (i = 0; i < dev->mt76.rx_token_size; i++) {
struct mt76_txwi_cache *t;
t = mt76_rx_token_release(&dev->mt76, i);
if (!t || !t->ptr)
continue;
dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
wed->wlan.rx_size, DMA_FROM_DEVICE);
skb_free_frag(t->ptr);
t->ptr = NULL;
mt76_put_rxwi(&dev->mt76, t);
}
if (!wed->rx_buf_ring.rx_page.va)
return;
page = virt_to_page(wed->rx_buf_ring.rx_page.va);
__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
}
static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
struct mt7915_dev *dev;
u32 length;
int i;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
sizeof(struct skb_shared_info));
for (i = 0; i < size; i++) {
struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
dma_addr_t phy_addr;
int token;
void *ptr;
ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
GFP_KERNEL);
if (!ptr)
goto unmap;
phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
wed->wlan.rx_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
skb_free_frag(ptr);
goto unmap;
}
desc->buf0 = cpu_to_le32(phy_addr);
token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
token));
desc++;
}
return 0;
unmap:
mt7915_mmio_wed_release_rx_buf(wed);
return -ENOMEM;
}
static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
struct mtk_wed_wo_rx_stats *stats)
{
int idx = le16_to_cpu(stats->wlan_idx);
struct mt7915_dev *dev;
struct mt76_wcid *wcid;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
if (idx >= mt7915_wtbl_size(dev))
return;
rcu_read_lock();
wcid = rcu_dereference(dev->mt76.wcid[idx]);
if (wcid) {
wcid->stats.rx_bytes += le32_to_cpu(stats->rx_byte_cnt);
wcid->stats.rx_packets += le32_to_cpu(stats->rx_pkt_cnt);
wcid->stats.rx_errors += le32_to_cpu(stats->rx_err_cnt);
wcid->stats.rx_drops += le32_to_cpu(stats->rx_drop_cnt);
}
rcu_read_unlock();
}
#endif
int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
bool pci, int *irq)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
int ret;
if (!wed_enable)
return 0;
if (pci) {
struct pci_dev *pci_dev = pdev_ptr;
wed->wlan.pci_dev = pci_dev;
wed->wlan.bus_type = MTK_WED_BUS_PCIE;
wed->wlan.base = devm_ioremap(dev->mt76.dev,
pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) +
MT_INT_WED_SOURCE_CSR;
wed->wlan.wpdma_mask = pci_resource_start(pci_dev, 0) +
MT_INT_WED_MASK_CSR;
wed->wlan.wpdma_phys = pci_resource_start(pci_dev, 0) +
MT_WFDMA_EXT_CSR_BASE;
wed->wlan.wpdma_tx = pci_resource_start(pci_dev, 0) +
MT_TXQ_WED_RING_BASE;
wed->wlan.wpdma_txfree = pci_resource_start(pci_dev, 0) +
MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) +
MT_WPDMA_GLO_CFG;
wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) +
MT_RXQ_WED_DATA_RING_BASE;
} else {
struct platform_device *plat_dev = pdev_ptr;
struct resource *res;
res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
if (!res)
return -ENOMEM;
wed->wlan.platform_dev = plat_dev;
wed->wlan.bus_type = MTK_WED_BUS_AXI;
wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start,
resource_size(res));
wed->wlan.phy_base = res->start;
wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR;
wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR;
wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE;
wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
}
wed->wlan.nbuf = 4096;
wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31;
wed->wlan.txfree_tbit = is_mt7986(&dev->mt76) ? 2 : 1;
wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
wed->wlan.wcid_512 = !is_mt7915(&dev->mt76);
wed->wlan.rx_nbuf = 65536;
wed->wlan.rx_npkt = MT7915_WED_RX_TOKEN_SIZE;
wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
if (is_mt7915(&dev->mt76)) {
wed->wlan.rx_tbit[0] = 16;
wed->wlan.rx_tbit[1] = 17;
} else if (is_mt7986(&dev->mt76)) {
wed->wlan.rx_tbit[0] = 22;
wed->wlan.rx_tbit[1] = 23;
} else {
wed->wlan.rx_tbit[0] = 18;
wed->wlan.rx_tbit[1] = 19;
}
wed->wlan.init_buf = mt7915_wed_init_buf;
wed->wlan.offload_enable = mt7915_mmio_wed_offload_enable;
wed->wlan.offload_disable = mt7915_mmio_wed_offload_disable;
wed->wlan.init_rx_buf = mt7915_mmio_wed_init_rx_buf;
wed->wlan.release_rx_buf = mt7915_mmio_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
if (mtk_wed_device_attach(wed))
return 0;
*irq = wed->irq;
dev->mt76.dma_dev = wed->dev;
ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
return 1;
#else
return 0;
#endif
}
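The return convention is what both probe paths below rely on: a negative value aborts the probe, 0 means WED stays unused and the caller falls back to its usual interrupt source, and 1 means WED is attached with *irq pointing at the WED interrupt. A hedged caller sketch, mirroring the mt7986 SoC probe change further down (the error label is a placeholder):
ret = mt7915_mmio_wed_init(dev, pdev, false, &irq);
if (ret < 0)
	goto free_device;        /* hard failure */
if (!ret) {
	/* WED disabled or not attached: use the platform IRQ instead */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto free_device;
	}
}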
static int mt7915_mmio_init(struct mt76_dev *mdev,
void __iomem *mem_base,
u32 device_id)
@ -536,7 +854,11 @@ void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
mdev->mmio.irqmask |= set;
if (write_reg) {
mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
if (mtk_wed_device_active(&mdev->mmio.wed))
mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
mdev->mmio.irqmask);
else
mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
}
@ -560,6 +882,8 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
if (mtk_wed_device_active(wed)) {
mtk_wed_device_irq_set_mask(wed, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
} else {
mt76_wr(dev, MT_INT_MASK_CSR, 0);
@ -613,10 +937,9 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
u32 val = mt76_rr(dev, MT_MCU_CMD);
mt76_wr(dev, MT_MCU_CMD, val);
if (val & MT_MCU_CMD_ERROR_MASK) {
dev->reset_state = val;
queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
if (val & (MT_MCU_CMD_ERROR_MASK | MT_MCU_CMD_WDT_MASK)) {
dev->recovery.state = val;
mt7915_reset(dev);
}
}
}
@ -648,7 +971,8 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,


@ -68,6 +68,8 @@
#define MT7915_MIN_TWT_DUR 64
#define MT7915_MAX_QUEUE (MT_RXQ_BAND2 + __MT_MCUQ_MAX + 2)
#define MT7915_WED_RX_TOKEN_SIZE 12288
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@ -114,6 +116,8 @@ struct mt7915_twt_flow {
u8 sched:1;
};
DECLARE_EWMA(avg_signal, 10, 8)
struct mt7915_sta {
struct mt76_wcid wcid; /* must be first */
@ -123,10 +127,12 @@ struct mt7915_sta {
struct list_head rc_list;
u32 airtime_ac[8];
int ack_signal;
struct ewma_avg_signal avg_ack_signal;
unsigned long changed;
unsigned long jiffies;
unsigned long ampdu_state;
struct mt76_connac_sta_key_conf bip;
struct {
@ -220,6 +226,15 @@ struct mib_stats {
u32 tx_amsdu_cnt;
};
/* crash-dump */
struct mt7915_crash_data {
guid_t guid;
struct timespec64 timestamp;
u8 *memdump_buf;
size_t memdump_buf_len;
};
struct mt7915_hif {
struct list_head list;
@ -243,7 +258,6 @@ struct mt7915_phy {
u32 rxfilter;
u64 omac_mask;
u8 band_idx;
u16 noise;
@ -301,9 +315,26 @@ struct mt7915_dev {
struct work_struct init_work;
struct work_struct rc_work;
struct work_struct dump_work;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
u32 reset_state;
struct {
u32 state;
u32 wa_reset_count;
u32 wm_reset_count;
bool hw_full_reset:1;
bool hw_init_done:1;
bool restart:1;
} recovery;
/* protects coredump data */
struct mutex dump_mutex;
#ifdef CONFIG_DEV_COREDUMP
struct {
struct mt7915_crash_data *crash_data;
} coredump;
#endif
struct list_head sta_rc_list;
struct list_head sta_poll_list;
@ -357,6 +388,7 @@ enum mt7915_rdd_cmd {
RDD_DET_MODE,
RDD_RADAR_EMULATE,
RDD_START_TXQ = 20,
RDD_SET_WF_ANT = 30,
RDD_CAC_START = 50,
RDD_CAC_END,
RDD_NORMAL_START,
@ -442,7 +474,14 @@ s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
int mt7915_dma_reset(struct mt7915_dev *dev, bool force);
int mt7915_txbf_init(struct mt7915_dev *dev);
void mt7915_init_txpower(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband);
void mt7915_reset(struct mt7915_dev *dev);
int mt7915_run(struct ieee80211_hw *hw);
int mt7915_mcu_init(struct mt7915_dev *dev);
int mt7915_mcu_init_firmware(struct mt7915_dev *dev);
int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
struct mt7915_vif *mvif,
struct mt7915_twt_flow *flow,
@ -463,8 +502,8 @@ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vi
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int enable, u32 changed);
int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_he_obss_pd *he_obss_pd);
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@ -488,6 +527,10 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower);
int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, s8 txpower);
int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action);
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
@ -542,11 +585,17 @@ static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
size_t len);
void mt7915_mac_init(struct mt7915_dev *dev);
u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw);
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
struct ieee80211_vif *vif, bool enable);
void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key,
@ -558,6 +607,7 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_dump_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
void mt7915_mac_update_stats(struct mt7915_phy *phy);
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
@ -572,7 +622,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_token_put(struct mt7915_dev *dev);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
struct sk_buff *skb, u32 *info);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7915_stats_work(struct work_struct *work);
@ -583,6 +633,7 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
void mt7915_update_channel(struct mt76_phy *mphy);
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev);
int mt7915_init_debugfs(struct mt7915_phy *phy);
void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
@ -590,5 +641,7 @@ bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
#endif
int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
bool pci, int *irq);
#endif


@ -12,9 +12,6 @@
#include "mac.h"
#include "../trace.h"
static bool wed_enable = false;
module_param(wed_enable, bool, 0644);
static LIST_HEAD(hif_list);
static DEFINE_SPINLOCK(hif_lock);
static u32 hif_idx;
@ -65,10 +62,17 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
{
struct pci_dev *tmp_pdev;
hif_idx++;
if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
return NULL;
tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL);
if (!tmp_pdev) {
tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL);
if (!tmp_pdev)
return NULL;
}
pci_dev_put(tmp_pdev);
writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
@ -95,94 +99,6 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
return 0;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
struct mt7915_phy *phy;
int ret;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = wed->wlan.token_start;
spin_unlock_bh(&dev->mt76.token_lock);
ret = wait_event_timeout(dev->mt76.tx_wait,
!dev->mt76.wed_token_count, HZ);
if (!ret)
return -EAGAIN;
phy = &dev->phy;
mt76_set(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (phy)
mt76_set(dev, MT_AGG_ACR4(phy->band_idx),
MT_AGG_ACR_PPDU_TXS2H);
return 0;
}
static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
struct mt7915_phy *phy;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = MT7915_TOKEN_SIZE;
spin_unlock_bh(&dev->mt76.token_lock);
/* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
* MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
*/
phy = &dev->phy;
mt76_clear(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (phy)
mt76_clear(dev, MT_AGG_ACR4(phy->band_idx),
MT_AGG_ACR_PPDU_TXS2H);
}
#endif
static int
mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
int ret;
if (!wed_enable)
return 0;
wed->wlan.pci_dev = pdev;
wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
MT_WFDMA_EXT_CSR_BASE;
wed->wlan.nbuf = 4096;
wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
wed->wlan.init_buf = mt7915_wed_init_buf;
wed->wlan.offload_enable = mt7915_wed_offload_enable;
wed->wlan.offload_disable = mt7915_wed_offload_disable;
if (mtk_wed_device_attach(wed) != 0)
return 0;
*irq = wed->irq;
dev->mt76.dma_dev = wed->dev;
ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
return 1;
#else
return 0;
#endif
}
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@ -220,7 +136,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
mt7915_wfsys_reset(dev);
hif2 = mt7915_pci_init_hif2(pdev);
ret = mt7915_pci_wed_init(dev, pdev, &irq);
ret = mt7915_mmio_wed_init(dev, pdev, true, &irq);
if (ret < 0)
goto free_wed_or_irq_vector;


@ -24,8 +24,26 @@ enum reg_rev {
WFDMA_EXT_CSR_ADDR,
CBTOP1_PHY_END,
INFRA_MCU_ADDR_END,
FW_EXCEPTION_ADDR,
FW_ASSERT_STAT_ADDR,
FW_EXCEPT_TYPE_ADDR,
FW_EXCEPT_COUNT_ADDR,
FW_CIRQ_COUNT_ADDR,
FW_CIRQ_IDX_ADDR,
FW_CIRQ_LISR_ADDR,
FW_TASK_ID_ADDR,
FW_TASK_IDX_ADDR,
FW_TASK_QID1_ADDR,
FW_TASK_QID2_ADDR,
FW_TASK_START_ADDR,
FW_TASK_END_ADDR,
FW_TASK_SIZE_ADDR,
FW_LAST_MSG_ID_ADDR,
FW_EINT_INFO_ADDR,
FW_SCHED_INFO_ADDR,
SWDEF_BASE_ADDR,
TXQ_WED_RING_BASE,
RXQ_WED_RING_BASE,
RXQ_WED_DATA_RING_BASE,
__MT_REG_MAX,
};
@ -224,6 +242,14 @@ enum offs_rev {
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
/* WTBLOFF TOP: band 0(0x820e9000),band 1(0x820f9000) */
#define MT_WTBLOFF_TOP_BASE(_band) ((_band) ? 0x820f9000 : 0x820e9000)
#define MT_WTBLOFF_TOP(_band, ofs) (MT_WTBLOFF_TOP_BASE(_band) + (ofs))
#define MT_WTBLOFF_TOP_RSCR(_band) MT_WTBLOFF_TOP(_band, 0x008)
#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30)
#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24)
/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
@ -523,8 +549,22 @@ enum offs_rev {
#define MT_WF_RFCR1_DROP_CFEND BIT(7)
#define MT_WF_RFCR1_DROP_CFACK BIT(8)
#define MT_WF_RMAC_RSVD0(_band) MT_WF_RMAC(_band, 0x02e0)
#define MT_WF_RMAC_RSVD0_EIFS_CLR BIT(21)
#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
#define MT_WF_RMAC_MIB_OBSS_BACKOFF GENMASK(15, 0)
#define MT_WF_RMAC_MIB_ED_OFFSET GENMASK(20, 16)
#define MT_WF_RMAC_MIB_AIRTIME1(_band) MT_WF_RMAC(_band, 0x0384)
#define MT_WF_RMAC_MIB_NONQOSD_BACKOFF GENMASK(31, 16)
#define MT_WF_RMAC_MIB_AIRTIME3(_band) MT_WF_RMAC(_band, 0x038c)
#define MT_WF_RMAC_MIB_QOS01_BACKOFF GENMASK(31, 0)
#define MT_WF_RMAC_MIB_AIRTIME4(_band) MT_WF_RMAC(_band, 0x0390)
#define MT_WF_RMAC_MIB_QOS23_BACKOFF GENMASK(31, 0)
/* WFDMA0 */
#define MT_WFDMA0_BASE __REG(WFDMA0_ADDR)
@ -539,6 +579,8 @@ enum offs_rev {
#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
#define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
@ -547,9 +589,14 @@ enum offs_rev {
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_EXT0_CFG MT_WFDMA0(0x2b0)
#define MT_WFDMA0_EXT0_RXWB_KEEP BIT(10)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
#define MT_WPDMA_GLO_CFG MT_WFDMA0(0x208)
/* WFDMA1 */
#define MT_WFDMA1_BASE 0xd5000
@ -596,6 +643,7 @@ enum offs_rev {
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
#define MT_INT_WED_SOURCE_CSR MT_WFDMA_EXT_CSR(0x200)
#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
@ -642,6 +690,10 @@ enum offs_rev {
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
MT_TXQ_ID(q)* 0x4)
#define MT_TXQ_WED_RING_BASE __REG(TXQ_WED_RING_BASE)
#define MT_RXQ_WED_RING_BASE __REG(RXQ_WED_RING_BASE)
#define MT_RXQ_WED_DATA_RING_BASE __REG(RXQ_WED_DATA_RING_BASE)
#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
@ -660,6 +712,11 @@ enum offs_rev {
#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
#define MT_INT_WED_RX_DONE_BAND0_MT7916 BIT(18)
#define MT_INT_WED_RX_DONE_BAND1_MT7916 BIT(19)
#define MT_INT_WED_RX_DONE_WA_MAIN_MT7916 BIT(1)
#define MT_INT_WED_RX_DONE_WA_MT7916 BIT(17)
#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
@ -683,6 +740,8 @@ enum offs_rev {
#define MT_INT_TX_DONE_BAND0 BIT(30)
#define MT_INT_TX_DONE_BAND1 BIT(31)
#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
#define MT_INT_WED_TX_DONE_BAND0 BIT(4)
#define MT_INT_WED_TX_DONE_BAND1 BIT(5)
#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
MT_INT_TX_MCU(MT_MCUQ_WM) | \
@ -696,6 +755,10 @@ enum offs_rev {
#define MT_MCU_CMD_NORMAL_STATE BIT(5)
#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
#define MT_MCU_CMD_WA_WDT BIT(31)
#define MT_MCU_CMD_WM_WDT BIT(30)
#define MT_MCU_CMD_WDT_MASK GENMASK(31, 30)
/* TOP RGU */
#define MT_TOP_RGU_BASE 0x18000000
#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
@ -938,7 +1001,22 @@ enum offs_rev {
#define MT_ADIE_TYPE_MASK BIT(1)
/* FW MODE SYNC */
#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR)
#define MT_FW_ASSERT_STAT __REG(FW_ASSERT_STAT_ADDR)
#define MT_FW_EXCEPT_TYPE __REG(FW_EXCEPT_TYPE_ADDR)
#define MT_FW_EXCEPT_COUNT __REG(FW_EXCEPT_COUNT_ADDR)
#define MT_FW_CIRQ_COUNT __REG(FW_CIRQ_COUNT_ADDR)
#define MT_FW_CIRQ_IDX __REG(FW_CIRQ_IDX_ADDR)
#define MT_FW_CIRQ_LISR __REG(FW_CIRQ_LISR_ADDR)
#define MT_FW_TASK_ID __REG(FW_TASK_ID_ADDR)
#define MT_FW_TASK_IDX __REG(FW_TASK_IDX_ADDR)
#define MT_FW_TASK_QID1 __REG(FW_TASK_QID1_ADDR)
#define MT_FW_TASK_QID2 __REG(FW_TASK_QID2_ADDR)
#define MT_FW_TASK_START __REG(FW_TASK_START_ADDR)
#define MT_FW_TASK_END __REG(FW_TASK_END_ADDR)
#define MT_FW_TASK_SIZE __REG(FW_TASK_SIZE_ADDR)
#define MT_FW_LAST_MSG_ID __REG(FW_LAST_MSG_ID_ADDR)
#define MT_FW_EINT_INFO __REG(FW_EINT_INFO_ADDR)
#define MT_FW_SCHED_INFO __REG(FW_SCHED_INFO_ADDR)
#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR)
@ -1108,9 +1186,15 @@ enum offs_rev {
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
#define MT_WF_PHY_TPC_CTRL_STAT(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 16))
#define MT_WF_PHY_TPC_CTRL_STAT_MT7916(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 20))
#define MT_WF_PHY_TPC_POWER GENMASK(15, 8)
#define MT_MCU_WM_CIRQ_BASE 0x89010000
#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
#define MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x80)
#define MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR MT_MCU_WM_CIRQ(0xc0)
#define MT_MCU_WM_CIRQ_EINT_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x108)
#define MT_MCU_WM_CIRQ_EINT_SOFT_ADDR MT_MCU_WM_CIRQ(0x118)
#endif


@ -1172,10 +1172,6 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
mem_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
@ -1187,6 +1183,18 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
return PTR_ERR(dev);
mdev = &dev->mt76;
ret = mt7915_mmio_wed_init(dev, pdev, false, &irq);
if (ret < 0)
goto free_device;
if (!ret) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto free_device;
}
}
ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
@ -1206,9 +1214,10 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
free_irq:
devm_free_irq(mdev->dev, irq, dev);
free_device:
mt76_free_device(&dev->mt76);
if (mtk_wed_device_active(&mdev->mmio.wed))
mtk_wed_device_detach(&mdev->mmio.wed);
mt76_free_device(mdev);
return ret;
}


@ -44,14 +44,14 @@ mt7915_tm_set_tx_power(struct mt7915_phy *phy)
int ret;
struct {
u8 format_id;
u8 dbdc_idx;
u8 band_idx;
s8 tx_power;
u8 ant_idx; /* Only 0 is valid */
u8 center_chan;
u8 rsv[3];
} __packed req = {
.format_id = 0xf,
.dbdc_idx = phy != &dev->phy,
.band_idx = phy->mt76->band_idx,
.center_chan = ieee80211_frequency_to_channel(freq),
};
u8 *tx_power = NULL;
@ -77,7 +77,7 @@ mt7915_tm_set_freq_offset(struct mt7915_phy *phy, bool en, u32 val)
struct mt7915_tm_cmd req = {
.testmode_en = en,
.param_idx = MCU_ATE_SET_FREQ_OFFSET,
.param.freq.band = phy != &dev->phy,
.param.freq.band = phy->mt76->band_idx,
.param.freq.freq_offset = cpu_to_le32(val),
};
@ -111,7 +111,7 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
.param_idx = MCU_ATE_SET_TRX,
.param.trx.type = type,
.param.trx.enable = en,
.param.trx.band = phy != &dev->phy,
.param.trx.band = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@ -126,7 +126,7 @@ mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
.testmode_en = 1,
.param_idx = MCU_ATE_CLEAN_TXQUEUE,
.param.clean.wcid = wcid,
.param.clean.band = phy != &dev->phy,
.param.clean.band = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@ -144,7 +144,7 @@ mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs)
.param.slot.sifs = sifs,
.param.slot.rifs = 2,
.param.slot.eifs = cpu_to_le16(60),
.param.slot.band = phy != &dev->phy,
.param.slot.band = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@ -198,6 +198,7 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
u8 sig_ext = (mode == MT76_TM_TX_MODE_CCK) ? 0 : 6;
u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
u8 aifsn = TM_MIN_AIFSN;
u8 band = phy->mt76->band_idx;
u32 i2t_time, tr2t_time, txv_time;
u16 cw = 0;
@ -232,14 +233,14 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
sifs = min_t(u32, ipg, TM_MAX_SIFS);
}
done:
txv_time = mt76_get_field(dev, MT_TMAC_ATCR(phy->band_idx),
txv_time = mt76_get_field(dev, MT_TMAC_ATCR(band),
MT_TMAC_ATCR_TXV_TOUT);
txv_time *= 50; /* normal clock time */
i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
mt76_set(dev, MT_TMAC_TRCR0(phy->band_idx),
mt76_set(dev, MT_TMAC_TRCR0(band),
FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
@ -336,6 +337,7 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
int n_regs = ARRAY_SIZE(reg_backup_list);
struct mt7915_dev *dev = phy->dev;
u32 *b = phy->test.reg_backup;
u8 band = phy->mt76->band_idx;
int i;
REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
@ -358,7 +360,7 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
mt76_wr(dev, reg_backup_list[i].band[phy->band_idx], b[i]);
mt76_wr(dev, reg_backup_list[i].band[band], b[i]);
return;
}
@ -369,33 +371,33 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
phy->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
b[i] = mt76_rr(dev, reg_backup_list[i].band[phy->band_idx]);
b[i] = mt76_rr(dev, reg_backup_list[i].band[band]);
}
mt76_clear(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_MM_PROT |
mt76_clear(dev, MT_AGG_PCR0(band, 0), MT_AGG_PCR0_MM_PROT |
MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT |
MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT);
mt76_set(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_PTA_WIN_DIS);
mt76_set(dev, MT_AGG_PCR0(band, 0), MT_AGG_PCR0_PTA_WIN_DIS);
mt76_wr(dev, MT_AGG_PCR0(phy->band_idx, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
mt76_wr(dev, MT_AGG_PCR0(band, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
MT_AGG_PCR1_RTS0_LEN_THRES);
mt76_clear(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_BAR_CNT_LIMIT |
mt76_clear(dev, MT_AGG_MRCR(band), MT_AGG_MRCR_BAR_CNT_LIMIT |
MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT);
mt76_rmw(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_RTS_FAIL_LIMIT |
mt76_rmw(dev, MT_AGG_MRCR(band), MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT,
FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) |
FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1));
mt76_wr(dev, MT_TMAC_TFCR0(phy->band_idx), 0);
mt76_clear(dev, MT_TMAC_TCR0(phy->band_idx), MT_TMAC_TCR0_TBTT_STOP_CTRL);
mt76_wr(dev, MT_TMAC_TFCR0(band), 0);
mt76_clear(dev, MT_TMAC_TCR0(band), MT_TMAC_TCR0_TBTT_STOP_CTRL);
/* config rx filter for testmode rx */
mt76_wr(dev, MT_WF_RFCR(phy->band_idx), 0xcf70a);
mt76_wr(dev, MT_WF_RFCR1(phy->band_idx), 0);
mt76_wr(dev, MT_WF_RFCR(band), 0xcf70a);
mt76_wr(dev, MT_WF_RFCR1(band), 0);
}
static void
@ -432,8 +434,6 @@ mt7915_tm_update_channel(struct mt7915_phy *phy)
static void
mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
{
static const u8 spe_idx_map[] = {0, 0, 1, 0, 3, 2, 4, 0,
9, 8, 6, 10, 16, 12, 18, 0};
struct mt76_testmode_data *td = &phy->mt76->test;
struct mt7915_dev *dev = phy->dev;
struct ieee80211_tx_info *info;
@ -447,15 +447,10 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
if (en) {
mt7915_tm_update_channel(phy);
if (td->tx_spe_idx) {
if (td->tx_spe_idx)
phy->test.spe_idx = td->tx_spe_idx;
} else {
u8 tx_ant = td->tx_antenna_mask;
if (phy != &dev->phy)
tx_ant >>= dev->chainshift;
phy->test.spe_idx = spe_idx_map[tx_ant];
}
else
phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
}
mt7915_tm_set_tam_arb(phy, en,
@ -495,7 +490,7 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
mt7915_tm_update_channel(phy);
/* read-clear */
mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
mt76_rr(dev, MT_MIB_SDR3(phy->mt76->band_idx));
mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
}
}
@ -522,6 +517,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
struct mt76_testmode_data *td = &phy->mt76->test;
u32 func_idx = en ? TX_CONT_START : TX_CONT_STOP;
u8 rate_idx = td->tx_rate_idx, mode;
u8 band = phy->mt76->band_idx;
u16 rateval;
struct mt7915_tm_rf_test req = {
.action = 1,
@ -533,7 +529,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
tx_cont->control_ch = chandef->chan->hw_value;
tx_cont->center_ch = freq1;
tx_cont->tx_ant = td->tx_antenna_mask;
tx_cont->band = phy != &dev->phy;
tx_cont->band = band;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
@ -565,7 +561,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
}
if (!en) {
req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy);
req.op.rf.param.func_data = cpu_to_le32(band);
goto out;
}
@ -696,7 +692,9 @@ mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
{
struct mt76_testmode_data *td = &mphy->test;
struct mt7915_phy *phy = mphy->priv;
u32 changed = 0;
struct mt7915_dev *dev = phy->dev;
u32 chainmask = mphy->chainmask, changed = 0;
bool ext_phy = phy != &dev->phy;
int i;
BUILD_BUG_ON(NUM_TM_CHANGED >= 32);
@ -705,7 +703,8 @@ mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
td->state == MT76_TM_STATE_OFF)
return 0;
if (td->tx_antenna_mask & ~mphy->chainmask)
chainmask = ext_phy ? chainmask >> dev->chainshift : chainmask;
if (td->tx_antenna_mask > chainmask)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
@ -771,11 +770,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
cnt = mt76_rr(dev, MT_MIB_SDR3(phy->mt76->band_idx));
fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
q = phy->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
q = phy->mt76->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;


@ -85,7 +85,7 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
seq_puts(file, "\nCount: ");
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
seq_puts(file, "\n");
seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);


@ -2,6 +2,7 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include "mt7921.h"
#include "mac.h"
#include "mcu.h"
@ -25,6 +26,27 @@ static const struct ieee80211_iface_combination if_comb[] = {
.max_interfaces = MT7921_MAX_INTERFACES,
.num_different_channels = 1,
.beacon_int_infra_match = true,
},
};
static const struct ieee80211_iface_limit if_limits_chanctx[] = {
{
.max = 2,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP),
}
};
static const struct ieee80211_iface_combination if_comb_chanctx[] = {
{
.limits = if_limits_chanctx,
.n_limits = ARRAY_SIZE(if_limits_chanctx),
.max_interfaces = 2,
.num_different_channels = 2,
.beacon_int_infra_match = false,
}
};
@ -37,6 +59,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
dev->country_ie_env = request->country_ie_env;
mt7921_mutex_acquire(dev);
mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
@ -65,12 +88,20 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
hw->sta_data_size = sizeof(struct mt7921_sta);
hw->vif_data_size = sizeof(struct mt7921_vif);
wiphy->iface_combinations = if_comb;
if (dev->fw_features & MT7921_FW_CAP_CNM) {
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->iface_combinations = if_comb_chanctx;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_chanctx);
} else {
wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
}
wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION);
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP);
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->max_remain_on_channel_duration = 5000;
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
wiphy->max_sched_scan_plan_interval =
@ -129,6 +160,58 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
{
struct mt7921_fw_features *features = NULL;
const struct mt76_connac2_fw_trailer *hdr;
struct mt7921_realease_info *rel_info;
const struct firmware *fw;
int ret, i, offset = 0;
const u8 *data, *end;
ret = request_firmware(&fw, fw_wm, dev);
if (ret)
return ret;
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
dev_err(dev, "Invalid firmware\n");
return -EINVAL;
}
data = fw->data;
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
for (i = 0; i < hdr->n_region; i++) {
const struct mt76_connac2_fw_region *region;
region = (const void *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
offset += le32_to_cpu(region->len);
}
data += offset + 16;
rel_info = (struct mt7921_realease_info *)data;
data += sizeof(*rel_info);
end = data + le16_to_cpu(rel_info->len);
while (data < end) {
rel_info = (struct mt7921_realease_info *)data;
data += sizeof(*rel_info);
if (rel_info->tag == MT7921_FW_TAG_FEATURE) {
features = (struct mt7921_fw_features *)data;
break;
}
data += le16_to_cpu(rel_info->len) + rel_info->pad_len;
}
release_firmware(fw);
return features ? features->data : 0;
}
EXPORT_SYMBOL_GPL(mt7921_check_offload_capability);
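The feature bits returned here are what mt7921_init_wiphy() keys off above when choosing between the chanctx-capable interface combination and the plain one; a minimal usage sketch, assuming a probe-time context and an illustrative firmware name argument:
/* probe path (device context and firmware name are illustrative): */
dev->fw_features = mt7921_check_offload_capability(&pdev->dev, fw_wm_name);
/* later, mt7921_init_wiphy():
 *   dev->fw_features & MT7921_FW_CAP_CNM
 *     -> if_comb_chanctx + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
 *   otherwise -> if_comb
 */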
int mt7921_mac_init(struct mt7921_dev *dev)
{
int i;
@ -278,6 +361,10 @@ int mt7921_register_device(struct mt7921_dev *dev)
INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
INIT_WORK(&dev->init_work, mt7921_init_work);
INIT_WORK(&dev->phy.roc_work, mt7921_roc_work);
timer_setup(&dev->phy.roc_timer, mt7921_roc_timer, 0);
init_waitqueue_head(&dev->phy.roc_wait);
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;


@ -168,14 +168,6 @@ static void
mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
struct mt76_rx_status *status, u8 chfreq)
{
if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
!test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
!test_bit(MT76_STATE_ROC, &mphy->state)) {
status->freq = mphy->chandef.chan->center_freq;
status->band = mphy->chandef.chan->band;
return;
}
if (chfreq > 180) {
status->band = NL80211_BAND_6GHZ;
chfreq = (chfreq - 181) * 4 + 1;
@ -396,6 +388,27 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
rxv, &mode);
if (ret < 0)
return ret;
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
rxd += 6;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
rxv = rxd;
/* Monitor mode would use RCPI described in GROUP 5
* instead.
*/
v1 = le32_to_cpu(rxv[0]);
rxd += 12;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
}
status->chains = mphy->antenna_mask;
status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
@ -410,17 +423,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->signal = max(status->signal,
status->chain_signal[i]);
}
ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
rxv, &mode);
if (ret < 0)
return ret;
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
rxd += 18;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
}
}
amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
@ -682,7 +684,7 @@ bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
EXPORT_SYMBOL_GPL(mt7921_rx_check);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
struct sk_buff *skb, u32 *info)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
@ -735,7 +737,7 @@ void mt7921_mac_reset_counters(struct mt7921_phy *phy)
}
dev->mt76.phy.survey_time = ktime_get_boottime();
memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);
memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
@ -856,7 +858,7 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
if (vif->type == NL80211_IFTYPE_AP) {
mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
true);
true, NULL);
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
@ -974,16 +976,16 @@ void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
mib->tx_amsdu_cnt += val;
}
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
u32 val2;
val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
dev->mt76.aggr_stats[aggr0++] += val >> 16;
dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
phy->mt76->aggr_stats[aggr0++] += val >> 16;
phy->mt76->aggr_stats[aggr1++] += val2 & 0xffff;
phy->mt76->aggr_stats[aggr1++] += val2 >> 16;
}
}


@ -386,6 +386,116 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
static void mt7921_roc_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_phy *phy = priv;
mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id);
}
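/* Runs when the ROC period expires: abort the grant on every active vif
 * and let mac80211 know the remain-on-channel request is over.
 */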
void mt7921_roc_work(struct work_struct *work)
{
struct mt7921_phy *phy;
phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy,
roc_work);
if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
return;
mt7921_mutex_acquire(phy->dev);
ieee80211_iterate_active_interfaces(phy->mt76->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_roc_iter, phy);
mt7921_mutex_release(phy->dev);
ieee80211_remain_on_channel_expired(phy->mt76->hw);
}
void mt7921_roc_timer(struct timer_list *timer)
{
struct mt7921_phy *phy = from_timer(phy, timer, roc_timer);
ieee80211_queue_work(phy->mt76->hw, &phy->roc_work);
}
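/* Tear down an active ROC session: stop the expiry timer, flush the work
 * item and ask the firmware to abort the current grant.
 */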
static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif)
{
int err;
if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
return 0;
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
return err;
}
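/* Request a ROC grant from the firmware and wait up to one second for the
 * grant event; roll the state bit back on failure or timeout.
 */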
static int mt7921_set_roc(struct mt7921_phy *phy,
struct mt7921_vif *vif,
struct ieee80211_channel *chan,
int duration,
enum mt7921_roc_req type)
{
int err;
if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))
return -EBUSY;
phy->roc_grant = false;
err = mt7921_mcu_set_roc(phy, vif, chan, duration, type,
++phy->roc_token_id);
if (err < 0) {
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
goto out;
}
if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) {
mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
err = -ETIMEDOUT;
}
out:
return err;
}
static int mt7921_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
int duration,
enum ieee80211_roc_type type)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_phy *phy = mt7921_hw_phy(hw);
int err;
mt7921_mutex_acquire(phy->dev);
err = mt7921_set_roc(phy, mvif, chan, duration, MT7921_ROC_REQ_ROC);
mt7921_mutex_release(phy->dev);
return err;
}
static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_phy *phy = mt7921_hw_phy(hw);
int err;
mt7921_mutex_acquire(phy->dev);
err = mt7921_abort_roc(phy, mvif);
mt7921_mutex_release(phy->dev);
return err;
}
static int mt7921_set_channel(struct mt7921_phy *phy)
{
struct mt7921_dev *dev = phy->dev;
@ -748,7 +858,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
true);
true, mvif->ctx);
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@ -780,7 +890,8 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
ewma_rssi_init(&mvif->rssi);
if (!sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
&mvif->sta.wcid, false);
&mvif->sta.wcid, false,
mvif->ctx);
}
spin_lock_bh(&dev->sta_poll_lock);
@ -1075,7 +1186,7 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* Tx ampdu stat */
for (i = 0; i < 15; i++)
data[ei++] = dev->mt76.aggr_stats[i];
data[ei++] = phy->mt76->aggr_stats[i];
data[ei++] = phy->mib.ba_miss_cnt;
@ -1504,7 +1615,13 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
int err;
mt7921_mutex_acquire(dev);
err = mt7921_mcu_set_clc(dev, dev->mt76.alpha2,
dev->country_ie_env);
if (err < 0)
goto out;
err = mt7921_set_tx_sar_pwr(hw, sar);
out:
mt7921_mutex_release(dev);
return err;
@ -1534,7 +1651,7 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7921_mutex_acquire(dev);
err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
true);
true, mvif->ctx);
if (err)
goto out;
@ -1565,12 +1682,109 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (err)
goto out;
mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false);
mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false,
mvif->ctx);
out:
mt7921_mutex_release(dev);
}
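/* Channel-context bookkeeping is done per vif (mvif->ctx), so the
 * add/remove context hooks themselves have nothing to do.
 */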
static int
mt7921_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
return 0;
}
static void
mt7921_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
}
static void mt7921_ctx_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct ieee80211_chanctx_conf *ctx = priv;
if (ctx != mvif->ctx)
return;
mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
}
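/* Propagate an updated channel definition to the firmware for every vif
 * bound to this context.
 */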
static void
mt7921_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
struct mt7921_phy *phy = mt7921_hw_phy(hw);
mt7921_mutex_acquire(phy->dev);
ieee80211_iterate_active_interfaces(phy->mt76->hw,
IEEE80211_IFACE_ITER_ACTIVE,
mt7921_ctx_iter, ctx);
mt7921_mutex_release(phy->dev);
}
static int
mt7921_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
mvif->ctx = ctx;
mutex_unlock(&dev->mt76.mutex);
return 0;
}
static void
mt7921_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
mvif->ctx = NULL;
mutex_unlock(&dev->mt76.mutex);
}
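/* Hold the channel with a JOIN-type ROC while association frames are
 * exchanged; fall back to a one-second duration if mac80211 does not
 * provide one.
 */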
static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_prep_tx_info *info)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
u16 duration = info->duration ? info->duration :
jiffies_to_msecs(HZ);
mt7921_mutex_acquire(dev);
mt7921_set_roc(mvif->phy, mvif, mvif->ctx->def.chan, duration,
MT7921_ROC_REQ_JOIN);
mt7921_mutex_release(dev);
}
static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_prep_tx_info *info)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
mt7921_mutex_acquire(dev);
mt7921_abort_roc(mvif->phy, mvif);
mt7921_mutex_release(dev);
}
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@ -1621,6 +1835,15 @@ const struct ieee80211_ops mt7921_ops = {
#endif /* CONFIG_PM */
.flush = mt7921_flush,
.set_sar_specs = mt7921_set_sar_specs,
.remain_on_channel = mt7921_remain_on_channel,
.cancel_remain_on_channel = mt7921_cancel_remain_on_channel,
.add_chanctx = mt7921_add_chanctx,
.remove_chanctx = mt7921_remove_chanctx,
.change_chanctx = mt7921_change_chanctx,
.assign_vif_chanctx = mt7921_assign_vif_chanctx,
.unassign_vif_chanctx = mt7921_unassign_vif_chanctx,
.mgd_prepare_tx = mt7921_mgd_prepare_tx,
.mgd_complete_tx = mt7921_mgd_complete_tx,
};
EXPORT_SYMBOL_GPL(mt7921_ops);


@ -154,6 +154,29 @@ void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
#endif /* CONFIG_PM */
static void
mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_roc_grant_tlv *grant;
struct mt76_connac2_mcu_rxd *rxd;
int duration;
rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);
/* should never happen */
WARN_ON_ONCE(le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT);
if (grant->reqtype == MT7921_ROC_REQ_ROC)
ieee80211_ready_on_channel(dev->mt76.phy.hw);
dev->phy.roc_grant = true;
wake_up(&dev->phy.roc_wait);
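/* Re-arm the expiry timer with the interval the firmware actually granted. */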
duration = le32_to_cpu(grant->max_interval);
mod_timer(&dev->phy.roc_timer,
round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
}
static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
@ -199,20 +222,6 @@ mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mcu_connection_loss_iter, event);
}
static void
mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_mcu_bss_event *event;
skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt76_connac_mcu_bss_event *)skb->data;
if (event->is_absent)
ieee80211_stop_queues(mphy->hw);
else
ieee80211_wake_queues(mphy->hw);
}
static void
mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
@ -279,9 +288,6 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
case MCU_EVENT_SCAN_DONE:
mt7921_mcu_scan_event(dev, skb);
return;
case MCU_EVENT_BSS_ABSENCE:
mt7921_mcu_bss_event(dev, skb);
break;
case MCU_EVENT_DBG_MSG:
mt7921_mcu_debug_msg_event(dev, skb);
break;
@ -302,6 +308,24 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
dev_kfree_skb(skb);
}
static void
mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev,
struct sk_buff *skb)
{
struct mt76_connac2_mcu_rxd *rxd;
rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
switch (rxd->eid) {
case MCU_UNI_EVENT_ROC:
mt7921_mcu_uni_roc_event(dev, skb);
break;
default:
break;
}
dev_kfree_skb(skb);
}
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt76_connac2_mcu_rxd *rxd;
@ -311,6 +335,11 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
mt7921_mcu_uni_rx_unsolicited_event(dev, skb);
return;
}
if (rxd->eid == 0x6) {
mt76_mcu_rx_event(&dev->mt76, skb);
return;
@ -319,7 +348,6 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
rxd->eid == MCU_EVENT_BSS_ABSENCE ||
rxd->eid == MCU_EVENT_SCAN_DONE ||
rxd->eid == MCU_EVENT_TX_DONE ||
rxd->eid == MCU_EVENT_DBG_MSG ||
@ -636,6 +664,103 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
&req_mu, sizeof(req_mu), false);
}
int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
struct ieee80211_channel *chan, int duration,
enum mt7921_roc_req type, u8 token_id)
{
int center_ch = ieee80211_frequency_to_channel(chan->center_freq);
struct mt7921_dev *dev = phy->dev;
struct {
struct {
u8 rsv[4];
} __packed hdr;
struct roc_acquire_tlv {
__le16 tag;
__le16 len;
u8 bss_idx;
u8 tokenid;
u8 control_channel;
u8 sco;
u8 band;
u8 bw;
u8 center_chan;
u8 center_chan2;
u8 bw_from_ap;
u8 center_chan_from_ap;
u8 center_chan2_from_ap;
u8 reqtype;
__le32 maxinterval;
u8 dbdcband;
u8 rsv[3];
} __packed roc;
} __packed req = {
.roc = {
.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
.len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
.tokenid = token_id,
.reqtype = type,
.maxinterval = cpu_to_le32(duration),
.bss_idx = vif->mt76.idx,
.control_channel = chan->hw_value,
.bw = CMD_CBW_20MHZ,
.bw_from_ap = CMD_CBW_20MHZ,
.center_chan = center_ch,
.center_chan_from_ap = center_ch,
.dbdcband = 0xff, /* auto */
},
};
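/* Derive the secondary channel offset (SCA/SCB) and the band field from
 * the requested channel.
 */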
if (chan->hw_value < center_ch)
req.roc.sco = 1; /* SCA */
else if (chan->hw_value > center_ch)
req.roc.sco = 3; /* SCB */
switch (chan->band) {
case NL80211_BAND_6GHZ:
req.roc.band = 3;
break;
case NL80211_BAND_5GHZ:
req.roc.band = 2;
break;
default:
req.roc.band = 1;
break;
}
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
&req, sizeof(req), false);
}
int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
u8 token_id)
{
struct mt7921_dev *dev = phy->dev;
struct {
struct {
u8 rsv[4];
} __packed hdr;
struct roc_abort_tlv {
__le16 tag;
__le16 len;
u8 bss_idx;
u8 tokenid;
u8 dbdcband;
u8 rsv[5];
} __packed abort;
} __packed req = {
.abort = {
.tag = cpu_to_le16(UNI_ROC_ABORT),
.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
.tokenid = token_id,
.bss_idx = vif->mt76.idx,
.dbdcband = 0xff, /* auto */
},
};
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
&req, sizeof(req), false);
}
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
{
struct mt7921_dev *dev = phy->dev;


@ -32,6 +32,9 @@
#define MT7921_MCU_INIT_RETRY_COUNT 10
#define MT7921_WFSYS_INIT_RETRY_COUNT 2
#define MT7921_FW_TAG_FEATURE 4
#define MT7921_FW_CAP_CNM BIT(7)
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
@ -53,6 +56,55 @@
#define MT7921_SDIO_HDR_TX_BYTES GENMASK(15, 0)
#define MT7921_SDIO_HDR_PKT_TYPE GENMASK(17, 16)
#define MCU_UNI_EVENT_ROC 0x27
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
UNI_ROC_NUM
};
enum mt7921_roc_req {
MT7921_ROC_REQ_JOIN,
MT7921_ROC_REQ_ROC,
MT7921_ROC_REQ_NUM
};
enum {
UNI_EVENT_ROC_GRANT = 0,
UNI_EVENT_ROC_TAG_NUM
};
struct mt7921_realease_info {
__le16 len;
u8 pad_len;
u8 tag;
} __packed;
struct mt7921_fw_features {
u8 segment;
u8 data;
u8 rsv[14];
} __packed;
struct mt7921_roc_grant_tlv {
__le16 tag;
__le16 len;
u8 bss_idx;
u8 tokenid;
u8 status;
u8 primarychannel;
u8 rfsco;
u8 rfband;
u8 channelwidth;
u8 centerfreqseg1;
u8 centerfreqseg2;
u8 reqtype;
u8 dbdcband;
u8 rsv[1];
__le32 max_interval;
} __packed;
enum mt7921_sdio_pkt_type {
MT7921_SDIO_TXD,
MT7921_SDIO_DATA,
@ -119,6 +171,7 @@ struct mt7921_vif {
struct ewma_rssi rssi;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
struct ieee80211_chanctx_conf *ctx;
};
struct mib_stats {
@ -171,7 +224,7 @@ struct mt7921_clc {
u8 type;
u8 rsv[8];
u8 data[];
};
} __packed;
struct mt7921_phy {
struct mt76_phy *mt76;
@ -200,6 +253,12 @@ struct mt7921_phy {
#endif
struct mt7921_clc *clc[MT7921_CLC_MAX_NUM];
struct work_struct roc_work;
struct timer_list roc_timer;
wait_queue_head_t roc_wait;
u8 roc_token_id;
bool roc_grant;
};
#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
@ -236,6 +295,7 @@ struct mt7921_dev {
struct work_struct init_work;
u8 fw_debug;
u8 fw_features;
struct mt76_connac_pm pm;
struct mt76_connac_coredump coredump;
@ -244,6 +304,8 @@ struct mt7921_dev {
struct work_struct ipv6_ns_work;
/* IPv6 addresses for WoWLAN */
struct sk_buff_head ipv6_ns_list;
enum environment_cap country_ie_env;
};
enum {
@ -408,7 +470,7 @@ void mt7921_tx_worker(struct mt76_worker *w);
void mt7921_tx_token_put(struct mt7921_dev *dev);
bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
struct sk_buff *skb, u32 *info);
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7921_stats_work(struct work_struct *work);
void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
@ -425,6 +487,8 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable);
void mt7921_scan_work(struct work_struct *work);
void mt7921_roc_work(struct work_struct *work);
void mt7921_roc_timer(struct timer_list *timer);
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
@ -508,4 +572,10 @@ int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
enum environment_cap env_cap);
int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
struct ieee80211_channel *chan, int duration,
enum mt7921_roc_req type, u8 token_id);
int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
u8 token_id);
u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm);
#endif


@ -13,10 +13,14 @@
#include "../trace.h"
static const struct pci_device_id mt7921_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961),
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ },
};
@ -228,7 +232,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_hw_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
@ -252,9 +257,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.fw_own = mt7921e_mcu_fw_pmctrl,
};
struct ieee80211_ops *ops;
struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
u8 features;
int ret;
ret = pcim_enable_device(pdev);
@ -278,8 +285,28 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (mt7921_disable_aspm)
mt76_pci_disable_aspm(pdev);
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops,
&drv_ops);
features = mt7921_check_offload_capability(&pdev->dev, (const char *)
id->driver_data);
ops = devm_kmemdup(&pdev->dev, &mt7921_ops, sizeof(mt7921_ops),
GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto err_free_pci_vec;
}
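/* Firmware images without the CNM capability cannot service ROC or
 * channel-context requests, so strip those callbacks from the per-device
 * copy of mt7921_ops.
 */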
if (!(features & MT7921_FW_CAP_CNM)) {
ops->remain_on_channel = NULL;
ops->cancel_remain_on_channel = NULL;
ops->add_chanctx = NULL;
ops->remove_chanctx = NULL;
ops->change_chanctx = NULL;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->mgd_prepare_tx = NULL;
ops->mgd_complete_tx = NULL;
}
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), ops, &drv_ops);
if (!mdev) {
ret = -ENOMEM;
goto err_free_pci_vec;
@ -288,8 +315,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mdev);
dev = container_of(mdev, struct mt7921_dev, mt76);
dev->fw_features = features;
dev->hif_ops = &mt7921_pcie_ops;
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
@ -480,6 +507,21 @@ failed:
return err;
}
static void mt7921_pci_shutdown(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
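/* Make sure no power-save work is still running before the chip is
 * cleaned up for reboot.
 */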
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
/* chip cleanup before reboot */
mt7921_mcu_drv_pmctrl(dev);
mt7921_dma_cleanup(dev);
mt7921_wfsys_reset(dev);
}
static DEFINE_SIMPLE_DEV_PM_OPS(mt7921_pm_ops, mt7921_pci_suspend, mt7921_pci_resume);
static struct pci_driver mt7921_pci_driver = {
@ -487,6 +529,7 @@ static struct pci_driver mt7921_pci_driver = {
.id_table = mt7921_pci_device_table,
.probe = mt7921_pci_probe,
.remove = mt7921_pci_remove,
.shutdown = mt7921_pci_shutdown,
.driver.pm = pm_sleep_ptr(&mt7921_pm_ops),
};
Some files were not shown because too many files have changed in this diff Show More