iwlwifi patches for v5.18
* Support UHB TAS enablement via BIOS;
* Remove a bunch of W=1 warnings;
* Add support for channel switch offload;
* Support a new FW API command version;
* Support 32 Rx AMPDU sessions in newer devices;
* Support a few new FW API command versions;
* Some debugging infra fixes;
* A few fixes in the HE functionality;
* Add a few new devices;
* A bunch of fixes for W=1 and W=3 warnings;
* Add support for a couple of new devices;
* Fix a potential buffer underflow;
* W=1 warnings clean up continues;
* Some improvements and fixes in scanning;
* More work on the Bz family of devices;
* Add support for band disablement via BIOS;
* Bump FW API version;
* Fix config structure for one device;
* Support a new FW API command version;
* Support new queue allocation command;
* Some more debugging improvements;
* Some other small fixes, clean-ups and improvements.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEF3LNfgb2BPWm68smoUecoho8xfoFAmIPZDEACgkQoUecoho8
xfrRRw//V8EUPt59cv96dbJOzMUkhlK3Hqq1/8OllsUsme9GVLL3NbdQUGMZTGn7
jI2y+w0jrgHjamZzCxmKlknUXaOQh25ngppdmunFak8mq9oaIq6Rbu+xva8DGATB
JOt/Pd8f1HgPYr41qJqK564a38R+ehnfAmf/Q0GDCGKSSDBg82g0Ixp4klTi2p6J
ItaE+BnzrAp2XUHSg5pH2qxr351exCBj2uWLPRdb/ka9902pYbkAVH6XFs1ehJFT
nsGNYf4Qq5lWFPPjhzIsTKqGRNE6QLo5mqL7QKyJSw7C/Jfj1HcpInUHGc5wGd4w
Pns4rlCqH1QX9DSvBcUudD83LF5cmhNaoEtziqcBX/cty3ABrXTMYKkZrefdv8Me
XbuEVqJxFN+IYxB8mE16Iznx66GjSptJBen3jH4SoHYU9HXKmOvXQreQcqZ46iF2
/eqCljmO7SNq3ONnm9MvKA5S/gpL91hl3HR9h8PVdG92yoPyqL7juFDDdQC8ilXV
nMEXap7JoHX1BfVGCJjpu0coL+y81iXeClo4cAXO3DEx7gckRW8GDUkUuQ5VyvW/
BBS830xNALfB+/icNLCoqvKhJf98BB+f/5U+/teTOXSGdux72q/PwhjCtZ5enw4k
ewMfsyjSVjipx6pEdlS3I0IUX7inrLS7NblCj09SRovKwXFBdUs=
=btl/
-----END PGP SIGNATURE-----

Merge tag 'iwlwifi-next-for-kalle-2022-02-18' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
commit e03525794a
@@ -7,9 +7,10 @@
 #include <linux/stringify.h>
 #include "iwl-config.h"
 #include "iwl-prph.h"
+#include "fw/api/txq.h"

 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX	69
+#define IWL_22000_UCODE_API_MAX	70

 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN	39
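A practical effect of the IWL_22000_UCODE_API_MAX bump: the firmware file name the driver requests is built from a per-device prefix plus the stringified API version, so these devices now try the -70 image first and fall back one version at a time toward IWL_22000_UCODE_API_MIN. A minimal sketch of that construction, using macros that appear later in this diff (the comment shows the resulting name):

#include <linux/stringify.h>

#define IWL_TY_A_GF_A_FW_PRE	"iwlwifi-ty-a0-gf-a0-"
#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \
	IWL_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"

/* IWL_TY_A_GF_A_MODULE_FIRMWARE(70) expands, via string-literal
 * concatenation, to "iwlwifi-ty-a0-gf-a0-70.ucode", which is the file
 * the driver hands to request_firmware(). */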

@@ -39,6 +40,7 @@
 #define IWL_SO_A_GF_A_FW_PRE		"iwlwifi-so-a0-gf-a0-"
 #define IWL_TY_A_GF_A_FW_PRE		"iwlwifi-ty-a0-gf-a0-"
 #define IWL_SO_A_GF4_A_FW_PRE		"iwlwifi-so-a0-gf4-a0-"
+#define IWL_SO_A_MR_A_FW_PRE		"iwlwifi-so-a0-mr-a0-"
 #define IWL_SNJ_A_GF4_A_FW_PRE		"iwlwifi-SoSnj-a0-gf4-a0-"
 #define IWL_SNJ_A_GF_A_FW_PRE		"iwlwifi-SoSnj-a0-gf-a0-"
 #define IWL_SNJ_A_HR_B_FW_PRE		"iwlwifi-SoSnj-a0-hr-b0-"

@@ -119,8 +121,6 @@
 	IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
 	IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
-#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
-	IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
 	IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \

@@ -224,7 +224,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
 	.trans.base_params = &iwl_ax210_base_params,		\
 	.min_txq_size = 128,					\
 	.gp2_reg_addr = 0xd02c68,				\
-	.min_256_ba_txq_size = 1024,				\
+	.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE,		\
 	.mon_dram_regs = {					\
 		.write_ptr = {					\
 			.addr = DBGC_CUR_DBGBUF_STATUS,		\

@@ -285,7 +285,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
 	.trans.base_params = &iwl_ax210_base_params,		\
 	.min_txq_size = 128,					\
 	.gp2_reg_addr = 0xd02c68,				\
-	.min_256_ba_txq_size = 1024,				\
+	.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,		\
 	.mon_dram_regs = {					\
 		.write_ptr = {					\
 			.addr = DBGC_CUR_DBGBUF_STATUS,		\

@@ -299,6 +299,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
 			.addr = DBGC_CUR_DBGBUF_STATUS,		\
 			.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,	\
 		},						\
 	},							\
+	.mon_dbgi_regs = {					\
+		.write_ptr = {					\
+			.addr = DBGI_SRAM_FIFO_POINTERS,	\
+			.mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,	\
+		},						\
+	}

 const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {

@@ -476,6 +482,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
 const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
 const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
 const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
+const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
 const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
 const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
 const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";

@@ -816,6 +823,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
 	.num_rbds = IWL_NUM_RBDS_AX210_HE,
 };

+const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
+	.fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+	.uhb_supported = false,
+	IWL_DEVICE_AX210,
+	.num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
+	.fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
+	.uhb_supported = false,
+	IWL_DEVICE_AX210,
+	.num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
 	.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
 	.uhb_supported = true,

@@ -830,6 +851,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
 	.num_rbds = IWL_NUM_RBDS_AX210_HE,
 };

+const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
+	.fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
+	.uhb_supported = false,
+	IWL_DEVICE_AX210,
+	.num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
 	.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
 	IWL_DEVICE_AX210,

@@ -299,7 +299,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)

 	priv->is_open = 1;
 	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return 0;
+	return ret;
 }

 static void iwlagn_mac_stop(struct ieee80211_hw *hw)

@@ -48,6 +48,7 @@
 #define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link AGN driver for Linux"
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);

 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search.

@@ -3,7 +3,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018, 2020 Intel Corporation
+ * Copyright(c) 2018, 2020-2021 Intel Corporation
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.

@@ -915,7 +915,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
 		len += 1 + 2;
 		copylen += 1 + 2;

-	new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+	new_data = kmalloc(struct_size(new_data, data, len), GFP_ATOMIC);
 	if (new_data) {
 		new_data->length = len;
 		new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;

@@ -1015,8 +1015,7 @@ void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
 		/* No handling needed */
 		IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
 			     iwl_get_cmd_string(priv->trans,
-						iwl_cmd_id(pkt->hdr.cmd,
-							   0, 0)),
+						WIDE_ID(0, pkt->hdr.cmd)),
 			     pkt->hdr.cmd);
 	}
 }
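The last hunk above swaps iwl_cmd_id(pkt->hdr.cmd, 0, 0) for WIDE_ID(0, pkt->hdr.cmd); both produce the same wide command ID here. A rough sketch of the underlying idea (the exact macro lives in the driver's headers, so treat this definition as an assumption):

/* A wide command ID packs the command group in the high byte and the
 * opcode in the low byte; group 0 is the legacy group, so
 * WIDE_ID(0, cmd) == cmd for legacy commands like this one. */
#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))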

@@ -242,7 +242,7 @@ found:
 IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);

 int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
-		     struct iwl_tas_config_cmd_v3 *cmd)
+		     union iwl_tas_config_cmd *cmd, int fw_ver)
 {
 	union acpi_object *wifi_pkg, *data;
 	int ret, tbl_rev, i, block_list_size, enabled;

@@ -268,10 +268,18 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
 			(tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
 		u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
 			ACPI_WTAS_ENABLE_IEC_POS;
+		u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;

 		enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
-		cmd->override_tas_iec = cpu_to_le16(override_iec);
-		cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
+		if (fw_ver <= 3) {
+			cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
+			cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
+		} else {
+			cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
+			cmd->v4.override_tas_iec = (u8)override_iec;
+			cmd->v4.enable_tas_iec = (u8)enabled_iec;
+		}

 	} else if (tbl_rev == 0 &&
 		   wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {

@@ -297,7 +305,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
 		goto out_free;
 	}
 	block_list_size = wifi_pkg->package.elements[2].integer.value;
-	cmd->block_list_size = cpu_to_le32(block_list_size);
+	cmd->v4.block_list_size = cpu_to_le32(block_list_size);

 	IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
 	if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {

@@ -319,7 +327,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
 		}

 		country = wifi_pkg->package.elements[3 + i].integer.value;
-		cmd->block_list_array[i] = cpu_to_le32(country);
+		cmd->v4.block_list_array[i] = cpu_to_le32(country);
 		IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
 	}
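With the new signature, iwl_acpi_get_tas() fills either the v3 or the v4 member of union iwl_tas_config_cmd, so the caller has to pass the firmware's TAS_CONFIG version and size the host command to the layout that was actually filled. A hedged sketch of such a caller; the version lookup and surrounding names are illustrative assumptions, not part of this diff:

	union iwl_tas_config_cmd cmd = {};
	/* assumed: TAS_CONFIG version advertised by the firmware TLVs */
	int fw_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, REGULATORY_AND_NVM_GROUP,
					   TAS_CONFIG, 3);
	size_t cmd_size = fw_ver <= 3 ? sizeof(cmd.v3) : sizeof(cmd.v4);

	if (iwl_acpi_get_tas(fwrt, &cmd, fw_ver) < 0)
		return;	/* no BIOS table, or TAS not enabled */
	/* ...then send TAS_CONFIG with cmd_size bytes of &cmd... */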

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #ifndef __iwl_fw_acpi__
 #define __iwl_fw_acpi__

@@ -77,6 +77,8 @@
 #define ACPI_WTAS_ENABLE_IEC_MSK	0x4
 #define ACPI_WTAS_OVERRIDE_IEC_POS	0x1
 #define ACPI_WTAS_ENABLE_IEC_POS	0x2
+#define ACPI_WTAS_USA_UHB_MSK		BIT(16)
+#define ACPI_WTAS_USA_UHB_POS		16

 #define ACPI_PPAG_WIFI_DATA_SIZE_V1	((IWL_NUM_CHAIN_LIMITS * \

@@ -126,7 +128,8 @@ enum iwl_dsm_funcs_rev_0 {
 	DSM_FUNC_ENABLE_6E = 3,
 	DSM_FUNC_11AX_ENABLEMENT = 6,
 	DSM_FUNC_ENABLE_UNII4_CHAN = 7,
-	DSM_FUNC_ACTIVATE_CHANNEL = 8
+	DSM_FUNC_ACTIVATE_CHANNEL = 8,
+	DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
 };

 enum iwl_dsm_values_srd {

@@ -213,7 +216,7 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
 		     u32 n_bands, u32 n_profiles);

 int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
-		     struct iwl_tas_config_cmd_v3 *cmd);
+		     union iwl_tas_config_cmd *cmd, int fw_ver);

 __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);

@@ -294,7 +297,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
 }

 static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
-				   struct iwl_tas_config_cmd_v3 *cmd)
+				   union iwl_tas_config_cmd *cmd, int fw_ver)
 {
 	return -ENOENT;
 }

@@ -322,14 +322,6 @@ enum iwl_legacy_cmds {
 	 */
 	REPLY_THERMAL_MNG_BACKOFF = 0x7e,

-	/**
-	 * @DC2DC_CONFIG_CMD:
-	 * Set/Get DC2DC frequency tune
-	 * Command is &struct iwl_dc2dc_config_cmd,
-	 * response is &struct iwl_dc2dc_config_resp
-	 */
-	DC2DC_CONFIG_CMD = 0x83,
-
 	/**
 	 * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
 	 */

@@ -613,6 +605,11 @@ enum iwl_system_subcmd_ids {
 	 * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
 	 */
 	SYSTEM_FEATURES_CONTROL_CMD = 0xd,
+
+	/**
+	 * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
+	 */
+	RFI_DEACTIVATE_NOTIF = 0xff,
 };

 #endif /* __iwl_fw_api_commands_h__ */

@@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
 	DCDC_FREQ_TUNE_SET = 0x2,
 }; /* MARKER_ID_API_E_VER_1 */

-/**
- * struct iwl_dc2dc_config_cmd - configure dc2dc values
- *
- * (DC2DC_CONFIG_CMD = 0x83)
- *
- * Set/Get & configure dc2dc values.
- * The command always returns the current dc2dc values.
- *
- * @flags: set/get dc2dc
- * @enable_low_power_mode: not used.
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_cmd {
-	__le32 flags;
-	__le32 enable_low_power_mode; /* not used */
-	__le32 dc2dc_freq_tune0;
-	__le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
-
-/**
- * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
- *
- * Current dc2dc values returned by the FW.
- *
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_resp {
-	__le32 dc2dc_freq_tune0;
-	__le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
-
 #endif /* __iwl_fw_api_config_h__ */

@@ -42,7 +42,7 @@ enum iwl_data_path_subcmd_ids {
 	RFH_QUEUE_CONFIG_CMD = 0xD,

 	/**
-	 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+	 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
 	 */
 	TLC_MNG_CONFIG_CMD = 0xF,

@@ -57,6 +57,20 @@ enum iwl_data_path_subcmd_ids {
 	 */
 	CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,

+	/**
+	 * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
+	 *	blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
+	 *	command, and &struct iwl_rx_baid_cfg_resp as a response.
+	 */
+	RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
+
+	/**
+	 * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
+	 *	command, uses &struct iwl_scd_queue_cfg_cmd and the response
+	 *	is (same as before) &struct iwl_tx_queue_cfg_rsp.
+	 */
+	SCD_QUEUE_CONFIG_CMD = 0x17,
+
 	/**
 	 * @MONITOR_NOTIF: Datapath monitoring notification, using
 	 *	&struct iwl_datapath_monitor_notif

@@ -257,4 +271,136 @@ struct iwl_rlc_config_cmd {
 	u8 reserved[3];
 } __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */

+#define IWL_MAX_BAID_OLD	16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWL_MAX_BAID		32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * enum iwl_rx_baid_action - BAID allocation/config action
+ * @IWL_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+enum iwl_rx_baid_action {
+	IWL_RX_BAID_ACTION_ADD,
+	IWL_RX_BAID_ACTION_MODIFY,
+	IWL_RX_BAID_ACTION_REMOVE,
+}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwl_rx_baid_cfg_cmd_alloc {
+	__le32 sta_id_mask;
+	u8 tid;
+	u8 reserved[3];
+	__le16 ssn;
+	__le16 win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwl_rx_baid_cfg_cmd_modify {
+	__le32 old_sta_id_mask;
+	__le32 new_sta_id_mask;
+	__le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove_v1 {
+	__le32 baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove {
+	__le32 sta_id_mask;
+	__le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwl_rx_baid_action
+ */
+struct iwl_rx_baid_cfg_cmd {
+	__le32 action;
+	union {
+		struct iwl_rx_baid_cfg_cmd_alloc alloc;
+		struct iwl_rx_baid_cfg_cmd_modify modify;
+		struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
+		struct iwl_rx_baid_cfg_cmd_remove remove;
+	}; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwl_rx_baid_cfg_resp {
+	__le32 baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
||||
|
||||
/**
|
||||
* enum iwl_scd_queue_cfg_operation - scheduler queue operation
|
||||
* @IWL_SCD_QUEUE_ADD: allocate a new queue
|
||||
* @IWL_SCD_QUEUE_REMOVE: remove a queue
|
||||
* @IWL_SCD_QUEUE_MODIFY: modify a queue
|
||||
*/
|
||||
enum iwl_scd_queue_cfg_operation {
|
||||
IWL_SCD_QUEUE_ADD = 0,
|
||||
IWL_SCD_QUEUE_REMOVE = 1,
|
||||
IWL_SCD_QUEUE_MODIFY = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
|
||||
* @operation: the operation, see &enum iwl_scd_queue_cfg_operation
|
||||
* @u.add.sta_mask: station mask
|
||||
* @u.add.tid: TID
|
||||
* @u.add.reserved: reserved
|
||||
* @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
|
||||
* %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
|
||||
* @u.add.cb_size: size code
|
||||
* @u.add.bc_dram_addr: byte-count table IOVA
|
||||
* @u.add.tfdq_dram_addr: TFD queue IOVA
|
||||
* @u.remove.queue: queue ID for removal
|
||||
* @u.modify.sta_mask: new station mask for modify
|
||||
* @u.modify.queue: queue ID to modify
|
||||
*/
|
||||
struct iwl_scd_queue_cfg_cmd {
|
||||
__le32 operation;
|
||||
union {
|
||||
struct {
|
||||
__le32 sta_mask;
|
||||
u8 tid;
|
||||
u8 reserved[3];
|
||||
__le32 flags;
|
||||
__le32 cb_size;
|
||||
__le64 bc_dram_addr;
|
||||
__le64 tfdq_dram_addr;
|
||||
} __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
|
||||
struct {
|
||||
__le32 queue;
|
||||
} __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
|
||||
struct {
|
||||
__le32 sta_mask;
|
||||
__le32 queue;
|
||||
} __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
|
||||
} __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
|
||||
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
|
||||
|
||||
#endif /* __iwl_fw_api_datapath_h__ */
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2018-2022 Intel Corporation
|
||||
*/
|
||||
#ifndef __iwl_fw_dbg_tlv_h__
|
||||
#define __iwl_fw_dbg_tlv_h__
|
||||
@ -11,7 +11,8 @@
|
||||
#define IWL_FW_INI_MAX_NAME 32
|
||||
#define IWL_FW_INI_MAX_CFG_NAME 64
|
||||
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
|
||||
#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
|
||||
#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
|
||||
#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_hcmd
|
||||
@ -249,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv {
|
||||
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_conf_tlv - preset configuration TLV
|
||||
* struct iwl_fw_ini_addr_val - Address and value to set it to
|
||||
*
|
||||
* @address: the base address
|
||||
* @value: value to set at address
|
||||
|
||||
*/
|
||||
struct iwl_fw_ini_addr_val {
|
||||
__le32 address;
|
||||
@ -475,6 +475,7 @@ enum iwl_fw_ini_time_point {
|
||||
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
|
||||
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
|
||||
* Append otherwise
|
||||
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
|
||||
*/
|
||||
enum iwl_fw_ini_trigger_apply_policy {
|
||||
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
|
||||
@ -482,6 +483,7 @@ enum iwl_fw_ini_trigger_apply_policy {
|
||||
IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
|
||||
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
|
||||
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
|
||||
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
|
||||
};
|
||||
|
||||
/**
|
||||
@ -496,4 +498,31 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
|
||||
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
|
||||
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags
|
||||
*
|
||||
* @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
|
||||
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump
|
||||
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump
|
||||
*/
|
||||
enum iwl_fw_ini_dump_policy {
|
||||
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
|
||||
IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
|
||||
IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
|
||||
*
|
||||
* @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions
|
||||
* @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions
|
||||
* @IWL_FW_INI_DUMP_VERBOSE : dump all regions
|
||||
*/
|
||||
enum iwl_fw_ini_dump_type {
|
||||
IWL_FW_INI_DUMP_BRIEF,
|
||||
IWL_FW_INI_DUMP_MEDIUM,
|
||||
IWL_FW_INI_DUMP_VERBOSE,
|
||||
};
|
||||
#endif
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2005-2014, 2018-2022 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
@ -42,6 +42,12 @@ enum iwl_debug_cmds {
|
||||
* &struct iwl_buf_alloc_cmd
|
||||
*/
|
||||
BUFFER_ALLOCATION = 0x8,
|
||||
/**
|
||||
* @FW_DUMP_COMPLETE_CMD:
|
||||
* sends command to fw once dump collection completed
|
||||
* &struct iwl_dbg_dump_complete_cmd
|
||||
*/
|
||||
FW_DUMP_COMPLETE_CMD = 0xB,
|
||||
/**
|
||||
* @MFU_ASSERT_DUMP_NTF:
|
||||
* &struct iwl_mfu_assert_dump_notif
|
||||
@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd {
|
||||
__le32 enabled_severities;
|
||||
} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_dbg_dump_complete_cmd - dump complete cmd
|
||||
*
|
||||
* @tp: timepoint whose dump has completed
|
||||
* @tp_data: timepoint data
|
||||
*/
|
||||
struct iwl_dbg_dump_complete_cmd {
|
||||
__le32 tp;
|
||||
__le32 tp_data;
|
||||
} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
|
||||
|
||||
#endif /* __iwl_fw_api_debug_h__ */
|
||||
|
||||
@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids {
|
||||
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
|
||||
*/
|
||||
SESSION_PROTECTION_CMD = 0x5,
|
||||
/**
|
||||
* @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
|
||||
*/
|
||||
CANCEL_CHANNEL_SWITCH_CMD = 0x6,
|
||||
|
||||
/**
|
||||
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
|
||||
@ -42,6 +46,11 @@ enum iwl_mac_conf_subcmd_ids {
|
||||
* @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
|
||||
*/
|
||||
CHANNEL_SWITCH_START_NOTIF = 0xFF,
|
||||
|
||||
/**
|
||||
*@CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
|
||||
*/
|
||||
CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
|
||||
};
|
||||
|
||||
#define IWL_P2P_NOA_DESC_COUNT (2)
|
||||
@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif {
|
||||
__le32 id_and_color;
|
||||
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
|
||||
|
||||
#define CS_ERR_COUNT_ERROR BIT(0)
|
||||
#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
|
||||
#define CS_ERR_LONG_TX_BLOCK BIT(2)
|
||||
#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
|
||||
|
||||
/**
|
||||
* struct iwl_channel_switch_error_notif - Channel switch error notification
|
||||
*
|
||||
* @mac_id: the mac for which the ucode sends the notification for
|
||||
* @csa_err_mask: mask of channel switch error that can occur
|
||||
*/
|
||||
struct iwl_channel_switch_error_notif {
|
||||
__le32 mac_id;
|
||||
__le32 csa_err_mask;
|
||||
} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
|
||||
*
|
||||
* @mac_id: the mac that should cancel the channel switch
|
||||
*/
|
||||
struct iwl_cancel_channel_switch_cmd {
|
||||
__le32 mac_id;
|
||||
} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
|
||||
*
|
||||
|
||||
@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations {
|
||||
};
|
||||
|
||||
#define MAX_HE_SUPP_NSS 2
|
||||
#define MAX_HE_CHANNEL_BW_INDX 4
|
||||
#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
|
||||
#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
|
||||
|
||||
/**
|
||||
* struct iwl_he_pkt_ext - QAM thresholds
|
||||
* struct iwl_he_pkt_ext_v1 - QAM thresholds
|
||||
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
|
||||
* The IE is organized in the following way:
|
||||
* Support for Nss x BW (or RU) matrix:
|
||||
@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations {
|
||||
* Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
|
||||
* (0-low_th, 1-high_th)
|
||||
*/
|
||||
struct iwl_he_pkt_ext {
|
||||
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
|
||||
} __packed; /* PKT_EXT_DOT11AX_API_S */
|
||||
struct iwl_he_pkt_ext_v1 {
|
||||
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
|
||||
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_he_pkt_ext_v2 - QAM thresholds
|
||||
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
|
||||
* The IE is organized in the following way:
|
||||
* Support for Nss x BW (or RU) matrix:
|
||||
* (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
|
||||
* Each entry contains 2 QAM thresholds for 8us and 16us:
|
||||
* 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
|
||||
* i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx:
|
||||
* QAM_tx < QAM_th1 --> PPE=0us
|
||||
* QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
|
||||
* QAM_th2 <= QAM_tx --> PPE=16us
|
||||
* @pkt_ext_qam_th: QAM thresholds
|
||||
* For each Nss/Bw define 2 QAM thrsholds (0..5)
|
||||
* For rates below the low_th, no need for PPE
|
||||
* For rates between low_th and high_th, need 8us PPE
|
||||
* For rates equal or higher then the high_th, need 16us PPE
|
||||
* Nss (0-siso, 1-mimo2) x
|
||||
* BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
|
||||
* (0-low_th, 1-high_th)
|
||||
*/
|
||||
struct iwl_he_pkt_ext_v2 {
|
||||
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
|
||||
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* enum iwl_he_sta_ctxt_flags - HE STA context flags
|
||||
@ -464,6 +490,11 @@ struct iwl_he_pkt_ext {
|
||||
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
|
||||
* not allowed (as there are OBSS that might classify such transmissions as
|
||||
* radar pulses).
|
||||
* @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
|
||||
* of threshold
|
||||
* @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
|
||||
* @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
|
||||
* extended to 20us for BW > 160Mhz or for MCS w/ 4096-QAM.
|
||||
*/
|
||||
enum iwl_he_sta_ctxt_flags {
|
||||
STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
|
||||
@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags {
|
||||
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
|
||||
STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
|
||||
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
|
||||
STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15),
|
||||
STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16),
|
||||
STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17),
|
||||
};
|
||||
|
||||
/**
|
||||
@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 {
|
||||
u8 frag_min_size;
|
||||
|
||||
/* The below fields are set via PPE thresholds element */
|
||||
struct iwl_he_pkt_ext pkt_ext;
|
||||
struct iwl_he_pkt_ext_v1 pkt_ext;
|
||||
|
||||
/* The below fields are set via HE-Operation IE */
|
||||
u8 bss_color;
|
||||
@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 {
|
||||
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_he_sta_context_cmd - configure FW to work with HE AP
|
||||
* struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
|
||||
* @sta_id: STA id
|
||||
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
|
||||
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
|
||||
@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 {
|
||||
* @bssid_count: actual number of VAPs in the MultiBSS Set
|
||||
* @reserved4: alignment
|
||||
*/
|
||||
struct iwl_he_sta_context_cmd {
|
||||
struct iwl_he_sta_context_cmd_v2 {
|
||||
u8 sta_id;
|
||||
u8 tid_limit;
|
||||
u8 reserved1;
|
||||
@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd {
|
||||
u8 frag_min_size;
|
||||
|
||||
/* The below fields are set via PPE thresholds element */
|
||||
struct iwl_he_pkt_ext pkt_ext;
|
||||
struct iwl_he_pkt_ext_v1 pkt_ext;
|
||||
|
||||
/* The below fields are set via HE-Operation IE */
|
||||
u8 bss_color;
|
||||
@ -642,6 +676,81 @@ struct iwl_he_sta_context_cmd {
|
||||
u8 reserved4[3];
|
||||
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
|
||||
* @sta_id: STA id
|
||||
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
|
||||
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
|
||||
* @reserved1: reserved byte for future use
|
||||
* @reserved2: reserved byte for future use
|
||||
* @flags: see %iwl_11ax_sta_ctxt_flags
|
||||
* @ref_bssid_addr: reference BSSID used by the AP
|
||||
* @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
|
||||
* @htc_flags: which features are supported in HTC
|
||||
* @frag_flags: frag support in A-MSDU
|
||||
* @frag_level: frag support level
|
||||
* @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
|
||||
* @frag_min_size: min frag size (except last frag)
|
||||
* @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
|
||||
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
|
||||
* @htc_trig_based_pkt_ext: default PE in 4us units
|
||||
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
|
||||
* @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
|
||||
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
|
||||
* @puncture_mask: puncture mask for EHT
|
||||
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
|
||||
* @max_bssid_indicator: indicator of the max bssid supported on the associated
|
||||
* bss
|
||||
* @bssid_index: index of the associated VAP
|
||||
* @ema_ap: AP supports enhanced Multi BSSID advertisement
|
||||
* @profile_periodicity: number of Beacon periods that are needed to receive the
|
||||
* complete VAPs info
|
||||
* @bssid_count: actual number of VAPs in the MultiBSS Set
|
||||
* @reserved4: alignment
|
||||
*/
|
||||
struct iwl_he_sta_context_cmd_v3 {
|
||||
u8 sta_id;
|
||||
u8 tid_limit;
|
||||
u8 reserved1;
|
||||
u8 reserved2;
|
||||
__le32 flags;
|
||||
|
||||
/* The below fields are set via Multiple BSSID IE */
|
||||
u8 ref_bssid_addr[6];
|
||||
__le16 reserved0;
|
||||
|
||||
/* The below fields are set via HE-capabilities IE */
|
||||
__le32 htc_flags;
|
||||
|
||||
u8 frag_flags;
|
||||
u8 frag_level;
|
||||
u8 frag_max_num;
|
||||
u8 frag_min_size;
|
||||
|
||||
/* The below fields are set via PPE thresholds element */
|
||||
struct iwl_he_pkt_ext_v2 pkt_ext;
|
||||
|
||||
/* The below fields are set via HE-Operation IE */
|
||||
u8 bss_color;
|
||||
u8 htc_trig_based_pkt_ext;
|
||||
__le16 frame_time_rts_th;
|
||||
|
||||
/* Random access parameter set (i.e. RAPS) */
|
||||
u8 rand_alloc_ecwmin;
|
||||
u8 rand_alloc_ecwmax;
|
||||
__le16 puncture_mask;
|
||||
|
||||
/* The below fields are set via MU EDCA parameter set element */
|
||||
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
|
||||
|
||||
u8 max_bssid_indicator;
|
||||
u8 bssid_index;
|
||||
u8 ema_ap;
|
||||
u8 profile_periodicity;
|
||||
u8 bssid_count;
|
||||
u8 reserved4[3];
|
||||
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_he_monitor_cmd - configure air sniffer for HE
|
||||
* @bssid: the BSSID to sniff for
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
@ -419,6 +419,30 @@ struct iwl_tas_config_cmd_v3 {
|
||||
__le16 enable_tas_iec;
|
||||
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
|
||||
|
||||
/**
|
||||
* struct iwl_tas_config_cmd_v3 - configures the TAS
|
||||
* @block_list_size: size of relevant field in block_list_array
|
||||
* @block_list_array: list of countries where TAS must be disabled
|
||||
* @override_tas_iec: indicates whether to override default value of IEC regulatory
|
||||
* @enable_tas_iec: in case override_tas_iec is set -
|
||||
* indicates whether IEC regulatory is enabled or disabled
|
||||
* @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct iwl_tas_config_cmd_v4 {
|
||||
__le32 block_list_size;
|
||||
__le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
|
||||
u8 override_tas_iec;
|
||||
u8 enable_tas_iec;
|
||||
u8 usa_tas_uhb_allowed;
|
||||
u8 reserved;
|
||||
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
|
||||
|
||||
union iwl_tas_config_cmd {
|
||||
struct iwl_tas_config_cmd_v2 v2;
|
||||
struct iwl_tas_config_cmd_v3 v3;
|
||||
struct iwl_tas_config_cmd_v4 v4;
|
||||
};
|
||||
/**
|
||||
* enum iwl_lari_configs - bit masks for the various LARI config operations
|
||||
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
|
||||
@ -514,6 +538,32 @@ struct iwl_lari_config_change_cmd_v5 {
|
||||
__le32 chan_state_active_bitmap;
|
||||
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
|
||||
|
||||
/**
|
||||
* struct iwl_lari_config_change_cmd_v6 - change LARI configuration
|
||||
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
|
||||
* different predefined FW config operation.
|
||||
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
|
||||
* @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
|
||||
* per country, one to indicate whether to override and the other to
|
||||
* indicate the value to use.
|
||||
* @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
|
||||
* per country, one to indicate whether to override and the other to
|
||||
* indicate allow/disallow unii4 channels.
|
||||
* @chan_state_active_bitmap: Bitmap for overriding channel state to active.
|
||||
* Each bit represents a country or region to activate, according to the BIOS
|
||||
* definitions.
|
||||
* @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
|
||||
* Each bit represents a set of channels in a specific band that should be disabled
|
||||
*/
|
||||
struct iwl_lari_config_change_cmd_v6 {
|
||||
__le32 config_bitmap;
|
||||
__le32 oem_uhb_allow_bitmap;
|
||||
__le32 oem_11ax_allow_bitmap;
|
||||
__le32 oem_unii4_allow_bitmap;
|
||||
__le32 chan_state_active_bitmap;
|
||||
__le32 force_disable_channels_bitmap;
|
||||
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
|
||||
|
||||
/**
|
||||
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
|
||||
* @status: PNVM image loading status
|
||||
|
||||
@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp {
|
||||
|
||||
/**
|
||||
* struct ct_kill_notif - CT-kill entry notification
|
||||
* This structure represent both versions of this notification.
|
||||
*
|
||||
* @temperature: the current temperature in celsius
|
||||
* @reserved: reserved
|
||||
* @dts: only in v2: DTS that trigger the CT Kill bitmap:
|
||||
* bit 0: ToP master
|
||||
* bit 1: PA chain A master
|
||||
* bit 2: PA chain B master
|
||||
* bit 3: ToP slave
|
||||
* bit 4: PA chain A slave
|
||||
* bit 5: PA chain B slave)
|
||||
* bits 6,7: reserved (set to 0)
|
||||
* @scheme: only for v2: scheme that trigger the CT Kill (0-SW, 1-HW)
|
||||
*/
|
||||
struct ct_kill_notif {
|
||||
__le16 temperature;
|
||||
__le16 reserved;
|
||||
} __packed; /* GRP_PHY_CT_KILL_NTF */
|
||||
u8 dts;
|
||||
u8 scheme;
|
||||
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* enum ctdp_cmd_operation - CTDP command operations
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2020 Intel Corporation
|
||||
* Copyright (C) 2020-2021 Intel Corporation
|
||||
*/
|
||||
#ifndef __iwl_fw_api_rfi_h__
|
||||
#define __iwl_fw_api_rfi_h__
|
||||
@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd {
|
||||
__le32 status;
|
||||
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_rfi_deactivate_notif - notifcation that FW disaled RFIm
|
||||
*
|
||||
* @reason: used only for a log message
|
||||
*/
|
||||
struct iwl_rfi_deactivate_notif {
|
||||
__le32 reason;
|
||||
} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
|
||||
#endif /* __iwl_fw_api_rfi_h__ */
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
|
||||
* Copyright (C) 2017 Intel Deutschland GmbH
|
||||
*/
|
||||
#ifndef __iwl_fw_api_rs_h__
|
||||
@ -133,7 +133,7 @@ enum IWL_TLC_MCS_PER_BW {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct tlc_config_cmd - TLC configuration
|
||||
* struct iwl_tlc_config_cmd_v3 - TLC configuration
|
||||
* @sta_id: station id
|
||||
* @reserved1: reserved
|
||||
* @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
|
||||
@ -168,7 +168,7 @@ struct iwl_tlc_config_cmd_v3 {
|
||||
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
|
||||
|
||||
/**
|
||||
* struct tlc_config_cmd - TLC configuration
|
||||
* struct iwl_tlc_config_cmd_v4 - TLC configuration
|
||||
* @sta_id: station id
|
||||
* @reserved1: reserved
|
||||
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
#ifndef __iwl_fw_api_tx_h__
|
||||
@ -296,8 +296,7 @@ struct iwl_tx_cmd_gen2 {
|
||||
* @dram_info: FW internal DRAM storage
|
||||
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
|
||||
* cleared. Combination of RATE_MCS_*
|
||||
* @ttl: time to live - packet lifetime limit. The FW should drop if
|
||||
* passed.
|
||||
* @reserved: reserved
|
||||
* @hdr: 802.11 header
|
||||
*/
|
||||
struct iwl_tx_cmd_gen3 {
|
||||
@ -306,7 +305,7 @@ struct iwl_tx_cmd_gen3 {
|
||||
__le32 offload_assist;
|
||||
struct iwl_dram_sec_info dram_info;
|
||||
__le32 rate_n_flags;
|
||||
__le64 ttl;
|
||||
u8 reserved[8];
|
||||
struct ieee80211_hdr hdr[];
|
||||
} __packed; /* TX_CMD_API_S_VER_8,
|
||||
TX_CMD_API_S_VER_10 */
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2005-2014, 2019-2020 Intel Corporation
|
||||
* Copyright (C) 2005-2014, 2019-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions {
|
||||
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
|
||||
};
|
||||
|
||||
#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
|
||||
#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
|
||||
#define IWL_DEFAULT_QUEUE_SIZE 256
|
||||
#define IWL_MGMT_QUEUE_SIZE 16
|
||||
#define IWL_CMD_QUEUE_SIZE 32
|
||||
|
||||
@ -12,7 +12,7 @@
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-prph.h"
|
||||
#include "iwl-csr.h"
|
||||
|
||||
#include "iwl-fh.h"
|
||||
/**
|
||||
* struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
|
||||
*
|
||||
@ -303,9 +303,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
|
||||
iwl_trans_release_nic_access(fwrt->trans);
|
||||
}
|
||||
|
||||
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
|
||||
#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
|
||||
|
||||
struct iwl_prph_range {
|
||||
u32 start, end;
|
||||
};
|
||||
@ -1027,7 +1024,7 @@ struct iwl_dump_ini_region_data {
|
||||
static int
|
||||
iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1052,7 +1049,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
|
||||
static int
|
||||
iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1102,7 +1099,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1121,7 +1118,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_trans *trans = fwrt->trans;
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
@ -1153,7 +1150,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1175,7 +1172,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
|
||||
}
|
||||
|
||||
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1195,7 +1192,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_error_dump_range *range;
|
||||
u32 page_size;
|
||||
@ -1204,7 +1201,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
|
||||
idx++;
|
||||
|
||||
if (!fwrt->trans->trans_cfg->gen2)
|
||||
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
|
||||
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
|
||||
|
||||
range = range_ptr;
|
||||
page_size = fwrt->trans->init_dram.paging[idx].size;
|
||||
@ -1220,7 +1217,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
|
||||
static int
|
||||
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1239,7 +1236,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1307,7 +1304,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1442,7 +1439,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1509,7 +1506,7 @@ out:
|
||||
static int
|
||||
iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_region_err_table *err_table = ®->err_table;
|
||||
@ -1528,7 +1525,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
|
||||
static int
|
||||
iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_region_special_device_memory *special_mem =
|
||||
@ -1549,7 +1546,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
|
||||
static int
|
||||
iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
@ -1561,8 +1558,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
|
||||
return -EBUSY;
|
||||
|
||||
range->range_data_size = reg->dev_addr.size;
|
||||
iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
|
||||
DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
|
||||
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
|
||||
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
|
||||
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
|
||||
@ -1579,7 +1574,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, int idx)
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
|
||||
@ -1598,10 +1593,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
|
||||
return sizeof(*range) + le32_to_cpu(range->range_data_size);
|
||||
}
|
||||
|
||||
static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *range_ptr, u32 range_len, int idx)
|
||||
{
|
||||
/* read the IMR memory and DMA it to SRAM */
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
|
||||
u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
|
||||
u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
|
||||
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
|
||||
u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
|
||||
|
||||
range->range_data_size = cpu_to_le32(size_to_dump);
|
||||
if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
|
||||
imr_curr_addr, size_to_dump)) {
|
||||
IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
|
||||
fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
|
||||
|
||||
iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
|
||||
size_to_dump);
|
||||
return sizeof(*range) + le32_to_cpu(range->range_data_size);
|
||||
}
|
||||
|
||||
static void *
|
||||
iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *data)
|
||||
void *data, u32 data_len)
|
||||
{
|
||||
struct iwl_fw_ini_error_dump *dump = data;
|
||||
|
||||
@ -1677,7 +1699,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
static void *
|
||||
iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *data)
|
||||
void *data, u32 data_len)
|
||||
{
|
||||
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
|
||||
|
||||
@ -1688,7 +1710,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
static void *
|
||||
iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *data)
|
||||
void *data, u32 data_len)
|
||||
{
|
||||
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
|
||||
|
||||
@ -1696,10 +1718,21 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
&fwrt->trans->cfg->mon_smem_regs);
|
||||
}
|
||||
|
||||
static void *
|
||||
iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *data, u32 data_len)
|
||||
{
|
||||
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
|
||||
|
||||
return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
|
||||
&fwrt->trans->cfg->mon_dbgi_regs);
|
||||
}
|
||||
|
||||
static void *
|
||||
iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *data)
|
||||
void *data, u32 data_len)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_err_table_dump *dump = data;
|
||||
@ -1713,7 +1746,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
static void *
|
||||
iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *data)
|
||||
void *data, u32 data_len)
|
||||
{
|
||||
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
|
||||
struct iwl_fw_ini_special_device_memory *dump = data;
|
||||
@ -1725,6 +1758,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
return dump->data;
|
||||
}
|
||||
|
||||
static void *
|
||||
iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data,
|
||||
void *data, u32 data_len)
|
||||
{
|
||||
struct iwl_fw_ini_error_dump *dump = data;
|
||||
|
||||
dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
|
||||
|
||||
return dump->data;
|
||||
}
|
||||
|
||||
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data)
|
||||
{
|
||||
@ -1784,6 +1829,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
|
||||
return 1;
|
||||
}
|
||||
|
||||
static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_dump_ini_region_data *reg_data)
|
||||
{
|
||||
/* range is total number of pages need to copied from
|
||||
*IMR memory to SRAM and later from SRAM to DRAM
|
||||
*/
|
||||
u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
|
||||
u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
|
||||
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
|
||||
|
||||
if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
|
||||
IWL_DEBUG_INFO(fwrt,
|
||||
"WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
|
||||
imr_enable, imr_size, sram_size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return((imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size));
|
||||
}
|
||||
|
||||
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
				     struct iwl_dump_ini_region_data *reg_data)
{
@ -1861,6 +1926,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
	return size;
}

static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
					  struct iwl_dump_ini_region_data *reg_data)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
	u32 size = le32_to_cpu(reg->dev_addr.size);
	u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);

	if (!size || !ranges)
		return 0;

	return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
		(size + sizeof(struct iwl_fw_ini_error_dump_range));
}

static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
				     struct iwl_dump_ini_region_data *reg_data)
{
@ -1948,6 +2027,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
	return size;
}

static u32
iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
			  struct iwl_dump_ini_region_data *reg_data)
{
	u32 size = 0;
	u32 ranges = 0;
	u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
	u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
	u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;

	if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
		IWL_DEBUG_INFO(fwrt,
			       "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
			       imr_enable, imr_size, sram_size);
		return size;
	}
	size = imr_size;
	ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
	if (!size && !ranges) {
		IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges);
		return 0;
	}
	size += sizeof(struct iwl_fw_ini_error_dump) +
		ranges * sizeof(struct iwl_fw_ini_error_dump_range);
	return size;
}

/**
 * struct iwl_dump_ini_mem_ops - ini memory dump operations
 * @get_num_of_ranges: returns the number of memory ranges in the region.
@ -1964,10 +2070,10 @@ struct iwl_dump_ini_mem_ops {
			 struct iwl_dump_ini_region_data *reg_data);
	void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
			      struct iwl_dump_ini_region_data *reg_data,
			      void *data);
			      void *data, u32 data_len);
	int (*fill_range)(struct iwl_fw_runtime *fwrt,
			  struct iwl_dump_ini_region_data *reg_data,
			  void *range, int idx);
			  void *range, u32 range_len, int idx);
};

/**
@ -1990,24 +2096,53 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
	struct iwl_fw_ini_error_dump_data *tlv;
	struct iwl_fw_ini_error_dump_header *header;
	u32 type = reg->type;
	u32 id = le32_to_cpu(reg->id);
	u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
	u32 num_of_ranges, i, size;
	void *range;
	u8 *range;
	u32 free_size;
	u64 header_size;
	u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;

	/*
	 * The higher part of the ID from 2 is irrelevant for
	 * us, so mask it out.
	 */
	if (le32_to_cpu(reg->hdr.version) >= 2)
		id &= IWL_FW_INI_REGION_V2_MASK;
	IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
		     dump_policy, id, type);

	if (le32_to_cpu(reg->hdr.version) >= 2) {
		u32 dp = le32_get_bits(reg->id,
				       IWL_FW_INI_REGION_DUMP_POLICY_MASK);

		if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
		    !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
			IWL_DEBUG_FW(fwrt,
				     "WRT: no dump - type %d and policy mismatch=%d\n",
				     dump_policy, dp);
			return 0;
		} else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
			   !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
			IWL_DEBUG_FW(fwrt,
				     "WRT: no dump - type %d and policy mismatch=%d\n",
				     dump_policy, dp);
			return 0;
		} else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
			   !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
			IWL_DEBUG_FW(fwrt,
				     "WRT: no dump - type %d and policy mismatch=%d\n",
				     dump_policy, dp);
			return 0;
		}
	}

	if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
	    !ops->fill_range)
	    !ops->fill_range) {
		IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
		return 0;
	}

	size = ops->get_size(fwrt, reg_data);
	if (!size)

	if (size < sizeof(*header)) {
		IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
		return 0;
	}

	entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
	if (!entry)
@ -2022,9 +2157,6 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
	tlv->reserved = reg->reserved;
	tlv->len = cpu_to_le32(size);

	IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
		     type);

	num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);

	header = (void *)tlv->data;
@ -2033,7 +2165,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
	header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
	memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);

	range = ops->fill_mem_hdr(fwrt, reg_data, header);
	free_size = size;
	range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
	if (!range) {
		IWL_ERR(fwrt,
			"WRT: Failed to fill region header: id=%d, type=%d\n",
@ -2041,8 +2174,21 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
		goto out_err;
	}

	header_size = range - (u8 *)header;

	if (WARN(header_size > free_size,
		 "header size %llu > free_size %d",
		 header_size, free_size)) {
		IWL_ERR(fwrt,
			"WRT: fill_mem_hdr used more than given free_size\n");
		goto out_err;
	}

	free_size -= header_size;

	for (i = 0; i < num_of_ranges; i++) {
		int range_size = ops->fill_range(fwrt, reg_data, range, i);
		int range_size = ops->fill_range(fwrt, reg_data, range,
						 free_size, i);

		if (range_size < 0) {
			IWL_ERR(fwrt,
@ -2050,6 +2196,15 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
				id, type);
			goto out_err;
		}

		if (WARN(range_size > free_size, "range_size %d > free_size %d",
			 range_size, free_size)) {
			IWL_ERR(fwrt,
				"WRT: fill_raged used more than given free_size\n");
			goto out_err;
		}

		free_size -= range_size;
		range = range + range_size;
	}

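With the reworked iwl_dump_ini_mem() above, the remaining buffer space (free_size) is threaded through fill_mem_hdr() and fill_range(), and the code warns and bails out if a callback reports more bytes than it was given. A reduced sketch of that bookkeeping pattern, with hypothetical names standing in for the driver's ops:

	/* Sketch: track remaining space while appending variable-size records. */
	u8 *pos = buf;
	u32 free_size = buf_len;
	int i;

	for (i = 0; i < num_records; i++) {
		int n = fill_record(pos, free_size, i); /* bytes written, or < 0 */

		if (n < 0 || n > free_size)
			return -ENOSPC;	/* writer overran the space it was given */
		free_size -= n;
		pos += n;
	}
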
@ -2240,7 +2395,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_csr_iter,
	},
	[IWL_FW_INI_REGION_DRAM_IMR] = {},
	[IWL_FW_INI_REGION_DRAM_IMR] = {
		.get_num_of_ranges = iwl_dump_ini_imr_ranges,
		.get_size = iwl_dump_ini_imr_get_size,
		.fill_mem_hdr = iwl_dump_ini_imr_fill_header,
		.fill_range = iwl_dump_ini_imr_iter,
	},
	[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
@ -2255,8 +2415,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
	},
	[IWL_FW_INI_REGION_DBGI_SRAM] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.get_size = iwl_dump_ini_mon_dbgi_get_size,
		.fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
		.fill_range = iwl_dump_ini_dbgi_sram_iter,
	},
};
@ -2444,7 +2604,7 @@ static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
				  struct iwl_fwrt_dump_data *dump_data)
{
	struct list_head dump_list = LIST_HEAD_INIT(dump_list);
	LIST_HEAD(dump_list);
	struct scatterlist *sg_dump_data;
	u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);

@ -2589,7 +2749,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
		delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
	}

	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
	desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

@ -2685,6 +2845,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);

void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
				    u32 timepoint,
				    u32 timepoint_data)
{
	struct iwl_dbg_dump_complete_cmd hcmd_data;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
		.data[0] = &hcmd_data,
		.len[0] = sizeof(hcmd_data),
	};

	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
		return;

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
		hcmd_data.tp = cpu_to_le32(timepoint);
		hcmd_data.tp_data = cpu_to_le32(timepoint_data);
		iwl_trans_send_cmd(fwrt->trans, &hcmd);
	}
}

/* this function assumes dump_start was called beforehand and dump_end will be
 * called afterwards
 */
@ -2693,7 +2875,8 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
	struct iwl_fw_dbg_params params = {0};
	struct iwl_fwrt_dump_data *dump_data =
		&fwrt->dump.wks[wk_idx].dump_data;

	u32 policy;
	u32 time_point;
	if (!test_bit(wk_idx, &fwrt->dump.active_wks))
		return;

@ -2719,6 +2902,13 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)

	iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);

	policy = le32_to_cpu(dump_data->trig->apply_policy);
	time_point = le32_to_cpu(dump_data->trig->time_point);

	if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
		IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
		iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
	}
	if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
		iwl_force_nmi(fwrt->trans);

@ -2795,9 +2985,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
	/* assumes the op mode mutex is locked in dump_start since
	 * iwl_fw_dbg_collect_sync can't run in parallel
	 */
	if (fwrt->ops && fwrt->ops->dump_start &&
	    fwrt->ops->dump_start(fwrt->ops_ctx))
		return;
	if (fwrt->ops && fwrt->ops->dump_start)
		fwrt->ops->dump_start(fwrt->ops_ctx);

	iwl_fw_dbg_collect_sync(fwrt, wks->idx);

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
 * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}

void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
				    u32 timepoint,
				    u32 timepoint_data);
#endif /* __iwl_fw_dbg_h__ */

@ -150,7 +150,7 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
{
	struct iwl_dbg_host_event_cfg_cmd event_cfg;
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(HOST_EVENT_CFG, DEBUG_GROUP, 0),
		.id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
		.flags = CMD_ASYNC,
		.data[0] = &event_cfg,
		.len[0] = sizeof(event_cfg),
@ -358,7 +358,7 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)

	ver = &fw->ucode_capa.cmd_versions[state->pos];

	cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
	cmd_id = WIDE_ID(ver->group, ver->cmd);

	seq_printf(seq, " 0x%04x:\n", cmd_id);
	seq_printf(seq, " name: %s\n",

@ -119,7 +119,7 @@ enum iwl_ucode_tlv_type {
struct iwl_ucode_tlv {
	__le32 type;	/* see above */
	__le32 length;	/* not including type/length fields */
	u8 data[0];
	u8 data[];
};

#define IWL_TLV_UCODE_MAGIC	0x0a4c5749
@ -312,7 +312,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
 * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
 * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
 * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
 * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@ -370,6 +369,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 *	reset flow
 * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
 *	channels even when these are not enabled.
 * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
 *	complete to FW.
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */
@ -388,7 +389,6 @@ enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
	IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
	IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
	IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
	IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
@ -421,6 +421,7 @@ enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
	IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
	IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
	IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63,

	/* set 2 */
	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@ -455,6 +456,7 @@ enum iwl_ucode_tlv_capa {

	IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
	IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
	IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,

#ifdef __CHECKER__
	/* sparse says it cannot increment the previous enum member */

@ -2,13 +2,16 @@
/*
 * Copyright(c) 2019 - 2021 Intel Corporation
 */

#include <fw/api/commands.h>
#include "img.h"

u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
{
	const struct iwl_fw_cmd_version *entry;
	unsigned int i;
	/* prior to LONG_GROUP, we never used this CMD version API */
	u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
	u8 cmd = iwl_cmd_opcode(cmd_id);

	if (!fw->ucode_capa.cmd_versions ||
	    !fw->ucode_capa.n_cmd_versions)

@ -275,7 +275,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
	return &fw->img[ucode_type];
}

u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);

u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);

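iwl_fw_lookup_cmd_ver() now takes one u32 command ID instead of separate group and opcode bytes; WIDE_ID() builds such an ID, and iwl_cmd_groupid()/iwl_cmd_opcode() split it again. Roughly, as a sketch (the authoritative definitions live in the driver's headers):

	/* A wide command ID carries the group in bits 8-15 and the
	 * opcode in bits 0-7.
	 */
	#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

	static inline u8 iwl_cmd_groupid(u32 cmd_id) { return cmd_id >> 8; }
	static inline u8 iwl_cmd_opcode(u32 cmd_id) { return cmd_id & 0xff; }
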
@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
	struct iwl_soc_configuration_cmd cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
		.id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
		cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
					      SOC_FLAGS_LTR_APPLY_DELAY_MASK);

	if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
				  SCAN_REQ_UMAC,
	if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
				  IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
	    fwrt->trans->trans_cfg->low_latency_xtal)
		cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);

@ -197,7 +197,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
	}

	memcpy(page_address(block->fw_paging_block),
	       image->sec[sec_idx].data + offset, len);
	       (const u8 *)image->sec[sec_idx].data + offset, len);
	block->fw_offs = image->sec[sec_idx].offset + offset;
	dma_sync_single_for_device(fwrt->trans->dev,
				   block->fw_paging_phys,
@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
		.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
	};
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
		.len = { sizeof(paging_cmd), },
		.data = { &paging_cmd, },
	};

@ -33,7 +33,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
				   size_t len)
{
	struct iwl_ucode_tlv *tlv;
	const struct iwl_ucode_tlv *tlv;
	u32 sha1 = 0;
	u16 mac_type = 0, rf_id = 0;
	u8 *pnvm_data = NULL, *tmp;
@ -47,7 +47,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
		u32 tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (void *)data;
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);
@ -70,7 +70,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
				break;
			}

			sha1 = le32_to_cpup((__le32 *)data);
			sha1 = le32_to_cpup((const __le32 *)data);

			IWL_DEBUG_FW(trans,
				     "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
@ -87,8 +87,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
			if (hw_match)
				break;

			mac_type = le16_to_cpup((__le16 *)data);
			rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
			mac_type = le16_to_cpup((const __le16 *)data);
			rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16)));

			IWL_DEBUG_FW(trans,
				     "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
@ -99,7 +99,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
				hw_match = true;
			break;
		case IWL_UCODE_TLV_SEC_RT: {
			struct iwl_pnvm_section *section = (void *)data;
			const struct iwl_pnvm_section *section = (const void *)data;
			u32 data_len = tlv_len - sizeof(*section);

			IWL_DEBUG_FW(trans,
@ -107,7 +107,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
				     tlv_len);

			/* TODO: remove, this is a deprecated separator */
			if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
			if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) {
				IWL_DEBUG_FW(trans, "Ignoring separator.\n");
				break;
			}
@ -173,7 +173,7 @@ out:
static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
			  size_t len)
{
	struct iwl_ucode_tlv *tlv;
	const struct iwl_ucode_tlv *tlv;

	IWL_DEBUG_FW(trans, "Parsing PNVM file\n");

@ -181,7 +181,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
		u32 tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (void *)data;
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);
@ -193,8 +193,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
		}

		if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
			struct iwl_sku_id *sku_id =
				(void *)(data + sizeof(*tlv));
			const struct iwl_sku_id *sku_id =
				(const void *)(data + sizeof(*tlv));

			IWL_DEBUG_FW(trans,
				     "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",

@ -16,7 +16,7 @@
#include "fw/acpi.h"

struct iwl_fw_runtime_ops {
	int (*dump_start)(void *ctx);
	void (*dump_start)(void *ctx);
	void (*dump_end)(void *ctx);
	bool (*fw_running)(void *ctx);
	int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
		cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
	else
		cmd.id = SHARED_MEM_CFG;

@ -69,7 +69,7 @@ out:
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
					   const u8 *data, size_t len)
{
	struct iwl_ucode_tlv *tlv;
	const struct iwl_ucode_tlv *tlv;
	u8 *reduce_power_data = NULL, *tmp;
	u32 size = 0;

@ -79,7 +79,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
		u32 tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (void *)data;
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);
@ -154,7 +154,7 @@ out:
static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
					 const u8 *data, size_t len)
{
	struct iwl_ucode_tlv *tlv;
	const struct iwl_ucode_tlv *tlv;
	void *sec_data;

	IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@ -163,7 +163,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
		u32 tlv_len, tlv_type;

		len -= sizeof(*tlv);
		tlv = (void *)data;
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);
@ -175,8 +175,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
		}

		if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
			struct iwl_sku_id *sku_id =
				(void *)(data + sizeof(*tlv));
			const struct iwl_sku_id *sku_id =
				(const void *)(data + sizeof(*tlv));

			IWL_DEBUG_FW(trans,
				     "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",

@ -343,8 +343,8 @@ struct iwl_fw_mon_regs {
 * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
 * @min_txq_size: minimum number of slots required in a TX queue
 * @uhb_supported: ultra high band channels supported
 * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
 *	supports 256 BA aggregation
 * @min_ba_txq_size: minimum number of slots required in a TX queue which
 *	based on hardware support (HE - 256, EHT - 1K).
 * @num_rbds: number of receive buffer descriptors to use
 *	(only used for multi-queue capable devices)
 * @mac_addr_csr_base: CSR base register for MAC address access, if not set
@ -405,9 +405,10 @@ struct iwl_cfg {
	u32 d3_debug_data_length;
	u32 min_txq_size;
	u32 gp2_reg_addr;
	u32 min_256_ba_txq_size;
	u32 min_ba_txq_size;
	const struct iwl_fw_mon_regs mon_dram_regs;
	const struct iwl_fw_mon_regs mon_smem_regs;
	const struct iwl_fw_mon_regs mon_dbgi_regs;
};

#define IWL_CFG_ANY (~0)
@ -433,6 +434,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1	0x10C
#define IWL_CFG_RF_TYPE_GF	0x10D
#define IWL_CFG_RF_TYPE_MR	0x110
#define IWL_CFG_RF_TYPE_MS	0x111
#define IWL_CFG_RF_TYPE_FM	0x112

#define IWL_CFG_RF_ID_TH	0x1
@ -509,6 +511,7 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@ -631,9 +634,12 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;

@ -533,6 +533,9 @@ enum {
 * 11-8: queue selector
 */
#define HBUS_TARG_WRPTR		(HBUS_BASE+0x060)
/* This register is common for Tx and Rx, Rx queues start from 512 */
#define HBUS_TARG_WRPTR_Q_SHIFT (16)
#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)

/**********************************************************
 * CSR values

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2021 Intel Corporation
 * Copyright (C) 2018-2022 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
	memcpy(&node->tlv, tlv, sizeof(node->tlv));
	memcpy(node->tlv.data, tlv->data, len);
	list_add_tail(&node->list, list);

	return 0;
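
The iwl_dbg_tlv_add() change above splits one memcpy into two: first the fixed-size TLV header, then the trailing flexible-array payload. Copying both in one call read len bytes past the end of the source header object. A generic sketch of the pattern, with hypothetical names:

	/* node->tlv ends in a flexible array member (u8 data[]), so the
	 * header and the payload are copied separately; a single
	 * memcpy(&node->tlv, src_tlv, sizeof(node->tlv) + len) would read
	 * beyond the source struct and can trip bounds checkers.
	 */
	memcpy(&node->tlv, src_tlv, sizeof(node->tlv));
	memcpy(node->tlv.data, src_tlv->data, len);
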
@ -181,11 +182,11 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	/*
	 * The higher part of the ID in from version 2 is irrelevant for
	 * us, so mask it out.
	 * The higher part of the ID from version 2 is debug policy.
	 * The id will be only lsb 16 bits, so mask it out.
	 */
	if (le32_to_cpu(reg->hdr.version) >= 2)
		id &= IWL_FW_INI_REGION_V2_MASK;
		id &= IWL_FW_INI_REGION_ID_MASK;

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;
@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
		return -EOPNOTSUPP;
	}

	if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
		trans->dbg.imr_data.sram_addr =
			le32_to_cpu(reg->internal_buffer.base_addr);
		trans->dbg.imr_data.sram_size =
			le32_to_cpu(reg->internal_buffer.size);
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@ -271,7 +280,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
	const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
	u32 tp = le32_to_cpu(conf_set->time_point);
	u32 type = le32_to_cpu(conf_set->set_type);

@ -460,7 +469,7 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);

@ -577,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
	if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
@ -762,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,

static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
	int ret, i, dram_alloc = 0;
	struct iwl_dram_info dram_info;
	int ret, i;
	bool dram_alloc = false;
	struct iwl_dram_data *frags =
		&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
	struct iwl_dram_info *dram_info;

	if (!frags || !frags->block)
		return;

	dram_info = frags->block;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

	dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
	dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
	dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
	dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);

	for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	     i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
		ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
		ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
		if (!ret)
			dram_alloc++;
			dram_alloc = true;
		else
			IWL_WARN(fwrt,
				 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
				 i, ret);
	}
	if (dram_alloc) {
		memcpy(frags->block, &dram_info, sizeof(dram_info));
		IWL_DEBUG_FW(fwrt, "block data after %016x\n",
			     *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
	}

	if (dram_alloc)
		IWL_DEBUG_FW(fwrt, "block data after %08x\n",
			     dram_info->first_word);
	else
		memset(frags->block, 0, sizeof(*dram_info));
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@ -811,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
}

static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
				     struct list_head *config_list)
				     struct list_head *conf_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, config_list, list) {
	list_for_each_entry(node, conf_list, list) {
		struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
		u32 count, address, value;
		u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
@ -861,11 +876,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
		case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
			struct iwl_dbgc1_info dram_info = {};
			struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
			__le64 dram_base_addr = cpu_to_le64(frags->physical);
			__le32 dram_size = cpu_to_le32(frags->size);
			u64 dram_addr = le64_to_cpu(dram_base_addr);
			__le64 dram_base_addr;
			__le32 dram_size;
			u64 dram_addr;
			u32 ret;

			if (!frags)
				break;

			dram_base_addr = cpu_to_le64(frags->physical);
			dram_size = cpu_to_le32(frags->size);
			dram_addr = le64_to_cpu(dram_base_addr);

			IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
				     dram_base_addr, dram_size);
			IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",

@ -243,14 +243,14 @@ struct iwl_firmware_pieces {

	/* FW debug data parsed for driver usage */
	bool dbg_dest_tlv_init;
	u8 *dbg_dest_ver;
	const u8 *dbg_dest_ver;
	union {
		struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
		struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
		const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
		const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
	};
	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
	const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
	size_t n_mem_tlv;
@ -327,8 +327,9 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
{
	int i, j;
	struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
	struct iwl_fw_cipher_scheme *fwcs;
	const struct iwl_fw_cscheme_list *l =
		(const struct iwl_fw_cscheme_list *)data;
	const struct iwl_fw_cipher_scheme *fwcs;

	if (len < sizeof(*l) ||
	    len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
@ -356,13 +357,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
{
	struct fw_img_parsing *img;
	struct fw_sec *sec;
	struct fw_sec_parsing *sec_parse;
	const struct fw_sec_parsing *sec_parse;
	size_t alloc_size;

	if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
		return -1;

	sec_parse = (struct fw_sec_parsing *)data;
	sec_parse = (const struct fw_sec_parsing *)data;

	img = &pieces->img[type];

@ -385,8 +386,8 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,

static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
{
	struct iwl_tlv_calib_data *def_calib =
		(struct iwl_tlv_calib_data *)data;
	const struct iwl_tlv_calib_data *def_calib =
		(const struct iwl_tlv_calib_data *)data;
	u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
	if (ucode_type >= IWL_UCODE_TYPE_MAX) {
		IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
@ -404,7 +405,7 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
				    struct iwl_ucode_capabilities *capa)
{
	const struct iwl_ucode_api *ucode_api = (void *)data;
	const struct iwl_ucode_api *ucode_api = (const void *)data;
	u32 api_index = le32_to_cpu(ucode_api->api_index);
	u32 api_flags = le32_to_cpu(ucode_api->api_flags);
	int i;
@ -425,7 +426,7 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
				       struct iwl_ucode_capabilities *capa)
{
	const struct iwl_ucode_capa *ucode_capa = (void *)data;
	const struct iwl_ucode_capa *ucode_capa = (const void *)data;
	u32 api_index = le32_to_cpu(ucode_capa->api_index);
	u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
	int i;
@ -457,7 +458,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
				    const struct firmware *ucode_raw,
				    struct iwl_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
	u32 api_ver, hdr_size, build;
	char buildstr[25];
	const u8 *src;
@ -600,7 +601,7 @@ static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
	    sizeof(region->special_mem))
		return;

	region = (void *)tlv->data;
	region = (const void *)tlv->data;
	addr = le32_to_cpu(region->special_mem.base_addr);
	addr += le32_to_cpu(region->special_mem.offset);
	addr &= ~FW_ADDR_CACHE_CONTROL;
@ -655,7 +656,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				  struct iwl_ucode_capabilities *capa,
				  bool *usniffer_images)
{
	struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
	const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
	const struct iwl_ucode_tlv *tlv;
	size_t len = ucode_raw->size;
	const u8 *data;
@ -704,8 +705,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv = (const void *)data;
		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);
		tlv_data = tlv->data;
@ -762,7 +763,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->max_probe_length =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_PAN:
			if (tlv_len)
@ -783,7 +784,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			 * will not work with the new firmware, or
			 * it'll not take advantage of new features.
			 */
			capa->flags = le32_to_cpup((__le32 *)tlv_data);
			capa->flags = le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_API_CHANGES_SET:
			if (tlv_len != sizeof(struct iwl_ucode_api))
@ -799,37 +800,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_evtlog_ptr =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_evtlog_size =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_errlog_ptr =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_evtlog_ptr =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_evtlog_size =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_errlog_ptr =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
			if (tlv_len)
@ -858,7 +859,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->standard_phy_calibration_size =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_SEC_RT:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@ -884,7 +885,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
		case IWL_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
			drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
			drv->fw.valid_tx_ant = (drv->fw.phy_config &
						FW_PHY_CFG_TX_CHAIN) >>
						FW_PHY_CFG_TX_CHAIN_POS;
@ -911,7 +912,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			num_of_cpus =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);

			if (num_of_cpus == 2) {
				drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
@ -933,10 +934,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->n_scan_channels =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_FW_VERSION: {
			__le32 *ptr = (void *)tlv_data;
			const __le32 *ptr = (const void *)tlv_data;
			u32 major, minor;
			u8 local_comp;
@ -960,15 +961,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			break;
		}
		case IWL_UCODE_TLV_FW_DBG_DEST: {
			struct iwl_fw_dbg_dest_tlv *dest = NULL;
			struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
			const struct iwl_fw_dbg_dest_tlv *dest = NULL;
			const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
			u8 mon_mode;

			pieces->dbg_dest_ver = (u8 *)tlv_data;
			pieces->dbg_dest_ver = (const u8 *)tlv_data;
			if (*pieces->dbg_dest_ver == 1) {
				dest = (void *)tlv_data;
				dest = (const void *)tlv_data;
			} else if (*pieces->dbg_dest_ver == 0) {
				dest_v1 = (void *)tlv_data;
				dest_v1 = (const void *)tlv_data;
			} else {
				IWL_ERR(drv,
					"The version is %d, and it is invalid\n",
@ -1009,7 +1010,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			break;
		}
		case IWL_UCODE_TLV_FW_DBG_CONF: {
			struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
			const struct iwl_fw_dbg_conf_tlv *conf =
				(const void *)tlv_data;

			if (!pieces->dbg_dest_tlv_init) {
				IWL_ERR(drv,
@ -1043,8 +1045,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			break;
		}
		case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
			struct iwl_fw_dbg_trigger_tlv *trigger =
				(void *)tlv_data;
			const struct iwl_fw_dbg_trigger_tlv *trigger =
				(const void *)tlv_data;
			u32 trigger_id = le32_to_cpu(trigger->id);

			if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
@ -1075,7 +1077,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			}

			drv->fw.dbg.dump_mask =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		}
		case IWL_UCODE_TLV_SEC_RT_USNIFFER:
@ -1087,7 +1089,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
		case IWL_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
			paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);

			IWL_DEBUG_FW(drv,
				     "Paging: paging enabled (size = %u bytes)\n",
@ -1117,8 +1119,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			/* ignored */
			break;
		case IWL_UCODE_TLV_FW_MEM_SEG: {
			struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
				(void *)tlv_data;
			const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
				(const void *)tlv_data;
			size_t size;
			struct iwl_fw_dbg_mem_seg_tlv *n;

@ -1146,10 +1148,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			break;
		}
		case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
			struct {
			const struct {
				__le32 buf_addr;
				__le32 buf_size;
			} *recov_info = (void *)tlv_data;
			} *recov_info = (const void *)tlv_data;

			if (tlv_len != sizeof(*recov_info))
				goto invalid_tlv_len;
@ -1160,10 +1162,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			}
			break;
		case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
			struct {
			const struct {
				u8 version[32];
				u8 sha1[20];
			} *fseq_ver = (void *)tlv_data;
			} *fseq_ver = (const void *)tlv_data;

			if (tlv_len != sizeof(*fseq_ver))
				goto invalid_tlv_len;
@ -1174,19 +1176,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
		case IWL_UCODE_TLV_FW_NUM_STATIONS:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			if (le32_to_cpup((__le32 *)tlv_data) >
			if (le32_to_cpup((const __le32 *)tlv_data) >
			    IWL_MVM_STATION_COUNT_MAX) {
				IWL_ERR(drv,
					"%d is an invalid number of station\n",
					le32_to_cpup((__le32 *)tlv_data));
					le32_to_cpup((const __le32 *)tlv_data));
				goto tlv_error;
			}
			capa->num_stations =
				le32_to_cpup((__le32 *)tlv_data);
				le32_to_cpup((const __le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			struct iwl_umac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;
			const struct iwl_umac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs))
				goto invalid_tlv_len;
@ -1201,8 +1203,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			break;
		}
		case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			struct iwl_lmac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;
			const struct iwl_lmac_debug_addrs *dbg_ptrs =
				(const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs))
				goto invalid_tlv_len;
@ -1277,7 +1279,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,

	if (len) {
		IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
		iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
		iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
		return -EINVAL;
	}

@ -1418,7 +1420,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
	struct iwl_drv *drv = context;
	struct iwl_fw *fw = &drv->fw;
	struct iwl_ucode_header *ucode;
	const struct iwl_ucode_header *ucode;
	struct iwlwifi_opmode_table *op;
	int err;
	struct iwl_firmware_pieces *pieces;
@ -1456,7 +1458,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
	}

	/* Data from ucode file: header followed by uCode images */
	ucode = (struct iwl_ucode_header *)ucode_raw->data;
	ucode = (const struct iwl_ucode_header *)ucode_raw->data;

	if (ucode->ver)
		err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
@ -1645,6 +1647,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);

	iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);

	mutex_lock(&iwlwifi_opmode_table_mtx);
	switch (fw->type) {
	case IWL_FW_DVM:
@ -1661,8 +1665,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
	IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
		 drv->fw.fw_version, op->name);

	iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);

	/* add this device to the list of devices using this op_mode */
	list_add_tail(&drv->list, &op->drv);

@ -84,7 +84,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
 * everything is built-in, then we can avoid that.
 */
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
#define IWL_EXPORT_SYMBOL(sym)	EXPORT_SYMBOL_GPL(sym)
#define IWL_EXPORT_SYMBOL(sym)	EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI)
#else
#define IWL_EXPORT_SYMBOL(sym)
#endif

@ -23,26 +23,22 @@
 */
#define IWL_EEPROM_ACCESS_TIMEOUT	5000 /* uSec */

#define IWL_EEPROM_SEM_TIMEOUT		10   /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT	1000 /* number of attempts (not time) */

/*
 * The device's EEPROM semaphore prevents conflicts between driver and uCode
 * when accessing the EEPROM; each access is a series of pulses to/from the
 * EEPROM chip, not a single event, so even reads could conflict if they
 * weren't arbitrated by the semaphore.
 */
#define IWL_EEPROM_SEM_TIMEOUT		10   /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT	1000 /* number of attempts (not time) */

#define EEPROM_SEM_TIMEOUT 10		/* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */

static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
	u16 count;
	int ret;

	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
	for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
		/* Request semaphore */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
@ -51,7 +47,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
		ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
				   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				   EEPROM_SEM_TIMEOUT);
				   IWL_EEPROM_SEM_TIMEOUT);
		if (ret >= 0) {
			IWL_DEBUG_EEPROM(trans->dev,
					 "Acquired semaphore after %d tries.\n",

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_fh_h__
@ -590,11 +590,31 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP	(64)
#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3	1024
#define TFD_QUEUE_BC_SIZE_GEN3_AX210	1024
#define TFD_QUEUE_BC_SIZE_GEN3_BZ	(1024 * 4)
#define IWL_TX_DMA_MASK		DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS		20
#define IWL_TFH_NUM_TBS		25

/* IMR DMA registers */
#define IMR_TFH_SRV_DMA_CHNL0_CTRL		0x00a0a51c
#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR		0x00a0a520
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB	0x00a0a524
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB	0x00a0a528
#define IMR_TFH_SRV_DMA_CHNL0_BC		0x00a0a52c
#define TFH_SRV_DMA_CHNL0_LEFT_BC		0x00a0a530

/* RFH S2D DMA registers */
#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK	0x0000000c
#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK	0x00000002

/* TFH D2S DMA registers */
#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK	0x80000000
#define IMR_UREG_CHICK					0x00d05c00
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS	0x00800000
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK		0x00000030
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS	0x80000000

static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
	return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl {
} __packed;

/**
 * struct iwl_gen3_bc_tbl scheduler byte count table gen3
 * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3
 * For AX210 and on:
 * @tfd_offset: 0-12 - tx command byte count
 *	12-13 - number of 64 byte chunks
 *	14-16 - reserved
 */
struct iwl_gen3_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
struct iwl_gen3_bc_tbl_entry {
	__le16 tfd_offset;
} __packed;

#endif /* !__iwl_fh_h__ */

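Replacing the fixed 1024-entry struct iwl_gen3_bc_tbl with a one-entry struct lets callers size the scheduler byte-count table at runtime per device family, 1024 entries on AX210 and 4096 on Bz. A rough sketch of how an allocation might be sized under that assumption (variable names are illustrative):

	/* Sketch: size the byte-count table per family at runtime. */
	int num_entries = is_bz_family ? TFD_QUEUE_BC_SIZE_GEN3_BZ :
					 TFD_QUEUE_BC_SIZE_GEN3_AX210;
	size_t tbl_size = num_entries * sizeof(struct iwl_gen3_bc_tbl_entry);
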
@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);

u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
	u32 value = 0x5a5a5a5a;

	if (iwl_trans_grab_nic_access(trans)) {
		value = iwl_read32(trans, reg);
		u32 value = iwl_read32(trans, reg);

		iwl_trans_release_nic_access(trans);
		return value;
	}

	return value;
	return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_direct32);

@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);

u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
	u32 val = 0x5a5a5a5a;

	if (iwl_trans_grab_nic_access(trans)) {
		val = iwl_read_prph_no_grab(trans, ofs);
		u32 val = iwl_read_prph_no_grab(trans, ofs);

		iwl_trans_release_nic_access(trans);

		return val;
	}
	return val;

	return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);

@ -375,10 +375,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,

		if (v4)
			ch_flags =
				__le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
				__le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx);
		else
			ch_flags =
				__le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
				__le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx);

		if (band == NL80211_BAND_5GHZ &&
		    !data->sku_cap_band_52ghz_enable)
@ -584,9 +584,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
				IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
				IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
			.phy_cap_info[3] =
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
			.phy_cap_info[4] =
				IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@ -654,9 +654,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
				IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
				IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
			.phy_cap_info[3] =
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
			.phy_cap_info[6] =
				IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
@ -732,7 +732,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
	IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa);

	/* we know it's writable - we set it before ourselves */
	iftype_data = (void *)sband->iftype_data;
	iftype_data = (void *)(uintptr_t)sband->iftype_data;
	for (i = 0; i < sband->n_iftype_data; i++)
		iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
}
@ -784,6 +784,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
	switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
	case IWL_CFG_RF_TYPE_GF:
	case IWL_CFG_RF_TYPE_MR:
	case IWL_CFG_RF_TYPE_MS:
		iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
			IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
		if (!is_ap)
@ -912,7 +913,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + SKU);

	return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
	return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000));
}

static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
@ -920,8 +921,8 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + NVM_VERSION);
	else
		return le32_to_cpup((__le32 *)(nvm_sw +
					       NVM_VERSION_EXT_NVM));
	return le32_to_cpup((const __le32 *)(nvm_sw +
					     NVM_VERSION_EXT_NVM));
}

static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
@ -930,7 +931,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + RADIO_CFG);

	return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
	return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));

}

@ -941,7 +942,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + N_HW_ADDRS);

	n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
	n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));

	return n_hw_addr & N_HW_ADDR_MASK;
}
@ -1080,7 +1081,9 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
		return -EINVAL;
	}

	IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
	if (!trans->csme_own)
		IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n",
			 data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR));

	return 0;
}
@ -1385,8 +1388,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
|
||||
nvm_chan = iwl_nvm_channels;
|
||||
}
|
||||
|
||||
if (WARN_ON(num_of_ch > max_num_ch))
|
||||
if (num_of_ch > max_num_ch) {
|
||||
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
|
||||
"Num of channels (%d) is greater than expected. Truncating to %d\n",
|
||||
num_of_ch, max_num_ch);
|
||||
num_of_ch = max_num_ch;
|
||||
}
|
||||
|
||||
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
|
||||
return ERR_PTR(-EINVAL);
|
||||
@ -1592,7 +1599,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
|
||||
}
|
||||
|
||||
eof = fw_entry->data + fw_entry->size;
|
||||
dword_buff = (__le32 *)fw_entry->data;
|
||||
dword_buff = (const __le32 *)fw_entry->data;
|
||||
|
||||
/* some NVM file will contain a header.
|
||||
* The header is identified by 2 dwords header as follow:
|
||||
@ -1604,7 +1611,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
|
||||
if (fw_entry->size > NVM_HEADER_SIZE &&
|
||||
dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
|
||||
dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
|
||||
file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
|
||||
file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE);
|
||||
IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
|
||||
IWL_INFO(trans, "NVM Manufacturing date %08X\n",
|
||||
le32_to_cpu(dword_buff[3]));
|
||||
@ -1617,7 +1624,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
file_sec = (void *)fw_entry->data;
|
||||
file_sec = (const void *)fw_entry->data;
|
||||
}
|
||||
|
||||
while (true) {
|
||||
@ -1685,7 +1692,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
|
||||
nvm_sections[section_id].length = section_size;
|
||||
|
||||
/* advance to the next section */
|
||||
file_sec = (void *)(file_sec->data + section_size);
|
||||
file_sec = (const void *)(file_sec->data + section_size);
|
||||
}
|
||||
out:
|
||||
release_firmware(fw_entry);
|
||||
|
||||
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014, 2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021 Intel Corporation
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
 #include <linux/slab.h>
@@ -13,8 +13,6 @@
 #include "iwl-op-mode.h"
 #include "iwl-trans.h"

-#define CHANNEL_NUM_SIZE	4 /* num of channels in calib_ch size */
-
 struct iwl_phy_db_entry {
 	u16	size;
 	u8	*data;
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
@@ -354,10 +354,10 @@
 #define WFPM_GP2			0xA030B4

 /* DBGI SRAM Register details */
-#define DBGI_SRAM_TARGET_ACCESS_CFG			0x00A2E14C
-#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK	0x10000
 #define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB		0x00A2E154
 #define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB		0x00A2E158
+#define DBGI_SRAM_FIFO_POINTERS				0x00A2E148
+#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK		0x00000FFF

 enum {
 	ENABLE_WFPM = BIT(31),
@@ -386,6 +386,11 @@ enum {
 #define UREG_LMAC1_CURRENT_PC	0xa05c1c
 #define UREG_LMAC2_CURRENT_PC	0xa05c20

+#define WFPM_LMAC1_PD_NOTIFICATION	0xa0338c
+#define WFPM_ARC1_PD_NOTIFICATION	0xa03044
+#define HPM_SECONDARY_DEVICE_STATE	0xa03404
+
+
 /* For UMAG_GEN_HW_STATUS reg check */
 enum {
 	UMAG_GEN_HW_IS_FPGA = BIT(1),
@@ -491,4 +496,6 @@ enum {
 #define HBUS_TIMEOUT 0xA5A5A5A1
 #define WFPM_DPHY_OFF 0xDF10FF

+#define REG_OTP_MINOR 0xA0333C
+
 #endif /* __iwl_prph_h__ */
@@ -78,8 +78,12 @@ int iwl_trans_init(struct iwl_trans *trans)
 	if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
 		return -EINVAL;

-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-		trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+		trans->txqs.bc_tbl_size =
+			sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		trans->txqs.bc_tbl_size =
+			sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
 	else
 		trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
 	/*
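Review note: the byte-count table is now sized as entry size times a per-family queue depth instead of one fixed struct, which is what lets the Bz family reuse the AX210 entry layout with a deeper table; it is also why this series adds the fw/api/txq.h include at the top. A standalone sketch of that sizing rule (the entry layout and depths below are stand-ins, not the real values from fw/api/txq.h):

    #include <stdio.h>

    struct bc_tbl_entry { unsigned short tfd_offset; };     /* one byte-count slot */

    /* assumed depths, for illustration only */
    enum { BC_SIZE_AX210 = 1024, BC_SIZE_BZ = 4096 };

    static unsigned long bc_tbl_size(int is_bz, int is_ax210_or_later)
    {
            if (is_bz)
                    return sizeof(struct bc_tbl_entry) * BC_SIZE_BZ;
            if (is_ax210_or_later)
                    return sizeof(struct bc_tbl_entry) * BC_SIZE_AX210;
            return 0;       /* older families keep their own fixed struct */
    }

    int main(void)
    {
            printf("AX210: %lu bytes, Bz: %lu bytes\n",
                   bc_tbl_size(0, 1), bc_tbl_size(1, 0));
            return 0;
    }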
@@ -203,10 +207,10 @@ IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
 static int iwl_hcmd_names_cmp(const void *key, const void *elt)
 {
 	const struct iwl_hcmd_names *name = elt;
-	u8 cmd1 = *(u8 *)key;
+	const u8 *cmd1 = key;
 	u8 cmd2 = name->cmd_id;

-	return (cmd1 - cmd2);
+	return (*cmd1 - cmd2);
 }

 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
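Review note: this comparator feeds bsearch(), which passes the key by pointer; the fix keeps the key access const-correct (the old cast dropped the const qualifier) without changing the comparison result, since both u8 operands promote to int before the subtraction. A standalone example of how such a comparator is driven (the table and names here are illustrative, not the driver's real command tables):

    #include <stdio.h>
    #include <stdlib.h>

    struct hcmd_name { unsigned char cmd_id; const char *name; };

    /* must stay sorted by cmd_id for bsearch() */
    static const struct hcmd_name names[] = {
            { 0x01, "ALIVE" }, { 0x1c, "TX_CMD" }, { 0x77, "POWER_TABLE_CMD" },
    };

    static int hcmd_names_cmp(const void *key, const void *elt)
    {
            const struct hcmd_name *name = elt;
            const unsigned char *cmd1 = key;

            return *cmd1 - name->cmd_id;    /* both promoted to int */
    }

    int main(void)
    {
            unsigned char wanted = 0x1c;
            const struct hcmd_name *hit = bsearch(&wanted, names, 3,
                                                  sizeof(names[0]), hcmd_names_cmp);

            printf("%s\n", hit ? hit->name : "UNKNOWN");
            return 0;
    }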
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -406,6 +406,9 @@ struct iwl_dump_sanitize_ops {
  * @cb_data_offs: offset inside skb->cb to store transport data at, must have
  *	space for at least two pointers
  * @fw_reset_handshake: firmware supports reset flow handshake
+ * @queue_alloc_cmd_ver: queue allocation command version, set to 0
+ *	for using the older SCD_QUEUE_CFG, set to the version of
+ *	SCD_QUEUE_CONFIG_CMD otherwise.
  */
 struct iwl_trans_config {
 	struct iwl_op_mode *op_mode;
@@ -424,6 +427,7 @@ struct iwl_trans_config {

 	u8 cb_data_offs;
 	bool fw_reset_handshake;
+	u8 queue_alloc_cmd_ver;
 };

 struct iwl_trans_dump_data {
@@ -569,10 +573,9 @@ struct iwl_trans_ops {
 	void (*txq_disable)(struct iwl_trans *trans, int queue,
 			    bool configure_scd);
 	/* 22000 functions */
-	int (*txq_alloc)(struct iwl_trans *trans,
-			 __le16 flags, u8 sta_id, u8 tid,
-			 int cmd_id, int size,
-			 unsigned int queue_wdg_timeout);
+	int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
+			 u32 sta_mask, u8 tid,
+			 int size, unsigned int queue_wdg_timeout);
 	void (*txq_free)(struct iwl_trans *trans, int queue);
 	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
 			    struct iwl_trans_rxq_dma_data *data);
@@ -615,6 +618,10 @@ struct iwl_trans_ops {
 	int (*set_reduce_power)(struct iwl_trans *trans,
 				const void *data, u32 len);
 	void (*interrupts)(struct iwl_trans *trans, bool enable);
+	int (*imr_dma_data)(struct iwl_trans *trans,
+			    u32 dst_addr, u64 src_addr,
+			    u32 byte_cnt);
+
 };

 /**
@@ -721,6 +728,26 @@ struct iwl_self_init_dram {
 	int paging_cnt;
 };

+/**
+ * struct iwl_imr_data - imr dram data used during debug process
+ * @imr_enable: imr enable status received from fw
+ * @imr_size: imr dram size received from fw
+ * @sram_addr: sram address from debug tlv
+ * @sram_size: sram size from debug tlv
+ * @imr2sram_remainbyte: size remained after each dma transfer
+ * @imr_curr_addr: current dst address used during dma transfer
+ * @imr_base_addr: imr address received from fw
+ */
+struct iwl_imr_data {
+	u32 imr_enable;
+	u32 imr_size;
+	u32 sram_addr;
+	u32 sram_size;
+	u32 imr2sram_remainbyte;
+	u64 imr_curr_addr;
+	__le64 imr_base_addr;
+};
+
 /**
  * struct iwl_trans_debug - transport debug related data
  *
@@ -785,6 +812,7 @@ struct iwl_trans_debug {
 	u32 ucode_preset;
 	bool restart_required;
 	u32 last_tp_resetfw;
+	struct iwl_imr_data imr_data;
 };

 struct iwl_dma_ptr {
@@ -904,6 +932,7 @@ struct iwl_txq {
  * @queue_used - bit mask of used queues
  * @queue_stopped - bit mask of stopped queues
  * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
  */
 struct iwl_trans_txqs {
 	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -929,6 +958,8 @@ struct iwl_trans_txqs {
 	} tfd;

 	struct iwl_dma_ptr scd_bc_tbls;
+
+	u8 queue_alloc_cmd_ver;
 };

 /**
@@ -1220,9 +1251,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)

 static inline int
 iwl_trans_txq_alloc(struct iwl_trans *trans,
-		    __le16 flags, u8 sta_id, u8 tid,
-		    int cmd_id, int size,
-		    unsigned int wdg_timeout)
+		    u32 flags, u32 sta_mask, u8 tid,
+		    int size, unsigned int wdg_timeout)
 {
 	might_sleep();

@@ -1234,8 +1264,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
 		return -EIO;
 	}

-	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
-				     cmd_id, size, wdg_timeout);
+	return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
+				     size, wdg_timeout);
 }

 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
@@ -1368,6 +1398,15 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
 	iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
 } while (0)

+static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
+					  u32 dst_addr, u64 src_addr,
+					  u32 byte_cnt)
+{
+	if (trans->ops->imr_dma_data)
+		return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
+	return 0;
+}
+
 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
 {
 	u32 value;
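Review note: iwl_trans_write_imr_mem() above follows the optional-op convention used throughout the trans ops table: transports that cannot DMA IMR memory simply leave the pointer NULL and the helper reports success. A standalone analogue of that convention (types and names invented for the sketch):

    #include <stdio.h>

    struct ops {
            int (*dma)(unsigned int dst, unsigned long long src, unsigned int n);
    };

    static int do_dma(unsigned int dst, unsigned long long src, unsigned int n)
    {
            printf("dma %u bytes: 0x%llx -> 0x%x\n", n, src, dst);
            return 0;
    }

    static int write_imr_mem(const struct ops *o, unsigned int dst,
                             unsigned long long src, unsigned int n)
    {
            if (o->dma)
                    return o->dma(dst, src, n);
            return 0;       /* op not implemented: benign no-op, like the inline above */
    }

    int main(void)
    {
            struct ops with = { .dma = do_dma }, without = { 0 };

            write_imr_mem(&with, 0x1000, 0xdeadbeefULL, 16);
            return write_imr_mem(&without, 0, 0, 0);
    }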
@@ -312,7 +312,7 @@ static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
 		memcpy(q_head + wr, hdr, tx_sz);
 	} else {
 		memcpy(q_head + wr, hdr, q_sz - wr);
-		memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
+		memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
 	}

 	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
@@ -432,7 +432,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
 	u32 q_sz;
 	u32 rd;
 	u32 wr;
-	void *q_head;
+	u8 *q_head;

 	if (!iwl_mei_global_cldev)
 		return;
@@ -1980,7 +1980,11 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
 }

 static const struct mei_cl_device_id iwl_mei_tbl[] = {
-	{ KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
+	{
+		.name = KBUILD_MODNAME,
+		.uuid = MEI_WLAN_UUID,
+		.version = MEI_CL_VERSION_ANY,
+	},

 	/* required last entry */
 	{ }
@@ -102,8 +102,8 @@ static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
 	 * src IP address - 4 bytes
 	 * target MAC addess - 6 bytes
 	 */
-	target_ip = (void *)((u8 *)(arp + 1) +
-			     ETH_ALEN + sizeof(__be32) + ETH_ALEN);
+	target_ip = (const void *)((const u8 *)(arp + 1) +
+				   ETH_ALEN + sizeof(__be32) + ETH_ALEN);

 	/*
 	 * ARP request is forwarded to ME only if IP address match in the
@@ -31,7 +31,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
 	memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
 	mvmvif->rekey_data.akm = data->akm & 0xFF;
 	mvmvif->rekey_data.replay_ctr =
-		cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+		cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
 	mvmvif->rekey_data.valid = true;

 	mutex_unlock(&mvm->mutex);
@@ -453,8 +453,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					WOWLAN_TSC_RSC_PARAM,
+	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
 					IWL_FW_CMD_VER_UNKNOWN);
 	int ret;

@@ -672,8 +671,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
 		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
 	};
 	int i, err;
-	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					WOWLAN_PATTERNS,
+	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
 					IWL_FW_CMD_VER_UNKNOWN);

 	if (!wowlan->n_patterns)
@@ -921,8 +919,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
 	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
 		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

-	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-				  WOWLAN_CONFIGURATION, 0) < 6) {
+	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
 		/* Query the last used seqno and set it */
 		int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);

@@ -1017,8 +1014,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,

 	if (!fw_has_api(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
-		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-						WOWLAN_TKIP_PARAM,
+		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
 						IWL_FW_CMD_VER_UNKNOWN);
 		struct wowlan_key_tkip_data tkip_data = {};
 		int size;
@@ -1058,7 +1054,6 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 	};

 	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
-					IWL_ALWAYS_LONG_GROUP,
 					WOWLAN_KEK_KCK_MATERIAL,
 					IWL_FW_CMD_VER_UNKNOWN);
 	if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
@@ -1089,7 +1084,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 			sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
 		/* skip the sta_id at the beginning */
 		_kek_kck_cmd = (void *)
-			((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+			((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
 	}

 	IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
@@ -1489,7 +1484,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
 	int pktsize = status->wake_packet_bufsize;
 	int pktlen = status->wake_packet_length;
 	const u8 *pktdata = status->wake_packet;
-	struct ieee80211_hdr *hdr = (void *)pktdata;
+	const struct ieee80211_hdr *hdr = (const void *)pktdata;
 	int truncated = pktlen - pktsize;

 	/* this would be a firmware bug */
@@ -2074,8 +2069,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
 	};
 	int ret, len;
 	u8 notif_ver;
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					   WOWLAN_GET_STATUSES,
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
 					   IWL_FW_CMD_VER_UNKNOWN);

 	if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
@@ -2182,8 +2176,7 @@ out_free_resp:
 static struct iwl_wowlan_status_data *
 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
 {
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					   OFFLOADS_QUERY_CMD,
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD,
 					   IWL_FW_CMD_VER_UNKNOWN);
 	__le32 station_id = cpu_to_le32(sta_id);
 	u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
@@ -2704,7 +2697,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)

 	/* start pseudo D3 */
 	rtnl_lock();
+	wiphy_lock(mvm->hw->wiphy);
 	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+	wiphy_unlock(mvm->hw->wiphy);
 	rtnl_unlock();
 	if (err > 0)
 		err = -EINVAL;
@@ -2760,7 +2755,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);

 	rtnl_lock();
+	wiphy_lock(mvm->hw->wiphy);
 	__iwl_mvm_resume(mvm, true);
+	wiphy_unlock(mvm->hw->wiphy);
 	rtnl_unlock();

 	iwl_mvm_resume_tcm(mvm);
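Review note on the pattern repeated through the rest of this series: iwl_fw_lookup_cmd_ver() now takes one wide command id instead of a (group, opcode) pair, so callers can pass the id they are about to send (cmd.id, cmd_id) instead of re-spelling both halves, and iwl_cmd_id(op, grp, 0) call sites become WIDE_ID(grp, op); legacy ids with no explicit group implicitly belong to LONG_GROUP, which is why that argument can simply be dropped. A standalone sketch of the id packing; the macro body matches my reading of the driver headers and the enum values are illustrative, so treat both as assumptions:

    #include <stdio.h>

    /* assumed packing: group in the high byte, opcode in the low byte */
    #define WIDE_ID(grp, opcode)    (((grp) << 8) | (opcode))

    enum { LOCATION_GROUP = 0x8, TOF_RANGE_REQ_CMD = 0x0 }; /* illustrative */

    int main(void)
    {
            unsigned int id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD);

            printf("wide id 0x%04x -> group 0x%x, opcode 0x%x\n",
                   id, id >> 8, id & 0xff);
            return 0;
    }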
@@ -425,8 +425,7 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
 		return -EINVAL;

 	/* only change from debug set <-> debug unset */
-	if ((amsdu_len && mvmsta->orig_amsdu_len) ||
-	    (!!amsdu_len && mvmsta->orig_amsdu_len))
+	if (amsdu_len && mvmsta->orig_amsdu_len)
 		return -EBUSY;

 	if (amsdu_len) {
@@ -1661,7 +1660,7 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
 		.mvm = mvm,
 	};
 	u16 wait_cmds[] = {
-		iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
+		WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
 	};
 	u32 aid;
 	int ret;
@@ -1696,8 +1695,9 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
 				   wait_cmds, ARRAY_SIZE(wait_cmds),
 				   iwl_mvm_sniffer_apply, &apply);

-	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
-						   DATA_PATH_GROUP, 0), 0,
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+				   0,
 				   sizeof(he_mon_cmd), &he_mon_cmd);

 	/* no need to really wait, we already did anyway */
@@ -1914,8 +1914,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
 	if (!iwl_mvm_firmware_running(mvm))
 		return -EIO;

-	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
-			     DEBUG_GROUP, 0);
+	hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
 	cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);

 	/* Take care of alignment of both the position and the length */
@@ -1945,7 +1944,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
 		goto out;
 	}

-	ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+	ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len);
 	*ppos += ret;

 out:
@@ -1969,8 +1968,7 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
 	if (!iwl_mvm_firmware_running(mvm))
 		return -EIO;

-	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
-			     DEBUG_GROUP, 0);
+	hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);

 	if (*ppos & 0x3 || count < 4) {
 		op = DEBUG_MEM_OP_WRITE_BYTES;
@@ -346,8 +346,8 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
 		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
 		break;
 	case NL80211_CHAN_WIDTH_160:
-		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-						TOF_RANGE_REQ_CMD,
+		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 						IWL_FW_CMD_VER_UNKNOWN);

 		if (cmd_ver >= 13) {
@@ -548,7 +548,7 @@ static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_tof_range_req_cmd_v5 cmd_v5;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 		.dataflags[0] = IWL_HCMD_DFL_DUP,
 		.data[0] = &cmd_v5,
 		.len[0] = sizeof(cmd_v5),
@@ -574,7 +574,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_tof_range_req_cmd_v7 cmd_v7;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 		.dataflags[0] = IWL_HCMD_DFL_DUP,
 		.data[0] = &cmd_v7,
 		.len[0] = sizeof(cmd_v7),
@@ -604,7 +604,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_tof_range_req_cmd_v8 cmd;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 		.dataflags[0] = IWL_HCMD_DFL_DUP,
 		.data[0] = &cmd,
 		.len[0] = sizeof(cmd),
@@ -630,7 +630,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_tof_range_req_cmd_v9 cmd;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 		.dataflags[0] = IWL_HCMD_DFL_DUP,
 		.data[0] = &cmd,
 		.len[0] = sizeof(cmd),
@@ -728,7 +728,7 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
 {
 	struct iwl_tof_range_req_cmd_v11 cmd;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 		.dataflags[0] = IWL_HCMD_DFL_DUP,
 		.data[0] = &cmd,
 		.len[0] = sizeof(cmd),
@@ -799,7 +799,7 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
 {
 	struct iwl_tof_range_req_cmd_v12 cmd;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 		.dataflags[0] = IWL_HCMD_DFL_DUP,
 		.data[0] = &cmd,
 		.len[0] = sizeof(cmd),
@@ -827,7 +827,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
 {
 	struct iwl_tof_range_req_cmd_v13 cmd;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 		.dataflags[0] = IWL_HCMD_DFL_DUP,
 		.data[0] = &cmd,
 		.len[0] = sizeof(cmd),
@@ -877,8 +877,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EBUSY;

 	if (new_api) {
-		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-						   TOF_RANGE_REQ_CMD,
+		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 						   IWL_FW_CMD_VER_UNKNOWN);

 		switch (cmd_ver) {
@@ -927,8 +927,7 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)

 	iwl_mvm_ftm_reset(mvm);

-	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
-						 LOCATION_GROUP, 0),
+	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
 				 0, sizeof(cmd), &cmd))
 		IWL_ERR(mvm, "failed to abort FTM process\n");
 }
@@ -106,6 +106,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
 			  struct ieee80211_vif *vif,
 			  struct cfg80211_chan_def *chandef)
 {
+	u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	/*
 	 * The command structure is the same for versions 6, 7 and 8 (only the
@@ -120,8 +121,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
 			    IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
 		.sta_id = mvmvif->bcast_sta.sta_id,
 	};
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-					   TOF_RESPONDER_CONFIG_CMD, 6);
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6);
 	int err;
 	int cmd_size;

@@ -161,9 +161,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,

 	memcpy(cmd.bssid, vif->addr, ETH_ALEN);

-	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
-						    LOCATION_GROUP, 0),
-				    0, cmd_size, &cmd);
+	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
 }

 static int
@@ -177,8 +175,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
 	};
 	u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
-				 LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
 		.data[0] = &cmd,
 		.len[0] = sizeof(cmd),
 		.data[1] = &data,
@@ -220,8 +217,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
 {
 	struct iwl_tof_responder_dyn_config_cmd cmd;
 	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
-				 LOCATION_GROUP, 0),
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
 		.data[0] = &cmd,
 		.len[0] = sizeof(cmd),
 		/* may not be able to DMA from stack */
@@ -278,8 +274,9 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
 				  struct ieee80211_ftm_responder_params *params)
 {
 	int ret;
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-					   TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+					   WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+					   2);

 	switch (cmd_ver) {
 	case 2:
@@ -320,8 +317,9 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
 		.addr = addr,
 		.hltk = hltk,
 	};
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-					   TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+					   WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+					   2);

 	lockdep_assert_held(&mvm->mutex);
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -25,8 +25,6 @@
 #define MVM_UCODE_ALIVE_TIMEOUT	(HZ)
 #define MVM_UCODE_CALIB_TIMEOUT	(2 * HZ)

-#define UCODE_VALID_OK	cpu_to_le32(0x1)
-
 #define IWL_PPAG_MASK 3
 #define IWL_PPAG_ETSI_MASK BIT(0)

@@ -79,7 +77,7 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
 	struct iwl_dqa_enable_cmd dqa_cmd = {
 		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
 	};
-	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
 	int ret;

 	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
@@ -127,12 +125,30 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 	u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
 					      UCODE_ALIVE_NTFY, 0);

 	/*
 	 * For v5 and above, we can check the version, for older
 	 * versions we need to check the size.
 	 */
-	if (version == 5 || version == 6) {
-		/* v5 and v6 are compatible (only IMR addition) */
+	if (version == 6) {
+		struct iwl_alive_ntf_v6 *palive;
+
+		if (pkt_len < sizeof(*palive))
+			return false;
+
+		palive = (void *)pkt->data;
+		mvm->trans->dbg.imr_data.imr_enable =
+			le32_to_cpu(palive->imr.enabled);
+		mvm->trans->dbg.imr_data.imr_size =
+			le32_to_cpu(palive->imr.size);
+		mvm->trans->dbg.imr_data.imr2sram_remainbyte =
+			mvm->trans->dbg.imr_data.imr_size;
+		mvm->trans->dbg.imr_data.imr_base_addr =
+			palive->imr.base_addr;
+		mvm->trans->dbg.imr_data.imr_curr_addr =
+			le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
+		IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n",
+			     mvm->trans->dbg.imr_data.imr_enable,
+			     mvm->trans->dbg.imr_data.imr_size,
+			     le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
+	}
+
+	if (version >= 5) {
 		struct iwl_alive_ntf_v5 *palive;

 		if (pkt_len < sizeof(*palive))
@@ -249,6 +265,26 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
 	return false;
 }

+static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
+{
+	struct iwl_trans *trans = mvm->trans;
+	enum iwl_device_family device_family = trans->trans_cfg->device_family;
+
+	if (device_family < IWL_DEVICE_FAMILY_8000)
+		return;
+
+	if (device_family <= IWL_DEVICE_FAMILY_9000)
+		IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n",
+			iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION));
+	else
+		IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n",
+			iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION));
+
+	IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n",
+		iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE));
+}
+
 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 					 enum iwl_ucode_type ucode_type)
 {
@@ -314,6 +350,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 			iwl_read_prph(trans, SB_CPU_2_STATUS));
 	}

+	iwl_mvm_print_pd_notification(mvm);
+
 	/* LMAC/UMAC PC info */
 	if (trans->trans_cfg->device_family >=
 					IWL_DEVICE_FAMILY_9000) {
@@ -546,8 +584,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
 		return 0;
 	}

-	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
-					SAR_OFFSET_MAPPING_TABLE_CMD,
+	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
 					IWL_FW_CMD_VER_UNKNOWN);

 	if (cmd_ver != 2) {
@@ -572,6 +609,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)

 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 {
+	u32 cmd_id = PHY_CONFIGURATION_CMD;
 	struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
 	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
 	struct iwl_phy_specific_cfg phy_filters = {};
@@ -603,8 +641,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 	phy_cfg_cmd.calib_control.flow_trigger =
 		mvm->fw->default_calib[ucode_type].flow_trigger;

-	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-					PHY_CONFIGURATION_CMD,
+	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
 					IWL_FW_CMD_VER_UNKNOWN);
 	if (cmd_ver == 3) {
 		iwl_mvm_phy_filter_init(mvm, &phy_filters);
@@ -616,8 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 		       phy_cfg_cmd.phy_cfg);
 	cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
 				    sizeof(struct iwl_phy_cfg_cmd_v1);
-	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
-				    cmd_size, &phy_cfg_cmd);
+	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
 }

 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
@@ -737,7 +773,7 @@ out:
 	mvm->nvm_data->bands[0].n_channels = 1;
 	mvm->nvm_data->bands[0].n_bitrates = 1;
 	mvm->nvm_data->bands[0].bitrates =
-		(void *)mvm->nvm_data->channels + 1;
+		(void *)((u8 *)mvm->nvm_data->channels + 1);
 	mvm->nvm_data->bands[0].bitrates->hw_value = 10;
 }

@@ -760,6 +796,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
 #ifdef CONFIG_ACPI
 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 {
+	u32 cmd_id = REDUCE_TX_POWER_CMD;
 	struct iwl_dev_tx_power_cmd cmd = {
 		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
 	};
@@ -767,8 +804,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 	int ret;
 	u16 len = 0;
 	u32 n_subbands;
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					   REDUCE_TX_POWER_CMD,
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
 					   IWL_FW_CMD_VER_UNKNOWN);

 	if (cmd_ver == 6) {
@@ -805,7 +841,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 	iwl_mei_set_power_limit(per_chain);

 	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
-	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 }

 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -814,9 +850,12 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 	struct iwl_geo_tx_power_profiles_resp *resp;
 	u16 len;
 	int ret;
-	struct iwl_host_cmd cmd;
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-					   PER_CHAIN_LIMIT_OFFSET_CMD,
+	struct iwl_host_cmd cmd = {
+		.id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
+		.flags = CMD_WANT_SKB,
+		.data = { &geo_tx_cmd },
+	};
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
 					   IWL_FW_CMD_VER_UNKNOWN);

 	/* the ops field is at the same spot for all versions, so set in v1 */
@@ -838,12 +877,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 	if (!iwl_sar_geo_support(&mvm->fwrt))
 		return -EOPNOTSUPP;

-	cmd = (struct iwl_host_cmd){
-		.id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
-		.len = { len, },
-		.flags = CMD_WANT_SKB,
-		.data = { &geo_tx_cmd },
-	};
+	cmd.len[0] = len;

 	ret = iwl_mvm_send_cmd(mvm, &cmd);
 	if (ret) {
@@ -863,14 +897,14 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)

 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 {
+	u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
 	union iwl_geo_tx_power_profiles_cmd cmd;
 	u16 len;
 	u32 n_bands;
 	u32 n_profiles;
 	u32 sk = 0;
 	int ret;
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-					   PER_CHAIN_LIMIT_OFFSET_CMD,
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
 					   IWL_FW_CMD_VER_UNKNOWN);

 	BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
@@ -948,10 +982,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 			IWL_UCODE_TLV_API_SAR_TABLE_VER))
 		cmd.v2.table_revision = cpu_to_le32(sk);

-	return iwl_mvm_send_cmd_pdu(mvm,
-				    WIDE_ID(PHY_OPS_GROUP,
-					    PER_CHAIN_LIMIT_OFFSET_CMD),
-				    0, len, &cmd);
+	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 }

 static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
@@ -1070,8 +1101,8 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
 	 * use v1 to access it.
 	 */
 	cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags);
-	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-					PER_PLATFORM_ANT_GAIN_CMD,
+	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+					WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
 					IWL_FW_CMD_VER_UNKNOWN);
 	if (cmd_ver == 1) {
 		num_sub_bands = IWL_NUM_SUB_BANDS_V1;
@@ -1205,11 +1236,12 @@ static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigne

 static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 {
+	u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
 	int ret;
-	struct iwl_tas_config_cmd_v3 cmd = {};
-	int cmd_size;
+	union iwl_tas_config_cmd cmd = {};
+	int cmd_size, fw_ver;

-	BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) <
+	BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
 		     APCI_WTAS_BLACK_LIST_MAX);

 	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@@ -1217,7 +1249,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 		return;
 	}

-	ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd);
+	fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+				       IWL_FW_CMD_VER_UNKNOWN);
+
+	ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
 	if (ret < 0) {
 		IWL_DEBUG_RADIO(mvm,
 				"TAS table invalid or unavailable. (%d)\n",
@@ -1232,25 +1267,24 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 		IWL_DEBUG_RADIO(mvm,
 				"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
 				dmi_get_system_info(DMI_SYS_VENDOR));
-		if ((!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
-						    &cmd.block_list_size, IWL_TAS_US_MCC)) ||
-		    (!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
-						    &cmd.block_list_size, IWL_TAS_CANADA_MCC))) {
+		if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+						    &cmd.v4.block_list_size,
+						    IWL_TAS_US_MCC)) ||
+		    (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+						    &cmd.v4.block_list_size,
+						    IWL_TAS_CANADA_MCC))) {
 			IWL_DEBUG_RADIO(mvm,
 					"Unable to add US/Canada to TAS block list, disabling TAS\n");
 			return;
 		}
 	}

-	cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
-					 TAS_CONFIG,
-					 IWL_FW_CMD_VER_UNKNOWN) < 3 ?
+	/* v4 is the same size as v3, so no need to differentiate here */
+	cmd_size = fw_ver < 3 ?
 		   sizeof(struct iwl_tas_config_cmd_v2) :
 		   sizeof(struct iwl_tas_config_cmd_v3);

-	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
-						TAS_CONFIG),
-				   0, cmd_size, &cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
 	if (ret < 0)
 		IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
 }
@@ -1283,7 +1317,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 {
 	int ret;
 	u32 value;
-	struct iwl_lari_config_change_cmd_v5 cmd = {};
+	struct iwl_lari_config_change_cmd_v6 cmd = {};

 	cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);

@@ -1310,25 +1344,43 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 	if (!ret)
 		cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);

+	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
+				   DSM_FUNC_FORCE_DISABLE_CHANNELS,
+				   &iwl_guid, &value);
+	if (!ret)
+		cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
 	if (cmd.config_bitmap ||
 	    cmd.oem_uhb_allow_bitmap ||
 	    cmd.oem_11ax_allow_bitmap ||
 	    cmd.oem_unii4_allow_bitmap ||
-	    cmd.chan_state_active_bitmap) {
+	    cmd.chan_state_active_bitmap ||
+	    cmd.force_disable_channels_bitmap) {
 		size_t cmd_size;
 		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
-						   REGULATORY_AND_NVM_GROUP,
-						   LARI_CONFIG_CHANGE, 1);
-		if (cmd_ver == 5)
+						   WIDE_ID(REGULATORY_AND_NVM_GROUP,
+							   LARI_CONFIG_CHANGE),
+						   1);
+		switch (cmd_ver) {
+		case 6:
+			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+			break;
+		case 5:
 			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
-		else if (cmd_ver == 4)
+			break;
+		case 4:
 			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
-		else if (cmd_ver == 3)
+			break;
+		case 3:
 			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
-		else if (cmd_ver == 2)
+			break;
+		case 2:
 			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
-		else
+			break;
+		default:
 			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+			break;
+		}

 		IWL_DEBUG_RADIO(mvm,
 				"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
@@ -1340,8 +1392,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 				le32_to_cpu(cmd.chan_state_active_bitmap),
 				cmd_ver);
 		IWL_DEBUG_RADIO(mvm,
-				"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n",
-				le32_to_cpu(cmd.oem_uhb_allow_bitmap));
+				"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+				le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+				le32_to_cpu(cmd.force_disable_channels_bitmap));
 		ret = iwl_mvm_send_cmd_pdu(mvm,
 					   WIDE_ID(REGULATORY_AND_NVM_GROUP,
 						   LARI_CONFIG_CHANGE),
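Review note: the LARI change above is a common firmware-versioning idiom in this driver: the host always fills the newest command layout, then truncates the payload to the size the firmware's advertised command version understands, which only works because each new version appends fields at the end. A standalone sketch of that idiom (struct names mirror the diff, but the layouts are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct lari_cmd_v1 { unsigned int config_bitmap; };
    struct lari_cmd_v5 { unsigned int config_bitmap, oem_uhb_allow_bitmap; };
    struct lari_cmd_v6 { unsigned int config_bitmap, oem_uhb_allow_bitmap,
                         force_disable_channels_bitmap; };

    static size_t lari_cmd_size(int cmd_ver)
    {
            switch (cmd_ver) {
            case 6: return sizeof(struct lari_cmd_v6);
            case 5: return sizeof(struct lari_cmd_v5);
            default: return sizeof(struct lari_cmd_v1);
            }
    }

    int main(void)
    {
            struct lari_cmd_v6 cmd = { .force_disable_channels_bitmap = 0x3 };

            /* an older firmware only receives the prefix it understands */
            printf("v5 firmware gets %zu of %zu bytes\n",
                   lari_cmd_size(5), sizeof(cmd));
            return 0;
    }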
@@ -1641,9 +1694,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	 * internal aux station for all aux activities that don't
 	 * requires a dedicated data queue.
 	 */
-	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-				  ADD_STA,
-				  0) < 12) {
+	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
 		/*
 		 * In old version the aux station uses mac id like other
 		 * station and not lmac id
@@ -1658,8 +1709,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	while (!sband && i < NUM_NL80211_BANDS)
 		sband = mvm->hw->wiphy->bands[i++];

-	if (WARN_ON_ONCE(!sband))
+	if (WARN_ON_ONCE(!sband)) {
+		ret = -ENODEV;
 		goto error;
+	}

 	chan = &sband->channels[0];

@@ -1800,9 +1853,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
 	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
 		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

-	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-				  ADD_STA,
-				  0) < 12) {
+	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
 		/*
 		 * Add auxiliary station for scanning.
 		 * Newer versions of this command implies that the fw uses
@@ -821,10 +821,7 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
 u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
 {
 	u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
-	bool is_new_rate = iwl_fw_lookup_cmd_ver(fw,
-						 LONG_GROUP,
-						 BEACON_TEMPLATE_CMD,
-						 0) > 10;
+	bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;

 	if (rate_idx <= IWL_FIRST_CCK_RATE)
 		flags |= is_new_rate ? IWL_MAC_BEACON_CCK
@@ -960,8 +957,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
 	WARN_ON(channel == 0);
 	if (cfg80211_channel_is_psc(ctx->def.chan) &&
 	    !IWL_MVM_DISABLE_AP_FILS) {
-		flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					       BEACON_TEMPLATE_CMD,
+		flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
 					       0) > 10 ?
 			IWL_MAC_BEACON_FILS :
 			IWL_MAC_BEACON_FILS_V1;
@@ -1458,8 +1454,9 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 	struct sk_buff *skb;
 	u8 *data;
 	u32 size = le32_to_cpu(sb->byte_count);
-	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP,
-					STORED_BEACON_NTF, 0);
+	int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+					WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF),
+					0);

 	if (size == 0)
 		return;
@@ -1602,6 +1599,18 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
 		RCU_INIT_POINTER(mvm->csa_vif, NULL);
 		return;
 	case NL80211_IFTYPE_STATION:
+		/*
+		 * if we don't know about an ongoing channel switch,
+		 * make sure FW cancels it
+		 */
+		if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+					    CHANNEL_SWITCH_ERROR_NOTIF,
+					    0) && !vif->csa_active) {
+			IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
+			iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
+			break;
+		}
+
 		iwl_mvm_csa_client_absent(mvm, vif);
 		cancel_delayed_work(&mvmvif->csa_work);
 		ieee80211_chswitch_done(vif, true);
@@ -1615,6 +1624,31 @@ out_unlock:
 	rcu_read_unlock();
 }

+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+					struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+	struct ieee80211_vif *vif;
+	u32 id = le32_to_cpu(notif->mac_id);
+	u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+	rcu_read_lock();
+	vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+	if (!vif) {
+		rcu_read_unlock();
+		return;
+	}
+
+	IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n",
+		       id, csa_err_mask);
+	if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+			    CS_ERR_LONG_DELAY_AFTER_CS |
+			    CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+		ieee80211_channel_switch_disconnect(vif, true);
+	rcu_read_unlock();
+}
+
 void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
 				 struct iwl_rx_cmd_buffer *rxb)
 {
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -627,8 +627,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

-	if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-				  WOWLAN_KEK_KCK_MATERIAL,
+	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
 				  IWL_FW_CMD_VER_UNKNOWN) == 3)
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;

@@ -641,9 +640,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	}

 	if (iwl_mvm_is_oce_supported(mvm)) {
-		u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
-						    IWL_ALWAYS_LONG_GROUP,
-						    SCAN_REQ_UMAC, 0);
+		u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);

 		wiphy_ext_feature_set(hw->wiphy,
 				      NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
@@ -1233,7 +1230,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)

 	/* async_handlers_wk is now blocked */

-	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
+	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
 		iwl_mvm_rm_aux_sta(mvm);

 	iwl_mvm_stop_device(mvm);
@@ -1325,6 +1322,7 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 				s16 tx_power)
 {
+	u32 cmd_id = REDUCE_TX_POWER_CMD;
 	int len;
 	struct iwl_dev_tx_power_cmd cmd = {
 		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
@@ -1332,8 +1330,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
 		.common.pwr_restriction = cpu_to_le16(8 * tx_power),
 	};
-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					   REDUCE_TX_POWER_CMD,
+	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
 					   IWL_FW_CMD_VER_UNKNOWN);

 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@@ -1353,7 +1350,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	/* all structs have the same common part, add it */
 	len += sizeof(cmd.common);

-	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 }

 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
@@ -1414,6 +1411,15 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
 		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
 	};

+	/*
+	 * In the new flow since FW is in charge of the timing,
+	 * if driver has canceled the channel switch he will receive the
+	 * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it
+	 */
+	if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+				    CHANNEL_SWITCH_ERROR_NOTIF, 0))
+		return;
+
 	IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);

 	mutex_lock(&mvm->mutex);
@@ -2080,11 +2086,108 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
 	return res;
 }

+static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
+			      struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+			      u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
+{
+	int i;
+
+	/*
+	 * FW currently supports only nss == MAX_HE_SUPP_NSS
+	 *
+	 * If nss > MAX: we can ignore values we don't support
+	 * If nss < MAX: we can set zeros in other streams
+	 */
+	if (nss > MAX_HE_SUPP_NSS) {
+		IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+			 MAX_HE_SUPP_NSS);
+		nss = MAX_HE_SUPP_NSS;
+	}
+
+	for (i = 0; i < nss; i++) {
+		u8 ru_index_tmp = ru_index_bitmap << 1;
+		u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+		u8 bw;
+
+		for (bw = 0;
+		     bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+		     bw++) {
+			ru_index_tmp >>= 1;
+
+			if (!(ru_index_tmp & 1))
+				continue;
+
+			high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+			ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+			low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+			ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+			pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+			pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+		}
+	}
+}
+
+static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
+					    struct ieee80211_sta *sta,
+					    struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+	u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
+	u8 *ppe = &sta->he_cap.ppe_thres[0];
+	u8 ru_index_bitmap =
+		u8_get_bits(*ppe,
+			    IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+	/* Starting after PPE header */
+	u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+	iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
+}
+
+static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+						     u8 nominal_padding,
+						     u32 *flags)
+{
+	int low_th = -1;
+	int high_th = -1;
+	int i;
+
+	switch (nominal_padding) {
+	case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
+		low_th = IWL_HE_PKT_EXT_NONE;
+		high_th = IWL_HE_PKT_EXT_NONE;
+		break;
+	case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
+		low_th = IWL_HE_PKT_EXT_BPSK;
+		high_th = IWL_HE_PKT_EXT_NONE;
+		break;
+	case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
+		low_th = IWL_HE_PKT_EXT_NONE;
+		high_th = IWL_HE_PKT_EXT_BPSK;
+		break;
+	}
+
+	/* Set the PPE thresholds accordingly */
+	if (low_th >= 0 && high_th >= 0) {
+		for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+			u8 bw;
+
+			for (bw = 0;
+			     bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+			     bw++) {
+				pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+				pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+			}
+		}
+
+		*flags |= STA_CTXT_HE_PACKET_EXT;
+	}
+}
+
 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
 			       struct ieee80211_vif *vif, u8 sta_id)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+	struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = {
 		.sta_id = sta_id,
 		.tid_limit = IWL_MAX_TID_COUNT,
 		.bss_color = vif->bss_conf.he_bss_color.color,
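Review note: the new helpers walk the HE PPE Thresholds field, which starts with a 7-bit header (3-bit NSS, 4-bit RU index bitmask) followed by 3-bit PPET8/PPET16 values per RU per spatial stream; that header size is why parsing now starts at IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE instead of a bare 7. A standalone sketch of pulling one 3-bit value that may straddle a byte boundary, mirroring what a helper like iwl_mvm_he_get_ppe_val() must do (its body is not shown in this hunk, so treat this as an illustration rather than the driver's exact code):

    #include <stdio.h>

    static unsigned char get_ppe_val(const unsigned char *ppe, unsigned int pos)
    {
            unsigned int byte = pos / 8, shift = pos % 8;
            unsigned int val = ppe[byte] >> shift;

            if (shift > 5)  /* the 3-bit value crosses into the next byte */
                    val |= (unsigned int)ppe[byte + 1] << (8 - shift);

            return val & 0x7;
    }

    int main(void)
    {
            const unsigned char ppe[] = { 0xb1, 0x1c };

            /* first threshold value right after the 7-bit header */
            printf("val@7 = %u\n", get_ppe_val(ppe, 7));
            return 0;
    }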
@ -2092,16 +2195,39 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
|
||||
.frame_time_rts_th =
|
||||
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
|
||||
};
|
||||
int size = fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_MBSSID_HE) ?
|
||||
sizeof(sta_ctxt_cmd) :
|
||||
sizeof(struct iwl_he_sta_context_cmd_v1);
|
||||
struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {};
|
||||
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD);
|
||||
u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2);
|
||||
int size;
|
||||
struct ieee80211_sta *sta;
|
||||
u32 flags;
|
||||
int i;
|
||||
const struct ieee80211_sta_he_cap *own_he_cap = NULL;
|
||||
struct ieee80211_chanctx_conf *chanctx_conf;
|
||||
const struct ieee80211_supported_band *sband;
|
||||
void *cmd;
|
||||
|
||||
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
|
||||
ver = 1;
|
||||
|
||||
switch (ver) {
|
||||
case 1:
|
||||
/* same layout as v2 except some data at the end */
|
||||
cmd = &sta_ctxt_cmd_v2;
|
||||
size = sizeof(struct iwl_he_sta_context_cmd_v1);
|
||||
break;
|
||||
case 2:
|
||||
cmd = &sta_ctxt_cmd_v2;
|
||||
size = sizeof(struct iwl_he_sta_context_cmd_v2);
|
||||
break;
|
||||
case 3:
|
||||
cmd = &sta_ctxt_cmd;
|
||||
size = sizeof(struct iwl_he_sta_context_cmd_v3);
|
||||
break;
|
||||
default:
|
||||
IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver);
|
||||
return;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
@ -2166,97 +2292,25 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
|
||||
* Initialize the PPE thresholds to "None" (7), as described in Table
|
||||
* 9-262ac of 80211.ax/D3.0.
|
||||
*/
|
||||
memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
|
||||
memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
|
||||
sizeof(sta_ctxt_cmd.pkt_ext));
|
||||
|
||||
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
|
||||
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
|
||||
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
|
||||
u8 nss = (sta->he_cap.ppe_thres[0] &
|
||||
IEEE80211_PPE_THRES_NSS_MASK) + 1;
|
||||
u8 ru_index_bitmap =
|
||||
(sta->he_cap.ppe_thres[0] &
|
||||
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
|
||||
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
|
||||
u8 *ppe = &sta->he_cap.ppe_thres[0];
|
||||
u8 ppe_pos_bit = 7; /* Starting after PPE header */
|
||||
|
||||
/*
|
||||
* FW currently supports only nss == MAX_HE_SUPP_NSS
|
||||
*
|
||||
* If nss > MAX: we can ignore values we don't support
|
||||
* If nss < MAX: we can set zeros in other streams
|
||||
*/
|
||||
if (nss > MAX_HE_SUPP_NSS) {
|
||||
IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
|
||||
MAX_HE_SUPP_NSS);
|
||||
nss = MAX_HE_SUPP_NSS;
|
||||
}
|
||||
|
||||
for (i = 0; i < nss; i++) {
|
||||
u8 ru_index_tmp = ru_index_bitmap << 1;
|
||||
u8 bw;
|
||||
|
||||
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
|
||||
ru_index_tmp >>= 1;
|
||||
if (!(ru_index_tmp & 1))
|
||||
continue;
|
||||
|
||||
sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
|
||||
iwl_mvm_he_get_ppe_val(ppe,
|
||||
ppe_pos_bit);
|
||||
ppe_pos_bit +=
|
||||
IEEE80211_PPE_THRES_INFO_PPET_SIZE;
|
||||
sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
|
||||
iwl_mvm_he_get_ppe_val(ppe,
|
||||
ppe_pos_bit);
|
||||
ppe_pos_bit +=
|
||||
IEEE80211_PPE_THRES_INFO_PPET_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
|
||||
iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
|
||||
&sta_ctxt_cmd.pkt_ext);
|
||||
flags |= STA_CTXT_HE_PACKET_EXT;
|
||||
} else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
|
||||
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)
|
||||
!= IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
|
||||
int low_th = -1;
|
||||
int high_th = -1;
|
||||
|
||||
/* Take the PPE thresholds from the nominal padding info */
|
||||
switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
|
||||
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
|
||||
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
|
||||
low_th = IWL_HE_PKT_EXT_NONE;
|
||||
high_th = IWL_HE_PKT_EXT_NONE;
|
||||
break;
|
||||
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
|
||||
low_th = IWL_HE_PKT_EXT_BPSK;
|
||||
high_th = IWL_HE_PKT_EXT_NONE;
|
||||
break;
|
||||
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
|
||||
low_th = IWL_HE_PKT_EXT_NONE;
|
||||
high_th = IWL_HE_PKT_EXT_BPSK;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Set the PPE thresholds accordingly */
|
||||
if (low_th >= 0 && high_th >= 0) {
|
||||
struct iwl_he_pkt_ext *pkt_ext =
|
||||
(struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
|
||||
|
||||
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
|
||||
u8 bw;
|
||||
|
||||
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
|
||||
bw++) {
|
||||
pkt_ext->pkt_ext_qam_th[i][bw][0] =
|
||||
low_th;
|
||||
pkt_ext->pkt_ext_qam_th[i][bw][1] =
|
||||
high_th;
|
||||
}
|
||||
}
|
||||
|
||||
flags |= STA_CTXT_HE_PACKET_EXT;
|
||||
}
|
||||
/* PPE Thresholds don't exist - set the API PPE values
* according to Common Nominal Packet Padding fields. */
} else {
u8 nominal_padding =
u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
nominal_padding,
&flags);
}

if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
@ -2319,9 +2373,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,

sta_ctxt_cmd.flags = cpu_to_le32(flags);

if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
DATA_PATH_GROUP, 0),
0, size, &sta_ctxt_cmd))
if (ver < 3) {
/* fields before pkt_ext */
BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) !=
offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext));
memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd,
offsetof(typeof(sta_ctxt_cmd), pkt_ext));

/* pkt_ext */
for (i = 0;
i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th);
i++) {
u8 bw;

for (bw = 0;
bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]);
bw++) {
BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) !=
sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw]));

memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw],
&sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw],
sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]));
}
}

/* fields after pkt_ext */
BUILD_BUG_ON(sizeof(sta_ctxt_cmd) -
offsetofend(typeof(sta_ctxt_cmd), pkt_ext) !=
sizeof(sta_ctxt_cmd_v2) -
offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext));
memcpy((u8 *)&sta_ctxt_cmd_v2 +
offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext),
(u8 *)&sta_ctxt_cmd +
offsetofend(typeof(sta_ctxt_cmd), pkt_ext),
sizeof(sta_ctxt_cmd) -
offsetofend(typeof(sta_ctxt_cmd), pkt_ext));
sta_ctxt_cmd_v2.reserved3 = 0;
}

if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd))
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}

@ -2537,11 +2628,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
* A firmware with the new API will remove it automatically.
*/
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_stop_session_protection(mvm, vif);

iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@ -3190,7 +3278,7 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,

if (he_cap) {
/* we know that ours is writable */
struct ieee80211_sta_he_cap *he = (void *)he_cap;
struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;

he->he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
@ -4042,8 +4130,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) >= 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) {
u32 lmac_id;

lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
@ -4846,6 +4933,15 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,

break;
case NL80211_IFTYPE_STATION:
/*
* In the new flow FW is in charge of timing the switch so there
* is no need for all of this
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_ERROR_NOTIF,
0))
break;

/*
* We haven't configured the firmware to be associated yet since
* we don't know the dtim period. In this case, the firmware can't
@ -4917,6 +5013,14 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
.cs_mode = chsw->block_tx,
};

/*
* In the new flow FW is in charge of timing the switch so there is no
* need for all of this
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_ERROR_NOTIF, 0))
return;

if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
return;

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -1097,7 +1097,6 @@ struct iwl_mvm {
} cmd_ver;

struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];

/*
@ -1117,6 +1116,8 @@ struct iwl_mvm {

unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;

bool sta_remove_requires_queue_remove;
};

/* Extract MVM priv from op_mode and _hw */
@ -1684,6 +1685,8 @@ void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@ -1945,10 +1948,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)

void iwl_mvm_stop_device(struct iwl_mvm *mvm);

/* Re-configure the SCD for a queue that has already been configured */
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn);

/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
@ -2098,6 +2097,8 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);

static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
@ -2172,8 +2173,7 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,

static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
IWL_FW_CMD_VER_UNKNOWN);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;

@ -47,8 +47,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
PROT_OFFLOAD_CONFIG_CMD, 0);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);

#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

@ -24,6 +24,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "fw/api/rfi.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
@ -32,6 +33,7 @@
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IWLWIFI);

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
@ -191,7 +193,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,

if (he_cap) {
/* we know that ours is writable */
struct ieee80211_sta_he_cap *he = (void *)he_cap;
struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;

WARN_ON(!he->has_he);
WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
@ -235,7 +237,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
*/
mvm->fw_static_smps_request =
req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
ieee80211_iterate_interfaces(mvm->hw,
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
iwl_mvm_intf_dual_chain_req, NULL);
}

@ -382,6 +385,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
iwl_mvm_channel_switch_start_notif,
RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
iwl_mvm_channel_switch_error_notif,
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@ -390,6 +397,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_thermal_dual_chain_req,
RX_HANDLER_ASYNC_LOCKED,
struct iwl_thermal_dual_chain_request),

RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_rfi_deactivate_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@ -443,7 +454,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
HCMD_NAME(DC2DC_CONFIG_CMD),
HCMD_NAME(NVM_ACCESS_CMD),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
@ -500,6 +510,7 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
HCMD_NAME(RFI_DEACTIVATE_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
@ -536,6 +547,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@ -634,13 +646,11 @@ unlock:
mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_fwrt_dump_start(void *ctx)
static void iwl_mvm_fwrt_dump_start(void *ctx)
{
struct iwl_mvm *mvm = ctx;

mutex_lock(&mvm->mutex);

return 0;
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
@ -1245,6 +1255,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);

trans_cfg.queue_alloc_cmd_ver =
iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
SCD_QUEUE_CONFIG_CMD),
0);
mvm->sta_remove_requires_queue_remove =
trans_cfg.queue_alloc_cmd_ver > 0;

/* Configure transport layer */
iwl_trans_configure(mvm->trans, &trans_cfg);

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@ -158,8 +158,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);

/* we only support RLC command version 2 */
if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
RLC_CONFIG_CMD, 0) < 2)
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
chains_static, chains_dynamic);
}
@ -172,8 +171,7 @@ static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm,
.phy_id = cpu_to_le32(ctxt->id),
};

if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
RLC_CONFIG_CMD, 0) < 2)
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
return 0;

BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
@ -209,8 +207,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
u32 action)
{
int ret;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
PHY_CONTEXT_CMD, 1);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);

if (ver == 3 || ver == 4) {
struct iwl_phy_context_cmd cmd = {};
@ -301,8 +298,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,

lockdep_assert_held(&mvm->mutex);

if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
RLC_CONFIG_CMD, 0) >= 2 &&
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 &&
ctxt->channel == chandef->chan &&
ctxt->width == chandef->width &&
ctxt->center_freq1 == chandef->center_freq1)
@ -349,19 +345,32 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* otherwise we might not be able to reuse this phy.
*/
if (ctxt->ref == 0) {
struct ieee80211_channel *chan;
struct ieee80211_channel *chan = NULL;
struct cfg80211_chan_def chandef;
struct ieee80211_supported_band *sband = NULL;
enum nl80211_band band = NL80211_BAND_2GHZ;
struct ieee80211_supported_band *sband;
enum nl80211_band band;
int channel;

while (!sband && band < NUM_NL80211_BANDS)
sband = mvm->hw->wiphy->bands[band++];
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = mvm->hw->wiphy->bands[band];

if (WARN_ON(!sband))
if (!sband)
continue;

for (channel = 0; channel < sband->n_channels; channel++)
if (!(sband->channels[channel].flags &
IEEE80211_CHAN_DISABLED)) {
chan = &sband->channels[channel];
break;
}

if (chan)
break;
}

if (WARN_ON(!chan))
return;

chan = &sband->channels[0];

cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
}

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/

@ -125,12 +125,19 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
return ERR_PTR(-EIO);

resp = kzalloc(resp_size, GFP_KERNEL);
resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
if (!resp)
return ERR_PTR(-ENOMEM);

memcpy(resp, cmd.resp_pkt->data, resp_size);

iwl_free_resp(&cmd);
return resp;
}

void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data;

IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason);
}

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@ -97,7 +97,10 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,

if (he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
sband->iftype_data &&
sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;

return flags;
@ -420,7 +423,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_hw *hw = mvm->hw;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd_v4 cfg_cmd = {
@ -449,8 +452,22 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;

cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD, 0);
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD),
0);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n",
cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n",
cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n",
cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n",
cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]);
if (cmd_ver == 4) {
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
sizeof(cfg_cmd), &cfg_cmd);
@ -474,8 +491,9 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u16 cmd_size = sizeof(cfg_cmd_v3);

/* In old versions of the API the struct is 4 bytes smaller */
if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD, 0) < 3)
if (iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD), 0) < 3)
cmd_size -= 4;

ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,

@ -454,8 +454,6 @@ static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
};

#define MCS_INDEX_PER_STREAM (8)

static const char *rs_pretty_lq_type(enum iwl_table_type type)
{
static const char * const lq_types[] = {

@ -83,8 +83,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen = len - hdrlen;

if (fraglen) {
int offset = (void *)hdr + hdrlen -
rxb_addr(rxb) + rxb_offset(rxb);
int offset = (u8 *)hdr + hdrlen -
(u8 *)rxb_addr(rxb) + rxb_offset(rxb);

skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
@ -640,7 +640,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;

if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
return;

if (vif->type != NL80211_IFTYPE_STATION)

@ -217,8 +217,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
fraglen = len - headlen;

if (fraglen) {
int offset = (void *)hdr + headlen + pad_len -
rxb_addr(rxb) + rxb_offset(rxb);
int offset = (u8 *)hdr + headlen + pad_len -
(u8 *)rxb_addr(rxb) + rxb_offset(rxb);

skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);

@ -20,7 +20,6 @@
#define IWL_SCAN_DWELL_FRAGMENTED 44
#define IWL_SCAN_DWELL_EXTENDED 90
#define IWL_SCAN_NUM_OF_FRAGS 3
#define IWL_SCAN_LAST_2_4_CHN 14

/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
@ -98,6 +97,7 @@ struct iwl_mvm_scan_params {
u32 n_6ghz_params;
bool scan_6ghz;
bool enable_6ghz_passive;
bool respect_p2p_go, respect_p2p_go_hb;
};

static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@ -169,17 +169,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}

static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int *global_cnt = data;

if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id < NUM_PHY_CTX)
*global_cnt += 1;
}

static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
return mvm->tcm.result.global_load;
@ -191,26 +180,31 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}

struct iwl_is_dcm_with_go_iterator_data {
struct iwl_mvm_scan_iter_data {
u32 global_cnt;
struct ieee80211_vif *current_vif;
bool is_dcm_with_p2p_go;
};

static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_is_dcm_with_go_iterator_data *data = _data;
struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif *curr_mvmvif =
iwl_mvm_vif_from_mac80211(data->current_vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_scan_iter_data *data = _data;
struct iwl_mvm_vif *curr_mvmvif;

/* exclude the given vif */
if (vif == data->current_vif)
if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id < NUM_PHY_CTX)
data->global_cnt += 1;

if (!data->current_vif || vif == data->current_vif)
return;

curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);

if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
}

@ -220,13 +214,18 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
int global_cnt = 0;
struct iwl_mvm_scan_iter_data data = {
.current_vif = vif,
.is_dcm_with_p2p_go = false,
.global_cnt = 0,
};

ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_cnt);
if (!global_cnt)
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_iterator,
&data);

if (!data.global_cnt)
return IWL_SCAN_TYPE_UNASSOC;

if (fw_has_api(&mvm->fw->ucode_capa,
@ -235,23 +234,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
(!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
return IWL_SCAN_TYPE_FRAGMENTED;

/* in case of DCM with GO where BSS DTIM interval < 220msec
/*
* in case of DCM with GO where BSS DTIM interval < 220msec
* set all scan requests as fast-balance scan
* */
*/
if (vif && vif->type == NL80211_IFTYPE_STATION &&
vif->bss_conf.dtim_period < 220) {
struct iwl_is_dcm_with_go_iterator_data data = {
.current_vif = vif,
.is_dcm_with_p2p_go = false,
};

ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_is_dcm_with_go_iterator,
&data);
if (data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
vif->bss_conf.dtim_period < 220 &&
data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}

if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
@ -651,9 +641,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
NL80211_BAND_2GHZ,
no_cck);

if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA,
0) < 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
tx_cmd[1].sta_id = mvm->aux_sta.sta_id;

@ -1090,8 +1078,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);

/* This function should not be called when using ADD_STA ver >=12 */
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) >= 12);
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);

cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@ -1142,8 +1129,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);

/* This function should not be called when using ADD_STA ver >=12 */
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) >= 12);
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);

cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@ -1156,7 +1142,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
void *cfg;
int ret, cmd_size;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
};
enum iwl_mvm_scan_type type;
enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
@ -1247,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
struct iwl_scan_config cfg;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
.len[0] = sizeof(cfg),
.data[0] = &cfg,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@ -1258,11 +1244,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)

memset(&cfg, 0, sizeof(cfg));

if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) < 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
cfg.bcast_sta_id = mvm->aux_sta.sta_id;
} else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
SCAN_CFG_CMD, 0) < 5) {
} else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
/*
* Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
* version 5.
@ -1662,7 +1646,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
}
}

static int
static void
iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct iwl_scan_probe_params_v4 *pp)
@ -1731,31 +1715,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,

pp->short_ssid_num = idex_s;
pp->bssid_num = idex_b;
return 0;
}

/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
static void
iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
static u32
iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
u32 n_channels,
struct iwl_scan_probe_params_v4 *pp,
struct iwl_scan_channel_params_v6 *cp,
enum nl80211_iftype vif_type)
{
struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config;
int i;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
params->scan_6ghz_params;
u32 ch_cnt;

for (i = 0; i < params->n_channels; i++) {
for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
struct iwl_scan_channel_cfg_umac *cfg =
&cp->channel_config[i];
&cp->channel_config[ch_cnt];

u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
bool force_passive, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;

/*
* Avoid performing passive scan on non PSC channels unless the
* scan is specifically a passive scan, i.e., no SSIDs
* configured in the scan command.
*/
if (!cfg80211_channel_is_psc(params->channels[i]) &&
!params->n_6ghz_params && params->n_ssids)
continue;

cfg->v1.channel_num = params->channels[i]->hw_value;
cfg->v2.band = 2;
cfg->v2.iter_count = 1;
@ -1875,8 +1868,16 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
else
flags |= bssid_bitmap | (s_ssid_bitmap << 16);

channel_cfg[i].flags |= cpu_to_le32(flags);
cfg->flags |= cpu_to_le32(flags);
ch_cnt++;
}

if (params->n_channels > ch_cnt)
IWL_DEBUG_SCAN(mvm,
"6GHz: reducing number channels: (%u->%u)\n",
params->n_channels, ch_cnt);

return ch_cnt;
}

static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
@ -1893,9 +1894,25 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;

/* set fragmented ebs for fragmented scan on HB channels */
if (iwl_mvm_is_scan_fragmented(params->hb_type))
if ((!iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->type)) ||
(iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->hb_type)))
flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;

/*
* force EBS in case the scan is a fragmented and there is a need to take P2P
* GO operation into consideration during scan operation.
*/
if ((!iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
(iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->hb_type) &&
params->respect_p2p_go_hb)) {
IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
}

return flags;
}

@ -2046,6 +2063,26 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
return flags;
}

static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif, int type)
{
u8 flags = 0;

if (iwl_mvm_is_cdb_supported(mvm)) {
if (params->respect_p2p_go)
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
if (params->respect_p2p_go_hb)
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
} else {
if (params->respect_p2p_go)
flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
}

return flags;
}

static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@ -2164,7 +2201,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_scan_req_umac_tail_v2 *tail_v2 =
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
@ -2248,13 +2285,17 @@ iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_general_params_v11 *gp,
u16 gen_flags)
u16 gen_flags, u8 gen_flags2)
{
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);

iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);

IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n",
gen_flags, gen_flags2);

gp->flags = cpu_to_le16(gen_flags);
gp->flags2 = gen_flags2;

if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
@ -2358,7 +2399,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
gen_flags);
gen_flags, 0);

ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@ -2384,6 +2425,7 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
int ret;
u16 gen_flags;
u8 gen_flags2;
u32 bitmap_ssid = 0;

mvm->scan_uid_status[uid] = type;
@ -2392,9 +2434,15 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cmd->uid = cpu_to_le32(uid);

gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);

if (version >= 15)
gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
else
gen_flags2 = 0;

iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
gen_flags);
gen_flags, gen_flags2);

ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@ -2417,14 +2465,16 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;

ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
if (ret)
return ret;
iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);

cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params,
params->n_channels,
pb, cp, vif->type);
if (!cp->count) {
mvm->scan_uid_status[uid] = 0;
return -EINVAL;
}

iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
params->n_channels,
pb, cp, vif->type);
cp->count = params->n_channels;
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
@ -2588,10 +2638,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (uid < 0)
return uid;

hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);

scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC,
scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);

for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
@ -2611,6 +2660,85 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
return uid;
}

struct iwl_mvm_scan_respect_p2p_go_iter_data {
struct ieee80211_vif *current_vif;
bool p2p_go;
enum nl80211_band band;
};

static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

/* exclude the given vif */
if (vif == data->current_vif)
return;

if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
mvmvif->phy_ctxt->id < NUM_PHY_CTX &&
(data->band == NUM_NL80211_BANDS ||
mvmvif->phy_ctxt->channel->band == data->band))
data->p2p_go = true;
}

static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool low_latency,
enum nl80211_band band)
{
struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
.current_vif = vif,
.p2p_go = false,
.band = band,
};

if (!low_latency)
return false;

ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_respect_p2p_go_iter,
&data);

return data.p2p_go;
}

static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
enum nl80211_band band)
{
bool low_latency = iwl_mvm_low_latency_band(mvm, band);

return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
}

static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
bool low_latency = iwl_mvm_low_latency(mvm);

return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
NUM_NL80211_BANDS);
}

static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->respect_p2p_go =
iwl_mvm_get_respect_p2p_go_band(mvm, vif,
NL80211_BAND_2GHZ);
params->respect_p2p_go_hb =
iwl_mvm_get_respect_p2p_go_band(mvm, vif,
NL80211_BAND_5GHZ);
} else {
params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
}
}

int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@ -2662,6 +2790,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_6ghz_params = req->scan_6ghz_params;
params.scan_6ghz = req->scan_6ghz;
iwl_mvm_fill_scan_type(mvm, &params, vif);
iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);

if (req->duration)
params.iter_notif = true;
@ -2753,6 +2882,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.scan_plans = req->scan_plans;

iwl_mvm_fill_scan_type(mvm, &params, vif);
iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);

/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
@ -2922,8 +3052,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);

ret = iwl_mvm_send_cmd_pdu(mvm,
iwl_cmd_id(SCAN_ABORT_UMAC,
IWL_ALWAYS_LONG_GROUP, 0),
WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
@ -2978,8 +3107,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC,
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);

base_size = iwl_scan_req_umac_get_size(scan_ver);

@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2015, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2012-2015, 2018-2022 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
@ -317,7 +317,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
|
||||
}
|
||||
|
||||
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
u16 *queueptr, u8 tid, u8 flags)
|
||||
u16 *queueptr, u8 tid)
|
||||
{
|
||||
int queue = *queueptr;
|
||||
struct iwl_scd_txq_cfg_cmd cmd = {
|
||||
@ -326,11 +326,28 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
};
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
if (mvm->sta_remove_requires_queue_remove) {
|
||||
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
|
||||
SCD_QUEUE_CONFIG_CMD);
|
||||
struct iwl_scd_queue_cfg_cmd remove_cmd = {
|
||||
.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
|
||||
.u.remove.queue = cpu_to_le32(queue),
|
||||
};
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
|
||||
sizeof(remove_cmd),
|
||||
&remove_cmd);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
iwl_trans_txq_free(mvm->trans, queue);
|
||||
*queueptr = IWL_MVM_INVALID_QUEUE;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
|
||||
@ -374,7 +391,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
mvm->queue_info[queue].reserved = false;
|
||||
|
||||
iwl_trans_txq_disable(mvm->trans, queue, false);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
|
||||
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
|
||||
|
||||
if (ret)
|
||||
@ -513,7 +530,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
|
||||
iwl_mvm_invalidate_sta_queue(mvm, queue,
|
||||
disable_agg_tids, false);
|
||||
|
||||
ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
|
||||
ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
|
||||
if (ret) {
|
||||
IWL_ERR(mvm,
|
||||
"Failed to free inactive queue %d (ret=%d)\n",
|
||||
@ -597,6 +614,39 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
|
||||
return queue;
|
||||
}
|
||||
|
||||
/* Re-configure the SCD for a queue that has already been configured */
|
||||
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
|
||||
int sta_id, int tid, int frame_limit, u16 ssn)
|
||||
{
|
||||
struct iwl_scd_txq_cfg_cmd cmd = {
|
||||
.scd_queue = queue,
|
||||
.action = SCD_CFG_ENABLE_QUEUE,
|
||||
.window = frame_limit,
|
||||
.sta_id = sta_id,
|
||||
.ssn = cpu_to_le16(ssn),
|
||||
.tx_fifo = fifo,
|
||||
.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
|
||||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
|
||||
.tid = tid,
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
|
||||
"Trying to reconfig unallocated queue %d\n", queue))
|
||||
return -ENXIO;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
|
||||
WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
|
||||
queue, fifo, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* If a given queue has a higher AC than the TID stream that is being compared
|
||||
* to, the queue needs to be redirected to the lower AC. This function does that
|
||||
@ -717,21 +767,40 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
|
||||
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
|
||||
u8 sta_id, u8 tid, unsigned int timeout)
|
||||
{
|
||||
int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
|
||||
mvm->trans->cfg->min_256_ba_txq_size);
|
||||
int queue, size;
|
||||
|
||||
if (tid == IWL_MAX_TID_COUNT) {
|
||||
tid = IWL_MGMT_TID;
|
||||
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
|
||||
mvm->trans->cfg->min_txq_size);
|
||||
} else {
|
||||
struct ieee80211_sta *sta;
|
||||
|
||||
rcu_read_lock();
|
||||
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
|
||||
|
||||
/* this queue isn't used for traffic (cab_queue) */
|
||||
if (IS_ERR_OR_NULL(sta)) {
|
||||
size = IWL_MGMT_QUEUE_SIZE;
|
||||
} else if (sta->he_cap.has_he) {
|
||||
/* support for 256 ba size */
|
||||
size = IWL_DEFAULT_QUEUE_SIZE_HE;
|
||||
} else {
|
||||
size = IWL_DEFAULT_QUEUE_SIZE;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
do {
|
||||
__le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
|
||||
/* take the min with bc tbl entries allowed */
|
||||
size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
|
||||
|
||||
queue = iwl_trans_txq_alloc(mvm->trans, enable,
|
||||
sta_id, tid, SCD_QUEUE_CFG,
|
||||
size, timeout);
|
||||
/* size needs to be power of 2 values for calculating read/write pointers */
|
||||
size = rounddown_pow_of_two(size);
|
||||
|
||||
do {
|
||||
queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
|
||||
tid, size, timeout);
|
||||
|
||||
if (queue < 0)
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
@ -1020,12 +1089,12 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
|
||||
* Remove the ones that did.
|
||||
*/
|
||||
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
|
||||
u16 tid_bitmap;
|
||||
u16 q_tid_bitmap;
|
||||
|
||||
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
|
||||
|
||||
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
|
||||
q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
|
||||
|
||||
/*
|
||||
* We need to take into account a situation in which a TXQ was
|
||||
@ -1038,7 +1107,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
|
||||
* Mark this queue in the right bitmap, we'll send the command
|
||||
* to the firmware later.
|
||||
*/
|
||||
if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
|
||||
if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
|
||||
set_bit(queue, changetid_queues);
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
@ -1338,7 +1407,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
||||
|
||||
out_err:
|
||||
queue_tmp = queue;
|
||||
iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
|
||||
iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1517,8 +1586,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.sta_id = sta->sta_id;
|
||||
|
||||
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
|
||||
0) >= 12 &&
|
||||
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
|
||||
sta->type == IWL_STA_AUX_ACTIVITY)
|
||||
cmd.mac_id_n_color = cpu_to_le32(mac_id);
|
||||
else
|
||||
@ -1785,8 +1853,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
|
||||
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
|
||||
continue;
|
||||
|
||||
iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
|
||||
0);
|
||||
iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
|
||||
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
}
|
||||
|
||||
@ -1994,7 +2061,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
|
||||
if (ret) {
|
||||
if (!iwl_mvm_has_new_tx_api(mvm))
|
||||
iwl_mvm_disable_txq(mvm, NULL, queue,
|
||||
IWL_MAX_TID_COUNT, 0);
|
||||
IWL_MAX_TID_COUNT);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2066,7 +2133,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
|
||||
return -EINVAL;
|
||||
|
||||
iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
|
||||
iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
|
||||
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
|
||||
if (ret)
|
||||
IWL_WARN(mvm, "Failed sending remove station\n");
|
||||
@ -2083,7 +2150,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
|
||||
if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
|
||||
return -EINVAL;
-	iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
+	iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
 	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
 	if (ret)
 		IWL_WARN(mvm, "Failed sending remove station\n");

@@ -2200,7 +2267,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
 	}

 	queue = *queueptr;
-	iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
+	iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
 	if (iwl_mvm_has_new_tx_api(mvm))
 		return;

@@ -2435,7 +2502,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

 	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);

-	iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
+	iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);

 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
 	if (ret)

@@ -2444,8 +2511,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	return ret;
 }

-#define IWL_MAX_RX_BA_SESSIONS 16
-
 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
 {
 	struct iwl_mvm_delba_data notif = {

@@ -2527,18 +2592,126 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
 	}
 }

+static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta,
+				  bool start, int tid, u16 ssn,
+				  u16 buf_size)
+{
+	struct iwl_mvm_add_sta_cmd cmd = {
+		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+		.sta_id = mvm_sta->sta_id,
+		.add_modify = STA_MODE_MODIFY,
+	};
+	u32 status;
+	int ret;
+
+	if (start) {
+		cmd.add_immediate_ba_tid = tid;
+		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+		cmd.rx_ba_window = cpu_to_le16(buf_size);
+		cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
+	} else {
+		cmd.remove_immediate_ba_tid = tid;
+		cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
+	}
+
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+	if (ret)
+		return ret;
+
+	switch (status & IWL_ADD_STA_STATUS_MASK) {
+	case ADD_STA_SUCCESS:
+		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+			     start ? "start" : "stopp");
+		if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
+			    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
+			return -EINVAL;
+		return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
+	case ADD_STA_IMMEDIATE_BA_FAILURE:
+		IWL_WARN(mvm, "RX BA Session refused by fw\n");
+		return -ENOSPC;
+	default:
+		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+			start ? "start" : "stopp", status);
+		return -EIO;
+	}
+}
+
+static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta,
+				  bool start, int tid, u16 ssn,
+				  u16 buf_size, int baid)
+{
+	struct iwl_rx_baid_cfg_cmd cmd = {
+		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+	};
+	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+	int ret;
+
+	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+	if (start) {
+		cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+		cmd.alloc.tid = tid;
+		cmd.alloc.ssn = cpu_to_le16(ssn);
+		cmd.alloc.win_size = cpu_to_le16(buf_size);
+		baid = -EIO;
+	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+		cmd.remove_v1.baid = cpu_to_le32(baid);
+		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+	} else {
+		cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+		cmd.remove.tid = cpu_to_le32(tid);
+	}
+
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
+					  &cmd, &baid);
+	if (ret)
+		return ret;
+
+	if (!start) {
+		/* ignore firmware baid on remove */
+		baid = 0;
+	}
+
+	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+		     start ? "start" : "stopp");
+
+	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
+		return -EINVAL;
+
+	return baid;
+}
+
+static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
+			      bool start, int tid, u16 ssn, u16 buf_size,
+			      int baid)
+{
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
+		return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
+					      tid, ssn, buf_size, baid);
+
+	return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
+				      tid, ssn, buf_size);
+}
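
The three functions above form the usual iwlwifi migration pattern: probe a firmware capability TLV once, then dispatch each logical operation to either the legacy ADD_STA flow or the new dedicated command. A minimal standalone C sketch of that dispatch shape (the capability bit and the two handlers are illustrative stand-ins, not the driver's real types):

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for fw_has_capa(); the bit position is hypothetical */
    static bool fw_has_baid_ml_support(unsigned long capa)
    {
        return capa & (1UL << 0);
    }

    static int baid_op_sta(bool start) { puts("legacy ADD_STA flow"); return 0; }
    static int baid_op_cmd(bool start) { puts("new BAID allocation command"); return 0; }

    /* route one logical operation to whichever API the firmware advertises */
    static int baid_op(unsigned long capa, bool start)
    {
        return fw_has_baid_ml_support(capa) ? baid_op_cmd(start)
                                            : baid_op_sta(start);
    }

    int main(void)
    {
        baid_op(0, true); /* old firmware: ADD_STA */
        baid_op(1, true); /* new firmware: dedicated command */
        return 0;
    }

Keeping the capability check in one small wrapper means the caller, iwl_mvm_sta_rx_agg() below, never has to know which firmware generation it is talking to.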

 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_mvm_add_sta_cmd cmd = {};
 	struct iwl_mvm_baid_data *baid_data = NULL;
-	int ret;
-	u32 status;
+	int ret, baid;
+	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
+							       IWL_MAX_BAID_OLD;

 	lockdep_assert_held(&mvm->mutex);

-	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
 		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
 		return -ENOSPC;
 	}

@@ -2584,59 +2757,29 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			reorder_buf_size / sizeof(baid_data->entries[0]);
 	}

-	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-	cmd.sta_id = mvm_sta->sta_id;
-	cmd.add_modify = STA_MODE_MODIFY;
-	if (start) {
-		cmd.add_immediate_ba_tid = (u8) tid;
-		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
-		cmd.rx_ba_window = cpu_to_le16(buf_size);
+	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
+		baid = mvm_sta->tid_to_baid[tid];
 	} else {
-		cmd.remove_immediate_ba_tid = (u8) tid;
-	}
-	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
-				  STA_MODIFY_REMOVE_BA_TID;
-
-	status = ADD_STA_SUCCESS;
-	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
-					  iwl_mvm_add_sta_cmd_size(mvm),
-					  &cmd, &status);
-	if (ret)
-		goto out_free;
-
-	switch (status & IWL_ADD_STA_STATUS_MASK) {
-	case ADD_STA_SUCCESS:
-		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
-			     start ? "start" : "stopp");
-		break;
-	case ADD_STA_IMMEDIATE_BA_FAILURE:
-		IWL_WARN(mvm, "RX BA Session refused by fw\n");
-		ret = -ENOSPC;
-		break;
-	default:
-		ret = -EIO;
-		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
-			start ? "start" : "stopp", status);
-		break;
+		/* we don't really need it in this case */
+		baid = -1;
 	}

-	if (ret)
+	/* Don't send command to remove (start=0) BAID during restart */
+	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
+					  baid);
+
+	if (baid < 0) {
+		ret = baid;
 		goto out_free;
+	}

 	if (start) {
-		u8 baid;
-
 		mvm->rx_ba_sessions++;

 		if (!iwl_mvm_has_new_rx_api(mvm))
 			return 0;

-		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
-			ret = -EINVAL;
-			goto out_free;
-		}
-		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
-			    IWL_ADD_STA_BAID_SHIFT);
 		baid_data->baid = baid;
 		baid_data->timeout = timeout;
 		baid_data->last_rx = jiffies;

@@ -2664,7 +2807,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
 		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
 	} else {
-		u8 baid = mvm_sta->tid_to_baid[tid];
+		baid = mvm_sta->tid_to_baid[tid];

 		if (mvm->rx_ba_sessions > 0)
 			/* check that restart flow didn't zero the counter */

@@ -3239,8 +3382,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 	int i, size;
 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
 				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
-	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					    ADD_STA_KEY,
+	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
 					    new_api ? 2 : 1);

 	if (sta_id == IWL_MVM_INVALID_STA)
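
This hunk and several later ones drop the separate group argument from iwl_fw_lookup_cmd_ver(): the lookup now takes a single wide command ID, and a plain opcode such as ADD_STA_KEY implies the legacy group. The packing itself is the driver's WIDE_ID() macro, group in the high byte; a compilable illustration (the group/opcode values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* same shape as the driver's WIDE_ID(): group in bits 8..15 */
    #define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

    int main(void)
    {
        uint32_t id = WIDE_ID(0x05, 0x0b); /* hypothetical group/opcode */

        printf("wide id 0x%03x -> group 0x%x, opcode 0x%02x\n",
               id, id >> 8, id & 0xff);
        return 0;
    }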

@@ -3940,7 +4082,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

-	if (!WARN_ON(!mvmsta))
+	if (mvmsta)
 		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

 	rcu_read_unlock();

@@ -3999,3 +4141,21 @@ out:
 	iwl_mvm_dealloc_int_sta(mvm, sta);
 	return ret;
 }
+
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   u32 mac_id)
+{
+	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
+		.mac_id = cpu_to_le32(mac_id),
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
+				   CMD_ASYNC,
+				   sizeof(cancel_channel_switch_cmd),
+				   &cancel_channel_switch_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
+}
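
Note that the new function sends its command with CMD_ASYNC: cancelling a channel switch is fire-and-forget, so the only reportable failure is a queueing error, never a firmware status. A tiny model of that contract (the flag value and helper are stand-ins):

    #include <stdio.h>

    #define CMD_ASYNC 0x1 /* stand-in for the driver's host-command flag */

    static int send_cmd(unsigned int flags)
    {
        if (flags & CMD_ASYNC)
            return 0; /* queued; any firmware response is handled later */
        /* a synchronous send would block here waiting for the response */
        return 0;
    }

    int main(void)
    {
        if (send_cmd(CMD_ASYNC)) /* mirrors the error path above */
            fprintf(stderr, "Failed to cancel the channel switch\n");
        return 0;
    }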

@@ -548,4 +548,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
 			 u8 *key, u32 key_len);
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   u32 mac_id);
 #endif /* __sta_h__ */

@@ -97,8 +97,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 		/* In newer version of this command an aux station is added only
 		 * in cases of dedicated tx queue and need to be removed in end
 		 * of use */
-		if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-					  ADD_STA, 0) >= 12)
+		if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12)
 			iwl_mvm_rm_aux_sta(mvm);
 	}

@@ -658,8 +657,8 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
 	};
 	int ret;

-	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
-						   MAC_CONF_GROUP, 0),
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
 				   0, sizeof(cmd), &cmd);
 	if (ret)
 		IWL_ERR(mvm,

@@ -923,8 +922,8 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
 	}

 	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
-	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
-						    MAC_CONF_GROUP, 0),
+	return iwl_mvm_send_cmd_pdu(mvm,
+				    WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
 				    0, sizeof(cmd), &cmd);
 }

@@ -1162,8 +1161,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-	const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
-					 MAC_CONF_GROUP, 0) };
+	const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
 	struct iwl_notification_wait wait_notif;
 	struct iwl_mvm_session_prot_cmd cmd = {
 		.id_and_color =

@@ -1201,8 +1199,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,

 	if (!wait_for_notif) {
 		if (iwl_mvm_send_cmd_pdu(mvm,
-					 iwl_cmd_id(SESSION_PROTECTION_CMD,
-						    MAC_CONF_GROUP, 0),
+					 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
 					 0, sizeof(cmd), &cmd)) {
 			IWL_ERR(mvm,
 				"Couldn't send the SESSION_PROTECTION_CMD\n");

@@ -1219,8 +1216,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 				   iwl_mvm_session_prot_notif, NULL);

 	if (iwl_mvm_send_cmd_pdu(mvm,
-				 iwl_cmd_id(SESSION_PROTECTION_CMD,
-					    MAC_CONF_GROUP, 0),
+				 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
 				 0, sizeof(cmd), &cmd)) {
 		IWL_ERR(mvm,
 			"Couldn't send the SESSION_PROTECTION_CMD\n");

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2016 Intel Deutschland GmbH
  */

@@ -160,6 +160,11 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	notif = (struct ct_kill_notif *)pkt->data;
 	IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
 		       notif->temperature);
+	if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP,
+				    CT_KILL_NOTIFICATION, 0) > 1)
+		IWL_DEBUG_TEMP(mvm,
+			       "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n",
+			       notif->dts, notif->scheme);

 	iwl_mvm_enter_ctkill(mvm);
 }

@@ -240,8 +245,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
 	 * a response. For older versions we send the command and wait for a
 	 * notification (no command TLV for previous versions).
 	 */
-	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-					CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+					WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
 					IWL_FW_CMD_VER_UNKNOWN);
 	if (cmd_ver == 1)
 		return iwl_mvm_send_temp_cmd(mvm, true, temp);

@@ -318,15 +318,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,

 	/* info->control is only relevant for non HW rate control */
 	if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
-		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
 		/* HT rate doesn't make sense for a non data frame */
 		WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
 			  !ieee80211_is_data(fc),
 			  "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
 			  info->control.rates[0].flags,
 			  info->control.rates[0].idx,
-			  le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
+			  le16_to_cpu(fc),
+			  sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);

 		rate_idx = info->control.rates[0].idx;
 	}

@@ -351,7 +350,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
 	is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);

 	/* Set CCK or OFDM flag */
-	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) {
+	if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
 		if (!is_cck)
 			rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
 		else

@@ -654,7 +653,8 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
 	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
 	struct iwl_probe_resp_data *resp_data;
-	u8 *ie, *pos;
+	const u8 *ie;
+	u8 *pos;
 	u8 match[] = {
 		(WLAN_OUI_WFA >> 16) & 0xff,
 		(WLAN_OUI_WFA >> 8) & 0xff,

@@ -671,10 +671,10 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
 	if (!resp_data->notif.noa_active)
 		goto out;

-	ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
-					  mgmt->u.probe_resp.variable,
-					  skb->len - base_len,
-					  match, 4, 2);
+	ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+				    mgmt->u.probe_resp.variable,
+				    skb->len - base_len,
+				    match, 4, 2);
 	if (!ie) {
 		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
 		goto out;

@@ -1602,8 +1602,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	seq_ctl = le16_to_cpu(hdr->seq_ctrl);

 	if (unlikely(!seq_ctl)) {
-		struct ieee80211_hdr *hdr = (void *)skb->data;
-
 		/*
 		 * If it is an NDP, we can't update next_reclaim since
 		 * its sequence control is 0. Note that for that same

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */

@@ -169,8 +169,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,

 u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
 {
-	if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
-				  TX_CMD, 0) > 8)
+	if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
 		/* In the new rate legacy rates are indexed:
 		 * 0 - 3 for CCK and 0 - 7 for OFDM.
 		 */

@@ -241,38 +240,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
 	return last_idx;
 }

-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
-			 int tid, int frame_limit, u16 ssn)
-{
-	struct iwl_scd_txq_cfg_cmd cmd = {
-		.scd_queue = queue,
-		.action = SCD_CFG_ENABLE_QUEUE,
-		.window = frame_limit,
-		.sta_id = sta_id,
-		.ssn = cpu_to_le16(ssn),
-		.tx_fifo = fifo,
-		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
-			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
-		.tid = tid,
-	};
-	int ret;
-
-	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
-		return -EINVAL;
-
-	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
-		 "Trying to reconfig unallocated queue %d\n", queue))
-		return -ENXIO;
-
-	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
-
-	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
-	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
-		  queue, fifo, ret);
-
-	return ret;
-}
-
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
  * @mvm: Driver data.

@@ -480,8 +447,7 @@ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
 		cmd.low_latency_tx = 1;
 	}

-	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
-						 MAC_CONF_GROUP, 0),
+	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
 				 0, sizeof(cmd), &cmd))
 		IWL_ERR(mvm, "Failed to send low latency command\n");
 }

@@ -495,14 +495,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
 	{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
 	{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+	{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},

 	/* Ma devices */
 	{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
 	{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
-	{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)},

 	/* Bz devices */
 	{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+	{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+	{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
 #endif /* CONFIG_IWLMVM */

 	{0}

@@ -668,8 +670,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
 	IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
 	IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
 	IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
-	IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
-	IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
+	IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+	IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),

 	/* SO with GF2 */
 	IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),

@@ -682,6 +684,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
 	IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
 	IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
 	IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+	IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+	IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),

 	/* MA with GF2 */
 	IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name),

@@ -1301,7 +1305,30 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
 		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
 		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
 		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
+/* MsP */
+/* For now we use the same FW as MR, but this will change in the future. */
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_ma_a0_ms_a0, iwl_ax204_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_snj_a0_ms_a0, iwl_ax204_name)

 #endif /* CONFIG_IWLMVM */
 };

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */

@@ -103,6 +103,18 @@ struct iwl_rx_completion_desc {
 	u8 reserved2[25];
 } __packed;

+/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+	__le16 rbid;
+	u8 flags;
+	u8 reserved[1];
+} __packed;
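
Because this descriptor mirrors a DMA layout shared with the device, its size is part of the hardware ABI; the rx.c hunk further down pins it with BUILD_BUG_ON(sizeof(...) != 4). The equivalent compile-time guard in standalone C11, under the assumption that __le16 is a plain 16-bit field:

    #include <assert.h>
    #include <stdint.h>

    struct iwl_rx_completion_desc_bz {
        uint16_t rbid;      /* __le16 in the driver */
        uint8_t flags;
        uint8_t reserved[1];
    } __attribute__((packed));

    /* compile-time ABI guard, same intent as the kernel's BUILD_BUG_ON() */
    static_assert(sizeof(struct iwl_rx_completion_desc_bz) == 4,
                  "Bz completion descriptor must be exactly 4 bytes");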

 /**
  * struct iwl_rxq - Rx queue
  * @id: queue index

@@ -133,11 +145,7 @@ struct iwl_rxq {
 	int id;
 	void *bd;
 	dma_addr_t bd_dma;
-	union {
-		void *used_bd;
-		__le32 *bd_32;
-		struct iwl_rx_completion_desc *cd;
-	};
+	void *used_bd;
 	dma_addr_t used_bd_dma;
 	u32 read;
 	u32 write;

@@ -261,6 +269,20 @@ enum iwl_pcie_fw_reset_state {
 	FW_RESET_ERROR,
 };

+/**
+ * enum iwl_pcie_imr_status - imr dma transfer state
+ * @IMR_D2S_IDLE: default value of the dma transfer
+ * @IMR_D2S_REQUESTED: dma transfer requested
+ * @IMR_D2S_COMPLETED: dma transfer completed
+ * @IMR_D2S_ERROR: dma transfer error
+ */
+enum iwl_pcie_imr_status {
+	IMR_D2S_IDLE,
+	IMR_D2S_REQUESTED,
+	IMR_D2S_COMPLETED,
+	IMR_D2S_ERROR,
+};
+
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data

@@ -319,6 +341,8 @@ enum iwl_pcie_fw_reset_state {
  * @alloc_page_lock: spinlock for the page allocator
  * @alloc_page: allocated page to still use parts of
  * @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @imr_status: imr dma state machine
+ * @imr_waitq: imr wait queue for dma completion
  * @rf_name: name/version of the CRF, if any
  */
 struct iwl_trans_pcie {

@@ -363,7 +387,7 @@ struct iwl_trans_pcie {

 	/* PCI bus related data */
 	struct pci_dev *pci_dev;
-	void __iomem *hw_base;
+	u8 __iomem *hw_base;

 	bool ucode_write_complete;
 	bool sx_complete;

@@ -414,7 +438,8 @@ struct iwl_trans_pcie {
 	bool fw_reset_handshake;
 	enum iwl_pcie_fw_reset_state fw_reset_state;
 	wait_queue_head_t fw_reset_waitq;
-
+	enum iwl_pcie_imr_status imr_status;
+	wait_queue_head_t imr_waitq;
 	char rf_name[32];
 };

@@ -809,4 +834,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 			       struct iwl_host_cmd *cmd);
 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 			  struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+				u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+			    u32 dst_addr, u64 src_addr, u32 byte_cnt);

 #endif /* __iwl_trans_int_pcie_h__ */

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */

@@ -190,11 +190,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 	}

 	rxq->write_actual = round_down(rxq->write, 8);
-	if (trans->trans_cfg->mq_rx_supported)
+	if (!trans->trans_cfg->mq_rx_supported)
+		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
+			    HBUS_TARG_WRPTR_RX_Q(rxq->id));
+	else
 		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
 			    rxq->write_actual);
-	else
-		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }

 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)

@@ -652,23 +655,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
 	iwl_pcie_rx_allocator(trans_pcie->trans);
 }

-static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
 {
-	struct iwl_rx_transfer_desc *rx_td;
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		return sizeof(struct iwl_rx_transfer_desc);

-	if (use_rx_td)
-		return sizeof(*rx_td);
-	else
-		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
-			sizeof(__le32);
+	return trans->trans_cfg->mq_rx_supported ?
+		sizeof(__le64) : sizeof(__le32);
+}
+
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+		return sizeof(struct iwl_rx_completion_desc_bz);
+
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		return sizeof(struct iwl_rx_completion_desc);
+
+	return sizeof(__le32);
 }
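
The ordering of these checks matters: every Bz device also satisfies the AX210 comparison, so the newest family has to be tested first or it would silently get the 32-byte descriptor. A condensed standalone illustration (the numeric family values are stand-ins for the driver's enum):

    #include <stdio.h>

    enum family { FAM_LEGACY = 0, FAM_AX210 = 10, FAM_BZ = 11 };

    static int used_bd_size(enum family f)
    {
        if (f >= FAM_BZ)    /* must precede the AX210 check */
            return 4;       /* sizeof(struct iwl_rx_completion_desc_bz) */
        if (f >= FAM_AX210)
            return 32;      /* sizeof(struct iwl_rx_completion_desc) */
        return 4;           /* sizeof(__le32) */
    }

    int main(void)
    {
        printf("%d %d %d\n", used_bd_size(FAM_LEGACY),
               used_bd_size(FAM_AX210), used_bd_size(FAM_BZ));
        return 0;
    }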

 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 				  struct iwl_rxq *rxq)
 {
-	bool use_rx_td = (trans->trans_cfg->device_family >=
-			  IWL_DEVICE_FAMILY_AX210);
-	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+	int free_size = iwl_pcie_free_bd_size(trans);

 	if (rxq->bd)
 		dma_free_coherent(trans->dev,

@@ -682,8 +692,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,

 	if (rxq->used_bd)
 		dma_free_coherent(trans->dev,
-				  (use_rx_td ? sizeof(*rxq->cd) :
-				   sizeof(__le32)) * rxq->queue_size,
+				  iwl_pcie_used_bd_size(trans) *
+				  rxq->queue_size,
 				  rxq->used_bd, rxq->used_bd_dma);
 	rxq->used_bd_dma = 0;
 	rxq->used_bd = NULL;

@@ -707,7 +717,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	else
 		rxq->queue_size = RX_QUEUE_SIZE;

-	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+	free_size = iwl_pcie_free_bd_size(trans);

 	/*
 	 * Allocate the circular buffer of Read Buffer Descriptors

@@ -720,14 +730,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,

 	if (trans->trans_cfg->mq_rx_supported) {
 		rxq->used_bd = dma_alloc_coherent(dev,
-						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+						  iwl_pcie_used_bd_size(trans) *
+						  rxq->queue_size,
 						  &rxq->used_bd_dma,
 						  GFP_KERNEL);
 		if (!rxq->used_bd)
 			goto err;
 	}

-	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+	rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
 	rxq->rb_stts_dma =
 		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

@@ -1307,9 +1318,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 			"Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
 			rxq->id, offset,
 			iwl_get_cmd_string(trans,
-					   iwl_cmd_id(pkt->hdr.cmd,
-						      pkt->hdr.group_id,
-						      0)),
+					   WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
 			pkt->hdr.group_id, pkt->hdr.cmd,
 			le16_to_cpu(pkt->hdr.sequence));

@@ -1319,7 +1328,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);

 		/* check that what the device tells us made sense */
-		if (offset > max_len)
+		if (len < sizeof(*pkt) || offset > max_len)
 			break;
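
This is the potential buffer underflow called out in the changelog: len is read from a device-written descriptor, and a value smaller than the packet header could previously drive the offset/alignment arithmetic while header fields were read past the valid data. The shape of the added guard in isolation (types simplified for the sketch):

    #include <stdint.h>

    struct pkt_hdr { uint8_t cmd, group_id; uint16_t sequence; };

    /* reject device-reported lengths that cannot even hold the header */
    static int rb_chunk_ok(uint32_t len, uint32_t offset, uint32_t max_len)
    {
        return len >= sizeof(struct pkt_hdr) && offset <= max_len;
    }

Validating device-provided sizes before using them is the general rule here: the DMA buffer is writable by the device and has to be treated as untrusted input.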

 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);

@@ -1419,6 +1428,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
 	u16 vid;

 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);

 	if (!trans->trans_cfg->mq_rx_supported) {
 		rxb = rxq->queue[i];

@@ -1426,11 +1436,20 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
 		return rxb;
 	}

-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
-		vid = le16_to_cpu(rxq->cd[i].rbid);
-		*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+		struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+		vid = le16_to_cpu(cd[i].rbid);
+		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+		struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+		vid = le16_to_cpu(cd[i].rbid);
+		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
 	} else {
-		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+		__le32 *cd = rxq->used_bd;
+
+		vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
 	}

 	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))

@@ -1608,10 +1627,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
 		return IRQ_NONE;

-	if (WARN_ONCE(!rxq,
-		      "[%d] Got MSI-X interrupt before we have Rx queues",
-		      entry->entry))
+	if (!rxq) {
+		if (net_ratelimit())
+			IWL_ERR(trans,
+				"[%d] Got MSI-X interrupt before we have Rx queues\n",
+				entry->entry);
 		return IRQ_NONE;
+	}

 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
 	IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);

@@ -1954,7 +1976,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
 		}
 		/* Sending RX interrupt require many steps to be done in the
-		 * the device:
+		 * device:
 		 * 1- write interrupt to current index in ICT table.
 		 * 2- dma RX frame.
 		 * 3- update RX shared data to indicate last write index.

@@ -1998,6 +2020,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 		/* Wake up uCode load routine, now that load is complete */
 		trans_pcie->ucode_write_complete = true;
 		wake_up(&trans_pcie->ucode_write_waitq);
+		/* Wake up IMR write routine, now that write to SRAM is complete */
+		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+			trans_pcie->imr_status = IMR_D2S_COMPLETED;
+			wake_up(&trans_pcie->ucode_write_waitq);
+		}
 	}

 	if (inta & ~handled) {

@@ -2211,7 +2238,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 	}

 	/* This "Tx" DMA channel is used only for loading uCode */
-	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
+	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
+		isr_stats->tx++;
+
+		/* Wake up IMR routine once write to SRAM is complete */
+		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+			trans_pcie->imr_status = IMR_D2S_COMPLETED;
+			wake_up(&trans_pcie->ucode_write_waitq);
+		}
+	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
 		isr_stats->tx++;
 		/*

@@ -2220,6 +2257,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 		 */
 		trans_pcie->ucode_write_complete = true;
 		wake_up(&trans_pcie->ucode_write_waitq);
+
+		/* Wake up IMR routine once write to SRAM is complete */
+		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+			trans_pcie->imr_status = IMR_D2S_COMPLETED;
+			wake_up(&trans_pcie->ucode_write_waitq);
+		}
 	}
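
The D2S channel-0 cause now has two producers, uCode load and the new IMR DMA, and only driver state says which one fired; checking imr_status first demultiplexes the shared cause. A stripped-down model of that ISR branching:

    #include <stdio.h>

    enum imr_status { IMR_D2S_IDLE, IMR_D2S_REQUESTED, IMR_D2S_COMPLETED };

    #define CAUSE_D2S_CH0 0x1 /* stand-in for MSIX_FH_INT_CAUSES_D2S_CH0_NUM */

    static void handle_d2s(unsigned int cause, enum imr_status *st)
    {
        if ((cause & CAUSE_D2S_CH0) && *st == IMR_D2S_REQUESTED) {
            *st = IMR_D2S_COMPLETED;       /* IMR transfer finished */
            puts("IMR Complete interrupt");
        } else if (cause & CAUSE_D2S_CH0) {
            puts("uCode load interrupt");  /* same cause, other owner */
        }
    }

    int main(void)
    {
        enum imr_status st = IMR_D2S_REQUESTED;

        handle_d2s(CAUSE_D2S_CH0, &st); /* taken as IMR completion */
        handle_d2s(CAUSE_D2S_CH0, &st); /* falls through to uCode load */
        return 0;
    }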

 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)

@@ -2234,7 +2277,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 			 inta_fh);
 		isr_stats->sw++;
 		/* during FW reset flow report errors from there */
-		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+			trans_pcie->imr_status = IMR_D2S_ERROR;
+			wake_up(&trans_pcie->imr_waitq);
+		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
 			trans_pcie->fw_reset_state = FW_RESET_ERROR;
 			wake_up(&trans_pcie->fw_reset_waitq);
 		} else {

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */

@@ -745,7 +745,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
 			iwl_set_bits_prph(trans, LMPM_CHICK,
 					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

-		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
 		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
 						   copy_size);

@@ -1949,6 +1949,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
 	trans->txqs.page_offs = trans_cfg->cb_data_offs;
 	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+	trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;

 	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
 		trans_pcie->n_no_reclaim_cmds = 0;

@@ -2864,7 +2865,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
 {
 	struct iwl_trans *trans = file->private_data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
+	u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
 	struct cont_rec *data = &trans_pcie->fw_mon_data;
 	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
 	ssize_t size, bytes_copied = 0;

@@ -3469,7 +3470,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
 	.d3_resume = iwl_trans_pcie_d3_resume,				\
 	.interrupts = iwl_trans_pci_interrupts,				\
-	.sync_nmi = iwl_trans_pcie_sync_nmi				\
+	.sync_nmi = iwl_trans_pcie_sync_nmi,				\
+	.imr_dma_data = iwl_trans_pcie_copy_imr				\

 static const struct iwl_trans_ops trans_ops_pcie = {
 	IWL_TRANS_COMMON_OPS,

@@ -3554,6 +3556,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	mutex_init(&trans_pcie->mutex);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
+	init_waitqueue_head(&trans_pcie->imr_waitq);

 	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
 						   WQ_HIGHPRI | WQ_UNBOUND, 1);

@@ -3682,3 +3685,41 @@ out_free_trans:
 	iwl_trans_free(trans);
 	return ERR_PTR(ret);
 }
+
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+				u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+	iwl_write_prph(trans, IMR_UREG_CHICK,
+		       iwl_read_prph(trans, IMR_UREG_CHICK) |
+		       IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
+	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
+	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
+		       (u32)(src_addr & 0xFFFFFFFF));
+	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
+		       iwl_get_dma_hi_addr(src_addr));
+	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
+		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
+		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
+		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
+}
+
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+			    u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret = -1;
+
+	trans_pcie->imr_status = IMR_D2S_REQUESTED;
+	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
+	ret = wait_event_timeout(trans_pcie->imr_waitq,
+				 trans_pcie->imr_status !=
+				 IMR_D2S_REQUESTED, 5 * HZ);
+	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
+		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
+		iwl_trans_pcie_dump_regs(trans);
+		return -ETIMEDOUT;
+	}
+	trans_pcie->imr_status = IMR_D2S_IDLE;
+	return 0;
+}
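
iwl_trans_pcie_copy_imr() is the blocking half: kick the DMA, then sleep until the ISR flips imr_status or five seconds elapse; wait_event_timeout() returns 0 only on timeout, so both that and the explicit error state collapse into -ETIMEDOUT here. The same request/wait/complete shape in a userspace sketch (pthreads stand in for the kernel waitqueue, and the timeout is omitted for brevity):

    #include <pthread.h>
    #include <stdio.h>

    static enum { REQUESTED, COMPLETED } status = REQUESTED;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;

    static void *isr(void *arg) /* stands in for the MSI-X handler */
    {
        pthread_mutex_lock(&lock);
        status = COMPLETED;
        pthread_cond_signal(&waitq); /* wake_up(&trans_pcie->imr_waitq) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, isr, NULL);
        pthread_mutex_lock(&lock);
        while (status == REQUESTED) /* wait_event(), minus the timeout */
            pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("copy done");
        return 0;
    }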

@@ -213,7 +213,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,

 	/* map the remaining (adjusted) nocopy/dup fragments */
 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-		const void *data = cmddata[i];
+		void *data = (void *)(uintptr_t)cmddata[i];

 		if (!cmdlen[i])
 			continue;

@@ -222,7 +222,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 			continue;
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 			data = dup_buf;
-		phys_addr = dma_map_single(trans->dev, (void *)data,
+		phys_addr = dma_map_single(trans->dev, data,
 					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			idx = -ENOMEM;

@@ -154,7 +154,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 	void *tfd;
 	u32 num_tbs;

-	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+	tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

 	if (reset)
 		memset(tfd, 0, trans->txqs.tfd.size);

@@ -540,7 +540,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 				  trans->cfg->min_txq_size);
 		else
 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-					  trans->cfg->min_256_ba_txq_size);
+					  trans->cfg->min_ba_txq_size);
 		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
 		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
 				    cmd_queue);

@@ -594,7 +594,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 				  trans->cfg->min_txq_size);
 		else
 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-					  trans->cfg->min_256_ba_txq_size);
+					  trans->cfg->min_ba_txq_size);
 		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
 				   cmd_queue);
 		if (ret) {

@@ -877,7 +877,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 	if (configure_scd) {
 		iwl_scd_txq_set_inactive(trans, txq_id);

-		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+		iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
 				    ARRAY_SIZE(zero_val));
 	}

@@ -1114,7 +1114,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,

 	/* map the remaining (adjusted) nocopy/dup fragments */
 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-		const void *data = cmddata[i];
+		void *data = (void *)(uintptr_t)cmddata[i];

 		if (!cmdlen[i])
 			continue;

@@ -1123,7 +1123,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 			continue;
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 			data = dup_buf;
-		phys_addr = dma_map_single(trans->dev, (void *)data,
+		phys_addr = dma_map_single(trans->dev, data,
 					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,

@@ -1201,7 +1201,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	cmd = txq->entries[cmd_index].cmd;
 	meta = &txq->entries[cmd_index].meta;
 	group_id = cmd->hdr.group_id;
-	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
+	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);

 	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

@@ -1,13 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  */
 #include <net/tso.h>
 #include <linux/tcp.h>

 #include "iwl-debug.h"
 #include "iwl-io.h"
+#include "fw/api/commands.h"
 #include "fw/api/tx.h"
+#include "fw/api/datapath.h"
 #include "queue/tx.h"
 #include "iwl-fh.h"
 #include "iwl-scd.h"

@@ -41,13 +43,13 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
 	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
-		struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+		struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

 		/* Starting from AX210, the HW expects bytes */
 		WARN_ON(trans->txqs.bc_table_dword);
 		WARN_ON(len > 0x3FFF);
 		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
-		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+		scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
 	} else {
 		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

@@ -189,7 +191,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
 		return NULL;

 	/* set the chaining pointer to the previous page if there */
-	*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+	*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
 	*page_ptr = ret;

 	return ret;

@@ -314,7 +316,7 @@ alloc:
 		return NULL;
 	p->pos = page_address(p->page);
 	/* set the chaining pointer to NULL */
-	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+	*(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
 out:
 	*page_ptr = p->page;
 	get_page(p->page);

@@ -963,7 +965,7 @@ void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
 	while (next) {
 		struct page *tmp = next;

-		next = *(void **)(page_address(next) + PAGE_SIZE -
+		next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
 				  sizeof(void *));
 		__free_page(tmp);
 	}

@@ -1083,9 +1085,8 @@ error:
 	return -ENOMEM;
 }

-static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
-				 struct iwl_txq **intxq, int size,
-				 unsigned int timeout)
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
 {
 	size_t bc_tbl_size, bc_tbl_entries;
 	struct iwl_txq *txq;

@@ -1097,18 +1098,18 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
 	bc_tbl_entries = bc_tbl_size / sizeof(u16);

 	if (WARN_ON(size > bc_tbl_entries))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);

 	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
 	if (!txq)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);

 	txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
 					  &txq->bc_tbl.dma);
 	if (!txq->bc_tbl.addr) {
 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
 		kfree(txq);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}

 	ret = iwl_txq_alloc(trans, txq, size, false);

@@ -1124,12 +1125,11 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,

 	txq->wd_timeout = msecs_to_jiffies(timeout);

-	*intxq = txq;
-	return 0;
+	return txq;

 error:
 	iwl_txq_gen2_free_memory(trans, txq);
-	return ret;
+	return ERR_PTR(ret);
 }
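
Converting iwl_txq_dyn_alloc_dma() to return the queue via ERR_PTR() removes the out-parameter: the error code travels inside the pointer value and the caller unwraps it with IS_ERR()/PTR_ERR(), as the next hunk shows. The encoding works because the top 4095 values of the address space are never valid kernel pointers; a userspace re-creation of the idiom:

    #include <stdio.h>

    #define MAX_ERRNO 4095 /* mirrors the kernel's convention */

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_txq(int fail)
    {
        return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)"txq";
    }

    int main(void)
    {
        void *txq = alloc_txq(1);

        if (IS_ERR(txq))
            printf("alloc failed: %ld\n", PTR_ERR(txq));
        return 0;
    }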

 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,

@@ -1186,30 +1186,57 @@ error_free_resp:
 	return ret;
 }

-int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
-		      int cmd_id, int size, unsigned int timeout)
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+		      u8 tid, int size, unsigned int timeout)
 {
-	struct iwl_txq *txq = NULL;
-	struct iwl_tx_queue_cfg_cmd cmd = {
-		.flags = flags,
-		.sta_id = sta_id,
-		.tid = tid,
-	};
+	struct iwl_txq *txq;
+	union {
+		struct iwl_tx_queue_cfg_cmd old;
+		struct iwl_scd_queue_cfg_cmd new;
+	} cmd;
 	struct iwl_host_cmd hcmd = {
-		.id = cmd_id,
-		.len = { sizeof(cmd) },
-		.data = { &cmd, },
 		.flags = CMD_WANT_SKB,
 	};
 	int ret;

-	ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
-	if (ret)
-		return ret;
+	txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+	if (IS_ERR(txq))
+		return PTR_ERR(txq);

-	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
-	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
-	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+	if (trans->txqs.queue_alloc_cmd_ver == 0) {
+		memset(&cmd.old, 0, sizeof(cmd.old));
+		cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+		cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+		cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+		cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+		cmd.old.tid = tid;
+
+		if (hweight32(sta_mask) != 1) {
+			ret = -EINVAL;
+			goto error;
+		}
+		cmd.old.sta_id = ffs(sta_mask) - 1;
+
+		hcmd.id = SCD_QUEUE_CFG;
+		hcmd.len[0] = sizeof(cmd.old);
+		hcmd.data[0] = &cmd.old;
+	} else if (trans->txqs.queue_alloc_cmd_ver == 3) {
+		memset(&cmd.new, 0, sizeof(cmd.new));
+		cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+		cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+		cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+		cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+		cmd.new.u.add.flags = cpu_to_le32(flags);
+		cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+		cmd.new.u.add.tid = tid;
+
+		hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+		hcmd.len[0] = sizeof(cmd.new);
+		hcmd.data[0] = &cmd.new;
+	} else {
+		ret = -EOPNOTSUPP;
+		goto error;
+	}

 	ret = iwl_trans_send_cmd(trans, &hcmd);
 	if (ret)
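
The union keeps a single stack allocation while queue_alloc_cmd_ver picks which wire format gets filled in, and any version other than the two known ones (0 and 3) is refused with -EOPNOTSUPP rather than guessed at. A skeleton of that versioned-command pattern (the field layout is invented for the example):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct cmd_v0 { uint64_t tfdq_addr; uint8_t sta_id, tid; };
    struct cmd_v3 { uint32_t operation, sta_mask; uint8_t tid; };

    static int build_cmd(int ver, void *buf, size_t *len)
    {
        union { struct cmd_v0 v0; struct cmd_v3 v3; } cmd;

        memset(&cmd, 0, sizeof(cmd));
        if (ver == 0) {
            cmd.v0.sta_id = 1;         /* old API: single station id */
            *len = sizeof(cmd.v0);
        } else if (ver == 3) {
            cmd.v3.sta_mask = 1u << 1; /* new API: station bitmap */
            *len = sizeof(cmd.v3);
        } else {
            return -95; /* -EOPNOTSUPP: unknown version, don't guess */
        }
        memcpy(buf, &cmd, *len);
        return 0;
    }

    int main(void)
    {
        uint8_t buf[64];
        size_t len;

        printf("v3: %d, v7: %d\n",
               build_cmd(3, buf, &len), build_cmd(7, buf, &len));
        return 0;
    }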

@@ -1307,10 +1334,10 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
 	dma_addr_t hi_len;

 	if (trans->trans_cfg->use_tfh) {
-		struct iwl_tfh_tfd *tfd = _tfd;
-		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+		struct iwl_tfh_tfd *tfh_tfd = _tfd;
+		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

-		return (dma_addr_t)(le64_to_cpu(tb->addr));
+		return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
 	}

 	tfd = _tfd;

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  */
 #ifndef __iwl_trans_queue_tx_h__
 #define __iwl_trans_queue_tx_h__

@@ -41,7 +41,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
 	if (trans->trans_cfg->use_tfh)
 		idx = iwl_txq_get_cmd_index(txq, idx);

-	return txq->tfds + trans->txqs.tfd.size * idx;
+	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
 }

 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,

@@ -112,10 +112,9 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
 			    struct iwl_cmd_meta *meta,
 			    struct iwl_tfh_tfd *tfd);

-int iwl_txq_dyn_alloc(struct iwl_trans *trans,
-		      __le16 flags, u8 sta_id, u8 tid,
-		      int cmd_id, int size,
-		      unsigned int timeout);
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+		      u32 sta_mask, u8 tid,
+		      int size, unsigned int timeout);

 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

@@ -137,9 +136,9 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
 	struct iwl_tfd *tfd;

 	if (trans->trans_cfg->use_tfh) {
-		struct iwl_tfh_tfd *tfd = _tfd;
+		struct iwl_tfh_tfd *tfh_tfd = _tfd;

-		return le16_to_cpu(tfd->num_tbs) & 0x1f;
+		return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
 	}

 	tfd = (struct iwl_tfd *)_tfd;

@@ -153,10 +152,10 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
 	struct iwl_tfd_tb *tb;

 	if (trans->trans_cfg->use_tfh) {
-		struct iwl_tfh_tfd *tfd = _tfd;
-		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+		struct iwl_tfh_tfd *tfh_tfd = _tfd;
+		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

-		return le16_to_cpu(tb->tb_len);
+		return le16_to_cpu(tfh_tb->tb_len);
 	}

 	tfd = (struct iwl_tfd *)_tfd;

@@ -2421,6 +2421,7 @@ ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
 #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK	0x78
 #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS	(3)
 #define IEEE80211_PPE_THRES_INFO_PPET_SIZE		(3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE		(7)

 /*
  * Calculate 802.11ax HE capabilities IE PPE field size