Second set of iwlwifi patches for 4.20
* TKIP implementation in new devices;
* Fix for the shared antenna setting in 22000 series;
* Report that we set the RU offset in HE code;
* Fix some register addresses in 22000 series;
* Fix one FW feature TLV that had a conflict with another value;
* A couple of fixes for SoftAP mode;
* Work continues for new 22560 hardware;
* Some fixes in the datapath;
* Some debugging and other general fixes;
* Some cleanups, small improvements and other general fixes;

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEF3LNfgb2BPWm68smoUecoho8xfoFAlutzKgACgkQoUecoho8
xfpv7g//fhLx4xvYl2i5sVhk+FcYCEUcyYiVO4wrXdmdNafobuJRaMuxuUuagExE
J+wsQmSt3LribOGQB06aa/Lf+d5FyKpD8Qcs3ZY2WX5OlLRN1EczNoiTXKfE0E1D
d0a80IjD2EeqhhU9D/7DTBsN7zCpJEW5otJ4S9WY0Y/MKHSFyiDcoqnx4H1ZAv5N
WH8cvTjGf4tPjkjuuEPLlVhz65hqNsM1A+VaZCU21SOlc8ihSXSAt1h8AMnWLPLz
MHxzMbnjWPN8qjgKacEy7ETP14iCjTryRsXBWt48A+XZYyUQFNcnjFVME5KyOB6V
YkHb8EQSkjHOWg4eutOJijNPBHLxQDFHY6LdOZ3JEmqtOKPt+A82JwXmBq6Ez4O4
DobrAEvDwnBMFhNoboNA9C0/B57j9+FPkgd0a8Y98Rr28SBYbnmK8wlZVRWU0S/9
WRg0pafzmCh1hcYedLWfGFkNio9ZZqfhLOWdVpaobJkE47gDLx3aoyXx7UK4VSfG
kIigRccQsOYfOUNvQM8f/J2/uzy1TfkM7E4PaU6Q6lIhfjDuWPzGY7SpuwpSd1hf
qu1EB4EwRiOdvwc/EiLJpsrMSNe9m1EWIopfas0x1dallOSC1bNIRTh2o4oN3a3V
vcvtTIwST5C5IvMeZOFR2xmqArypWlEjvE2ieuq8RtMZnMaq1Rk=
=5Y2V
-----END PGP SIGNATURE-----

Merge tag 'iwlwifi-next-for-kalle-2018-09-28' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
commit 08b0109eea
@ -143,7 +143,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
|
|||||||
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
|
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
|
||||||
.led_mode = IWL_LED_RF_STATE, \
|
.led_mode = IWL_LED_RF_STATE, \
|
||||||
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
|
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
|
||||||
.non_shared_ant = ANT_A, \
|
.non_shared_ant = ANT_B, \
|
||||||
.dccm_offset = IWL_22000_DCCM_OFFSET, \
|
.dccm_offset = IWL_22000_DCCM_OFFSET, \
|
||||||
.dccm_len = IWL_22000_DCCM_LEN, \
|
.dccm_len = IWL_22000_DCCM_LEN, \
|
||||||
.dccm2_offset = IWL_22000_DCCM2_OFFSET, \
|
.dccm2_offset = IWL_22000_DCCM2_OFFSET, \
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
*
|
*
|
||||||
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
|
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
|
||||||
* Copyright(c) 2015 Intel Deutschland GmbH
|
* Copyright(c) 2015 Intel Deutschland GmbH
|
||||||
|
* Copyright (C) 2018 Intel Corporation
|
||||||
*
|
*
|
||||||
* Portions of this file are derived from the ipw3945 project, as well
|
* Portions of this file are derived from the ipw3945 project, as well
|
||||||
* as portions of the ieee80211 subsystem header files.
|
* as portions of the ieee80211 subsystem header files.
|
||||||
@ -1647,7 +1648,6 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
|
|||||||
priv->status, table.valid);
|
priv->status, table.valid);
|
||||||
}
|
}
|
||||||
|
|
||||||
trace_iwlwifi_dev_ucode_error(trans->dev, &table, 0, table.brd_ver);
|
|
||||||
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
|
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
|
||||||
desc_lookup(table.error_id));
|
desc_lookup(table.error_id));
|
||||||
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
|
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
|
||||||
|
@ -99,6 +99,11 @@ enum iwl_data_path_subcmd_ids {
|
|||||||
*/
|
*/
|
||||||
TLC_MNG_CONFIG_CMD = 0xF,
|
TLC_MNG_CONFIG_CMD = 0xF,
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd
|
||||||
|
*/
|
||||||
|
HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
|
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
|
||||||
*/
|
*/
|
||||||
|
@ -8,6 +8,7 @@
|
|||||||
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
|
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
|
||||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||||
|
* Copyright (C) 2018 Intel Corporation
|
||||||
*
|
*
|
||||||
* This program is free software; you can redistribute it and/or modify
|
* This program is free software; you can redistribute it and/or modify
|
||||||
* it under the terms of version 2 of the GNU General Public License as
|
* it under the terms of version 2 of the GNU General Public License as
|
||||||
@ -30,6 +31,7 @@
|
|||||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||||
|
* Copyright (C) 2018 Intel Corporation
|
||||||
* All rights reserved.
|
* All rights reserved.
|
||||||
*
|
*
|
||||||
* Redistribution and use in source and binary forms, with or without
|
* Redistribution and use in source and binary forms, with or without
|
||||||
@ -336,6 +338,9 @@ struct iwl_dbg_mem_access_rsp {
|
|||||||
#define CONT_REC_COMMAND_SIZE 80
|
#define CONT_REC_COMMAND_SIZE 80
|
||||||
#define ENABLE_CONT_RECORDING 0x15
|
#define ENABLE_CONT_RECORDING 0x15
|
||||||
#define DISABLE_CONT_RECORDING 0x16
|
#define DISABLE_CONT_RECORDING 0x16
|
||||||
|
#define BUFFER_ALLOCATION 0x27
|
||||||
|
#define START_DEBUG_RECORDING 0x29
|
||||||
|
#define STOP_DEBUG_RECORDING 0x2A
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* struct iwl_continuous_record_mode - recording mode
|
* struct iwl_continuous_record_mode - recording mode
|
||||||
@ -353,4 +358,31 @@ struct iwl_continuous_record_cmd {
|
|||||||
sizeof(struct iwl_continuous_record_mode)];
|
sizeof(struct iwl_continuous_record_mode)];
|
||||||
} __packed;
|
} __packed;
|
||||||
|
|
||||||
|
/* maximum fragments to be allocated per target of allocationId */
|
||||||
|
#define IWL_BUFFER_LOCATION_MAX_FRAGS 2
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct iwl_fragment_data single fragment structure
|
||||||
|
* @address: 64bit start address
|
||||||
|
* @size: size in bytes
|
||||||
|
*/
|
||||||
|
struct iwl_fragment_data {
|
||||||
|
__le64 address;
|
||||||
|
__le32 size;
|
||||||
|
} __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct iwl_buffer_allocation_cmd - buffer allocation command structure
|
||||||
|
* @allocation_id: id of the allocation
|
||||||
|
* @buffer_location: location of the buffer
|
||||||
|
* @num_frags: number of fragments
|
||||||
|
* @fragments: memory fragments
|
||||||
|
*/
|
||||||
|
struct iwl_buffer_allocation_cmd {
|
||||||
|
__le32 allocation_id;
|
||||||
|
__le32 buffer_location;
|
||||||
|
__le32 num_frags;
|
||||||
|
struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
|
||||||
|
} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */
|
||||||
|
|
||||||
#endif /* __iwl_fw_api_debug_h__ */
|
#endif /* __iwl_fw_api_debug_h__ */
|
||||||
|
@ -578,4 +578,18 @@ struct iwl_he_sta_context_cmd {
|
|||||||
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
|
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
|
||||||
} __packed; /* STA_CONTEXT_DOT11AX_API_S */
|
} __packed; /* STA_CONTEXT_DOT11AX_API_S */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct iwl_he_monitor_cmd - configure air sniffer for HE
|
||||||
|
* @bssid: the BSSID to sniff for
|
||||||
|
* @reserved1: reserved for dword alignment
|
||||||
|
* @aid: the AID to track on for HE MU
|
||||||
|
* @reserved2: reserved for future use
|
||||||
|
*/
|
||||||
|
struct iwl_he_monitor_cmd {
|
||||||
|
u8 bssid[6];
|
||||||
|
__le16 reserved1;
|
||||||
|
__le16 aid;
|
||||||
|
u8 reserved2[6];
|
||||||
|
} __packed; /* HE_AIR_SNIFFER_CONFIG_CMD_API_S_VER_1 */
|
||||||
|
|
||||||
#endif /* __iwl_fw_api_mac_h__ */
|
#endif /* __iwl_fw_api_mac_h__ */
|
||||||
|
@ -601,23 +601,21 @@ struct iwl_rx_mpdu_desc {
|
|||||||
*/
|
*/
|
||||||
u8 mac_phy_idx;
|
u8 mac_phy_idx;
|
||||||
/* DW4 - carries csum data only when rpa_en == 1 */
|
/* DW4 - carries csum data only when rpa_en == 1 */
|
||||||
struct {
|
/**
|
||||||
|
* @raw_csum: raw checksum (alledgedly unreliable)
|
||||||
|
*/
|
||||||
|
__le16 raw_csum;
|
||||||
|
|
||||||
|
union {
|
||||||
/**
|
/**
|
||||||
* @raw_csum: raw checksum (alledgedly unreliable)
|
* @l3l4_flags: &enum iwl_rx_l3l4_flags
|
||||||
*/
|
*/
|
||||||
__le16 raw_csum;
|
__le16 l3l4_flags;
|
||||||
|
|
||||||
union {
|
/**
|
||||||
/**
|
* @sigb_common2: for HE sniffer, HE-SIG-B common part 2
|
||||||
* @l3l4_flags: &enum iwl_rx_l3l4_flags
|
*/
|
||||||
*/
|
__le16 sigb_common2;
|
||||||
__le16 l3l4_flags;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @sigb_common2: for HE sniffer, HE-SIG-B common part 2
|
|
||||||
*/
|
|
||||||
__le16 sigb_common2;
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
/* DW5 */
|
/* DW5 */
|
||||||
/**
|
/**
|
||||||
|
@ -8,6 +8,7 @@
|
|||||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||||
|
* Copyright(c) 2018 Intel Corporation
|
||||||
*
|
*
|
||||||
* This program is free software; you can redistribute it and/or modify
|
* This program is free software; you can redistribute it and/or modify
|
||||||
* it under the terms of version 2 of the GNU General Public License as
|
* it under the terms of version 2 of the GNU General Public License as
|
||||||
@ -30,6 +31,7 @@
|
|||||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||||
|
* Copyright(c) 2018 Intel Corporation
|
||||||
* All rights reserved.
|
* All rights reserved.
|
||||||
*
|
*
|
||||||
* Redistribution and use in source and binary forms, with or without
|
* Redistribution and use in source and binary forms, with or without
|
||||||
@ -391,7 +393,7 @@ enum iwl_sta_type {
|
|||||||
* @tfd_queue_msk: tfd queues used by this station.
|
* @tfd_queue_msk: tfd queues used by this station.
|
||||||
* Obselete for new TX API (9 and above).
|
* Obselete for new TX API (9 and above).
|
||||||
* @rx_ba_window: aggregation window size
|
* @rx_ba_window: aggregation window size
|
||||||
* @sp_length: the size of the SP as it appears in the WME IE
|
* @sp_length: the size of the SP in actual number of frames
|
||||||
* @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
|
* @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
|
||||||
* enabled ACs.
|
* enabled ACs.
|
||||||
*
|
*
|
||||||
|
@ -747,9 +747,9 @@ enum iwl_mvm_ba_resp_flags {
|
|||||||
* @tfd_cnt: number of TFD-Q elements
|
* @tfd_cnt: number of TFD-Q elements
|
||||||
* @ra_tid_cnt: number of RATID-Q elements
|
* @ra_tid_cnt: number of RATID-Q elements
|
||||||
* @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
|
* @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
|
||||||
* for details.
|
* for details. Length in @tfd_cnt.
|
||||||
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
|
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
|
||||||
* &iwl_mvm_compressed_ba_ratid for more details.
|
* &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
|
||||||
*/
|
*/
|
||||||
struct iwl_mvm_compressed_ba_notif {
|
struct iwl_mvm_compressed_ba_notif {
|
||||||
__le32 flags;
|
__le32 flags;
|
||||||
@ -766,7 +766,7 @@ struct iwl_mvm_compressed_ba_notif {
|
|||||||
__le32 tx_rate;
|
__le32 tx_rate;
|
||||||
__le16 tfd_cnt;
|
__le16 tfd_cnt;
|
||||||
__le16 ra_tid_cnt;
|
__le16 ra_tid_cnt;
|
||||||
struct iwl_mvm_compressed_ba_tfd tfd[1];
|
struct iwl_mvm_compressed_ba_tfd tfd[0];
|
||||||
struct iwl_mvm_compressed_ba_ratid ra_tid[0];
|
struct iwl_mvm_compressed_ba_ratid ra_tid[0];
|
||||||
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
|
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
|
||||||
|
|
||||||
|
@ -538,12 +538,108 @@ static struct scatterlist *alloc_sgtable(int size)
|
|||||||
return table;
|
return table;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
|
||||||
|
{
|
||||||
|
u32 prph_len = 0;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
|
||||||
|
i++) {
|
||||||
|
/* The range includes both boundaries */
|
||||||
|
int num_bytes_in_chunk =
|
||||||
|
iwl_prph_dump_addr_comm[i].end -
|
||||||
|
iwl_prph_dump_addr_comm[i].start + 4;
|
||||||
|
|
||||||
|
prph_len += sizeof(struct iwl_fw_error_dump_data) +
|
||||||
|
sizeof(struct iwl_fw_error_dump_prph) +
|
||||||
|
num_bytes_in_chunk;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fwrt->trans->cfg->mq_rx_supported) {
|
||||||
|
for (i = 0; i <
|
||||||
|
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
|
||||||
|
/* The range includes both boundaries */
|
||||||
|
int num_bytes_in_chunk =
|
||||||
|
iwl_prph_dump_addr_9000[i].end -
|
||||||
|
iwl_prph_dump_addr_9000[i].start + 4;
|
||||||
|
|
||||||
|
prph_len += sizeof(struct iwl_fw_error_dump_data) +
|
||||||
|
sizeof(struct iwl_fw_error_dump_prph) +
|
||||||
|
num_bytes_in_chunk;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return prph_len;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
|
||||||
|
struct iwl_fw_error_dump_data **dump_data,
|
||||||
|
u32 sram_len, u32 sram_ofs, u32 smem_len,
|
||||||
|
u32 sram2_len)
|
||||||
|
{
|
||||||
|
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
|
||||||
|
struct iwl_fw_error_dump_mem *dump_mem;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
if (!fwrt->fw->n_dbg_mem_tlv) {
|
||||||
|
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
||||||
|
(*dump_data)->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
|
||||||
|
dump_mem = (void *)(*dump_data)->data;
|
||||||
|
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
|
||||||
|
dump_mem->offset = cpu_to_le32(sram_ofs);
|
||||||
|
iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
|
||||||
|
sram_len);
|
||||||
|
*dump_data = iwl_fw_error_next_data(*dump_data);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
|
||||||
|
u32 len = le32_to_cpu(fw_dbg_mem[i].len);
|
||||||
|
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
|
||||||
|
|
||||||
|
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
||||||
|
(*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
|
||||||
|
dump_mem = (void *)(*dump_data)->data;
|
||||||
|
dump_mem->type = fw_dbg_mem[i].data_type;
|
||||||
|
dump_mem->offset = cpu_to_le32(ofs);
|
||||||
|
|
||||||
|
IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
|
||||||
|
dump_mem->type);
|
||||||
|
|
||||||
|
iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
|
||||||
|
*dump_data = iwl_fw_error_next_data(*dump_data);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (smem_len) {
|
||||||
|
IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
|
||||||
|
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
||||||
|
(*dump_data)->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
|
||||||
|
dump_mem = (void *)(*dump_data)->data;
|
||||||
|
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
|
||||||
|
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
|
||||||
|
iwl_trans_read_mem_bytes(fwrt->trans,
|
||||||
|
fwrt->trans->cfg->smem_offset,
|
||||||
|
dump_mem->data, smem_len);
|
||||||
|
*dump_data = iwl_fw_error_next_data(*dump_data);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sram2_len) {
|
||||||
|
IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
|
||||||
|
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
||||||
|
(*dump_data)->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
|
||||||
|
dump_mem = (void *)(*dump_data)->data;
|
||||||
|
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
|
||||||
|
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
|
||||||
|
iwl_trans_read_mem_bytes(fwrt->trans,
|
||||||
|
fwrt->trans->cfg->dccm2_offset,
|
||||||
|
dump_mem->data, sram2_len);
|
||||||
|
*dump_data = iwl_fw_error_next_data(*dump_data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
||||||
{
|
{
|
||||||
struct iwl_fw_error_dump_file *dump_file;
|
struct iwl_fw_error_dump_file *dump_file;
|
||||||
struct iwl_fw_error_dump_data *dump_data;
|
struct iwl_fw_error_dump_data *dump_data;
|
||||||
struct iwl_fw_error_dump_info *dump_info;
|
struct iwl_fw_error_dump_info *dump_info;
|
||||||
struct iwl_fw_error_dump_mem *dump_mem;
|
|
||||||
struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
|
struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
|
||||||
struct iwl_fw_error_dump_trigger_desc *dump_trig;
|
struct iwl_fw_error_dump_trigger_desc *dump_trig;
|
||||||
struct iwl_fw_dump_ptrs *fw_error_dump;
|
struct iwl_fw_dump_ptrs *fw_error_dump;
|
||||||
@ -655,35 +751,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||||||
|
|
||||||
/* Make room for PRPH registers */
|
/* Make room for PRPH registers */
|
||||||
if (!fwrt->trans->cfg->gen2 &&
|
if (!fwrt->trans->cfg->gen2 &&
|
||||||
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
|
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
|
||||||
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
|
prph_len += iwl_fw_get_prph_len(fwrt);
|
||||||
i++) {
|
|
||||||
/* The range includes both boundaries */
|
|
||||||
int num_bytes_in_chunk =
|
|
||||||
iwl_prph_dump_addr_comm[i].end -
|
|
||||||
iwl_prph_dump_addr_comm[i].start + 4;
|
|
||||||
|
|
||||||
prph_len += sizeof(*dump_data) +
|
|
||||||
sizeof(struct iwl_fw_error_dump_prph) +
|
|
||||||
num_bytes_in_chunk;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!fwrt->trans->cfg->gen2 &&
|
|
||||||
fwrt->trans->cfg->mq_rx_supported &&
|
|
||||||
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
|
|
||||||
for (i = 0; i <
|
|
||||||
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
|
|
||||||
/* The range includes both boundaries */
|
|
||||||
int num_bytes_in_chunk =
|
|
||||||
iwl_prph_dump_addr_9000[i].end -
|
|
||||||
iwl_prph_dump_addr_9000[i].start + 4;
|
|
||||||
|
|
||||||
prph_len += sizeof(*dump_data) +
|
|
||||||
sizeof(struct iwl_fw_error_dump_prph) +
|
|
||||||
num_bytes_in_chunk;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
|
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
|
||||||
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
|
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
|
||||||
@ -703,18 +772,19 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||||||
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
|
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
|
||||||
/* Make room for the SMEM, if it exists */
|
/* Make room for the SMEM, if it exists */
|
||||||
if (smem_len)
|
if (smem_len)
|
||||||
file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
|
file_len += sizeof(*dump_data) + smem_len +
|
||||||
smem_len;
|
sizeof(struct iwl_fw_error_dump_mem);
|
||||||
|
|
||||||
/* Make room for the secondary SRAM, if it exists */
|
/* Make room for the secondary SRAM, if it exists */
|
||||||
if (sram2_len)
|
if (sram2_len)
|
||||||
file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
|
file_len += sizeof(*dump_data) + sram2_len +
|
||||||
sram2_len;
|
sizeof(struct iwl_fw_error_dump_mem);
|
||||||
|
|
||||||
/* Make room for MEM segments */
|
/* Make room for MEM segments */
|
||||||
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
|
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
|
||||||
file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
|
file_len += sizeof(*dump_data) +
|
||||||
le32_to_cpu(fw_dbg_mem[i].len);
|
le32_to_cpu(fw_dbg_mem[i].len) +
|
||||||
|
sizeof(struct iwl_fw_error_dump_mem);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -746,7 +816,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||||||
|
|
||||||
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
|
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
|
||||||
!fwrt->fw->n_dbg_mem_tlv)
|
!fwrt->fw->n_dbg_mem_tlv)
|
||||||
file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
|
file_len += sizeof(*dump_data) + sram_len +
|
||||||
|
sizeof(struct iwl_fw_error_dump_mem);
|
||||||
|
|
||||||
dump_file = vzalloc(file_len);
|
dump_file = vzalloc(file_len);
|
||||||
if (!dump_file) {
|
if (!dump_file) {
|
||||||
@ -811,7 +882,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* We only dump the FIFOs if the FW is in error state */
|
/* We only dump the FIFOs if the FW is in error state */
|
||||||
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
|
if (fifo_data_len) {
|
||||||
iwl_fw_dump_fifos(fwrt, &dump_data);
|
iwl_fw_dump_fifos(fwrt, &dump_data);
|
||||||
if (radio_len)
|
if (radio_len)
|
||||||
iwl_read_radio_regs(fwrt, &dump_data);
|
iwl_read_radio_regs(fwrt, &dump_data);
|
||||||
@ -833,17 +904,10 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||||||
if (monitor_dump_only)
|
if (monitor_dump_only)
|
||||||
goto dump_trans_data;
|
goto dump_trans_data;
|
||||||
|
|
||||||
if (!fwrt->fw->n_dbg_mem_tlv &&
|
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM))
|
||||||
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
|
iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, smem_len,
|
||||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
sram2_len);
|
||||||
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
|
|
||||||
dump_mem = (void *)dump_data->data;
|
|
||||||
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
|
|
||||||
dump_mem->offset = cpu_to_le32(sram_ofs);
|
|
||||||
iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
|
|
||||||
sram_len);
|
|
||||||
dump_data = iwl_fw_error_next_data(dump_data);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
|
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
|
||||||
u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
|
u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
|
||||||
@ -852,8 +916,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
|
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
|
||||||
dump_data->len = cpu_to_le32(data_size * 2);
|
dump_data->len = cpu_to_le32(data_size * 2);
|
||||||
|
|
||||||
memcpy(dump_data->data, fwrt->dump.d3_debug_data,
|
memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
|
||||||
data_size);
|
|
||||||
|
|
||||||
kfree(fwrt->dump.d3_debug_data);
|
kfree(fwrt->dump.d3_debug_data);
|
||||||
fwrt->dump.d3_debug_data = NULL;
|
fwrt->dump.d3_debug_data = NULL;
|
||||||
@ -865,55 +928,6 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||||||
dump_data = iwl_fw_error_next_data(dump_data);
|
dump_data = iwl_fw_error_next_data(dump_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
|
|
||||||
u32 len = le32_to_cpu(fw_dbg_mem[i].len);
|
|
||||||
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
|
|
||||||
|
|
||||||
if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
|
|
||||||
break;
|
|
||||||
|
|
||||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
|
||||||
dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
|
|
||||||
dump_mem = (void *)dump_data->data;
|
|
||||||
dump_mem->type = fw_dbg_mem[i].data_type;
|
|
||||||
dump_mem->offset = cpu_to_le32(ofs);
|
|
||||||
|
|
||||||
IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
|
|
||||||
dump_mem->type);
|
|
||||||
|
|
||||||
iwl_trans_read_mem_bytes(fwrt->trans, ofs,
|
|
||||||
dump_mem->data,
|
|
||||||
len);
|
|
||||||
|
|
||||||
dump_data = iwl_fw_error_next_data(dump_data);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
|
|
||||||
IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
|
|
||||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
|
||||||
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
|
|
||||||
dump_mem = (void *)dump_data->data;
|
|
||||||
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
|
|
||||||
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
|
|
||||||
iwl_trans_read_mem_bytes(fwrt->trans,
|
|
||||||
fwrt->trans->cfg->smem_offset,
|
|
||||||
dump_mem->data, smem_len);
|
|
||||||
dump_data = iwl_fw_error_next_data(dump_data);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
|
|
||||||
IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
|
|
||||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
|
|
||||||
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
|
|
||||||
dump_mem = (void *)dump_data->data;
|
|
||||||
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
|
|
||||||
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
|
|
||||||
iwl_trans_read_mem_bytes(fwrt->trans,
|
|
||||||
fwrt->trans->cfg->dccm2_offset,
|
|
||||||
dump_mem->data, sram2_len);
|
|
||||||
dump_data = iwl_fw_error_next_data(dump_data);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Dump fw's virtual image */
|
/* Dump fw's virtual image */
|
||||||
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
|
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
|
||||||
!fwrt->trans->cfg->gen2 &&
|
!fwrt->trans->cfg->gen2 &&
|
||||||
@ -1149,6 +1163,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
|
|||||||
{
|
{
|
||||||
struct iwl_fw_runtime *fwrt =
|
struct iwl_fw_runtime *fwrt =
|
||||||
container_of(work, struct iwl_fw_runtime, dump.wk.work);
|
container_of(work, struct iwl_fw_runtime, dump.wk.work);
|
||||||
|
struct iwl_fw_dbg_params params = {0};
|
||||||
|
|
||||||
if (fwrt->ops && fwrt->ops->dump_start &&
|
if (fwrt->ops && fwrt->ops->dump_start &&
|
||||||
fwrt->ops->dump_start(fwrt->ops_ctx))
|
fwrt->ops->dump_start(fwrt->ops_ctx))
|
||||||
@ -1162,38 +1177,16 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
|
iwl_fw_dbg_stop_recording(fwrt, ¶ms);
|
||||||
/* stop recording */
|
|
||||||
iwl_fw_dbg_stop_recording(fwrt->trans);
|
|
||||||
|
|
||||||
iwl_fw_error_dump(fwrt);
|
iwl_fw_error_dump(fwrt);
|
||||||
|
|
||||||
/* start recording again if the firmware is not crashed */
|
/* start recording again if the firmware is not crashed */
|
||||||
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
|
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
|
||||||
fwrt->fw->dbg_dest_tlv) {
|
fwrt->fw->dbg_dest_tlv) {
|
||||||
iwl_clear_bits_prph(fwrt->trans,
|
|
||||||
MON_BUFF_SAMPLE_CTL, 0x100);
|
|
||||||
iwl_clear_bits_prph(fwrt->trans,
|
|
||||||
MON_BUFF_SAMPLE_CTL, 0x1);
|
|
||||||
iwl_set_bits_prph(fwrt->trans,
|
|
||||||
MON_BUFF_SAMPLE_CTL, 0x1);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
|
|
||||||
u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
|
|
||||||
|
|
||||||
iwl_fw_dbg_stop_recording(fwrt->trans);
|
|
||||||
/* wait before we collect the data till the DBGC stop */
|
/* wait before we collect the data till the DBGC stop */
|
||||||
udelay(500);
|
udelay(500);
|
||||||
|
iwl_fw_dbg_restart_recording(fwrt, ¶ms);
|
||||||
iwl_fw_error_dump(fwrt);
|
|
||||||
|
|
||||||
/* start recording again if the firmware is not crashed */
|
|
||||||
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
|
|
||||||
fwrt->fw->dbg_dest_tlv) {
|
|
||||||
iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
|
|
||||||
iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
if (fwrt->ops && fwrt->ops->dump_end)
|
if (fwrt->ops && fwrt->ops->dump_end)
|
||||||
|
@ -71,6 +71,7 @@
|
|||||||
#include "iwl-io.h"
|
#include "iwl-io.h"
|
||||||
#include "file.h"
|
#include "file.h"
|
||||||
#include "error-dump.h"
|
#include "error-dump.h"
|
||||||
|
#include "api/commands.h"
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct iwl_fw_dump_desc - describes the dump
|
* struct iwl_fw_dump_desc - describes the dump
|
||||||
@ -83,6 +84,16 @@ struct iwl_fw_dump_desc {
|
|||||||
struct iwl_fw_error_dump_trigger_desc trig_desc;
|
struct iwl_fw_error_dump_trigger_desc trig_desc;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct iwl_fw_dbg_params - register values to restore
|
||||||
|
* @in_sample: DBGC_IN_SAMPLE value
|
||||||
|
* @out_ctrl: DBGC_OUT_CTRL value
|
||||||
|
*/
|
||||||
|
struct iwl_fw_dbg_params {
|
||||||
|
u32 in_sample;
|
||||||
|
u32 out_ctrl;
|
||||||
|
};
|
||||||
|
|
||||||
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
|
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
|
||||||
|
|
||||||
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
|
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
|
||||||
@ -196,15 +207,78 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
|
|||||||
iwl_fw_dbg_get_trigger((fwrt)->fw,\
|
iwl_fw_dbg_get_trigger((fwrt)->fw,\
|
||||||
(trig)))
|
(trig)))
|
||||||
|
|
||||||
static inline void iwl_fw_dbg_stop_recording(struct iwl_trans *trans)
|
static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
|
||||||
|
{
|
||||||
|
struct iwl_continuous_record_cmd cont_rec = {};
|
||||||
|
struct iwl_host_cmd hcmd = {
|
||||||
|
.id = LDBG_CONFIG_CMD,
|
||||||
|
.flags = CMD_ASYNC,
|
||||||
|
.data[0] = &cont_rec,
|
||||||
|
.len[0] = sizeof(cont_rec),
|
||||||
|
};
|
||||||
|
|
||||||
|
cont_rec.record_mode.enable_recording = start ?
|
||||||
|
cpu_to_le16(START_DEBUG_RECORDING) :
|
||||||
|
cpu_to_le16(STOP_DEBUG_RECORDING);
|
||||||
|
|
||||||
|
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
_iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
|
||||||
|
struct iwl_fw_dbg_params *params)
|
||||||
{
|
{
|
||||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
|
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
|
||||||
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
|
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
|
||||||
} else {
|
return;
|
||||||
iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
|
|
||||||
udelay(100);
|
|
||||||
iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (params) {
|
||||||
|
params->in_sample = iwl_read_prph(trans, DBGC_IN_SAMPLE);
|
||||||
|
params->out_ctrl = iwl_read_prph(trans, DBGC_OUT_CTRL);
|
||||||
|
}
|
||||||
|
|
||||||
|
iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
|
||||||
|
udelay(100);
|
||||||
|
iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt,
|
||||||
|
struct iwl_fw_dbg_params *params)
|
||||||
|
{
|
||||||
|
if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
|
||||||
|
_iwl_fw_dbg_stop_recording(fwrt->trans, params);
|
||||||
|
else
|
||||||
|
iwl_fw_dbg_start_stop_hcmd(fwrt, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
_iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
|
||||||
|
struct iwl_fw_dbg_params *params)
|
||||||
|
{
|
||||||
|
if (WARN_ON(!params))
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
|
||||||
|
iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
|
||||||
|
iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
|
||||||
|
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
|
||||||
|
} else {
|
||||||
|
iwl_write_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
|
||||||
|
udelay(100);
|
||||||
|
iwl_write_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
|
||||||
|
struct iwl_fw_dbg_params *params)
|
||||||
|
{
|
||||||
|
if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
|
||||||
|
_iwl_fw_dbg_restart_recording(fwrt->trans, params);
|
||||||
|
else
|
||||||
|
iwl_fw_dbg_start_stop_hcmd(fwrt, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
|
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
|
||||||
|
@ -400,8 +400,8 @@ enum iwl_ucode_tlv_capa {
|
|||||||
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
|
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
|
||||||
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
|
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
|
||||||
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
|
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
|
||||||
IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)86,
|
|
||||||
IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
|
IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
|
||||||
|
IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
|
||||||
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
|
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
|
||||||
|
|
||||||
NUM_IWL_UCODE_TLV_CAPA
|
NUM_IWL_UCODE_TLV_CAPA
|
||||||
|
@ -64,20 +64,41 @@
|
|||||||
* the init done for driver command that configures several system modes
|
* the init done for driver command that configures several system modes
|
||||||
* @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
|
* @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
|
||||||
* @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
|
* @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
|
||||||
* @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
|
|
||||||
* @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
|
* @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
|
||||||
* exponent, the actual size is 2**value, valid sizes are 8-2048.
|
* exponent, the actual size is 2**value, valid sizes are 8-2048.
|
||||||
* The value is four bits long. Maximum valid exponent is 12
|
* The value is four bits long. Maximum valid exponent is 12
|
||||||
* @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
|
* @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
|
||||||
* default is short format - not supported by the driver)
|
* default is short format - not supported by the driver)
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_POS: RB size position
|
||||||
|
* (values are IWL_CTXT_INFO_RB_SIZE_*K)
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size
|
||||||
|
* @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
|
||||||
*/
|
*/
|
||||||
enum iwl_context_info_flags {
|
enum iwl_context_info_flags {
|
||||||
IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
|
IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
|
||||||
IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
|
IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
|
||||||
IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
|
IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
|
||||||
IWL_CTXT_INFO_RB_SIZE_4K = BIT(3),
|
|
||||||
IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
|
IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
|
||||||
IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
|
IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_POS = 9,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_1K = 0x1,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_2K = 0x2,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_4K = 0x4,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_8K = 0x8,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_12K = 0x9,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_16K = 0xa,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_20K = 0xb,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_24K = 0xc,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_28K = 0xd,
|
||||||
|
IWL_CTXT_INFO_RB_SIZE_32K = 0xe,
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -128,61 +128,6 @@ TRACE_EVENT(iwlwifi_dev_tx,
|
|||||||
__entry->framelen, __entry->skbaddr)
|
__entry->framelen, __entry->skbaddr)
|
||||||
);
|
);
|
||||||
|
|
||||||
struct iwl_error_event_table;
|
|
||||||
TRACE_EVENT(iwlwifi_dev_ucode_error,
|
|
||||||
TP_PROTO(const struct device *dev, const struct iwl_error_event_table *table,
|
|
||||||
u32 hw_ver, u32 brd_ver),
|
|
||||||
TP_ARGS(dev, table, hw_ver, brd_ver),
|
|
||||||
TP_STRUCT__entry(
|
|
||||||
DEV_ENTRY
|
|
||||||
__field(u32, desc)
|
|
||||||
__field(u32, tsf_low)
|
|
||||||
__field(u32, data1)
|
|
||||||
__field(u32, data2)
|
|
||||||
__field(u32, line)
|
|
||||||
__field(u32, blink2)
|
|
||||||
__field(u32, ilink1)
|
|
||||||
__field(u32, ilink2)
|
|
||||||
__field(u32, bcon_time)
|
|
||||||
__field(u32, gp1)
|
|
||||||
__field(u32, gp2)
|
|
||||||
__field(u32, rev_type)
|
|
||||||
__field(u32, major)
|
|
||||||
__field(u32, minor)
|
|
||||||
__field(u32, hw_ver)
|
|
||||||
__field(u32, brd_ver)
|
|
||||||
),
|
|
||||||
TP_fast_assign(
|
|
||||||
DEV_ASSIGN;
|
|
||||||
__entry->desc = table->error_id;
|
|
||||||
__entry->tsf_low = table->tsf_low;
|
|
||||||
__entry->data1 = table->data1;
|
|
||||||
__entry->data2 = table->data2;
|
|
||||||
__entry->line = table->line;
|
|
||||||
__entry->blink2 = table->blink2;
|
|
||||||
__entry->ilink1 = table->ilink1;
|
|
||||||
__entry->ilink2 = table->ilink2;
|
|
||||||
__entry->bcon_time = table->bcon_time;
|
|
||||||
__entry->gp1 = table->gp1;
|
|
||||||
__entry->gp2 = table->gp2;
|
|
||||||
__entry->rev_type = table->gp3;
|
|
||||||
__entry->major = table->ucode_ver;
|
|
||||||
__entry->minor = table->hw_ver;
|
|
||||||
__entry->hw_ver = hw_ver;
|
|
||||||
__entry->brd_ver = brd_ver;
|
|
||||||
),
|
|
||||||
TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
|
|
||||||
"blink2 0x%05X ilink 0x%05X 0x%05X "
|
|
||||||
"bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X "
|
|
||||||
"minor 0x%08X hw 0x%08X brd 0x%08X",
|
|
||||||
__get_str(dev), __entry->desc, __entry->tsf_low,
|
|
||||||
__entry->data1, __entry->data2, __entry->line,
|
|
||||||
__entry->blink2, __entry->ilink1, __entry->ilink2,
|
|
||||||
__entry->bcon_time, __entry->gp1, __entry->gp2,
|
|
||||||
__entry->rev_type, __entry->major, __entry->minor,
|
|
||||||
__entry->hw_ver, __entry->brd_ver)
|
|
||||||
);
|
|
||||||
|
|
||||||
TRACE_EVENT(iwlwifi_dev_ucode_event,
|
TRACE_EVENT(iwlwifi_dev_ucode_event,
|
||||||
TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
|
TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
|
||||||
TP_ARGS(dev, time, data, ev),
|
TP_ARGS(dev, time, data, ev),
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
/******************************************************************************
|
/******************************************************************************
|
||||||
*
|
*
|
||||||
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
|
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
|
||||||
|
* Copyright (C) 2018 Intel Corporation
|
||||||
*
|
*
|
||||||
* This program is free software; you can redistribute it and/or modify it
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
* under the terms of version 2 of the GNU General Public License as
|
* under the terms of version 2 of the GNU General Public License as
|
||||||
@ -26,12 +27,10 @@
|
|||||||
#ifndef __CHECKER__
|
#ifndef __CHECKER__
|
||||||
#include "iwl-trans.h"
|
#include "iwl-trans.h"
|
||||||
|
|
||||||
#include "dvm/commands.h"
|
|
||||||
#define CREATE_TRACE_POINTS
|
#define CREATE_TRACE_POINTS
|
||||||
#include "iwl-devtrace.h"
|
#include "iwl-devtrace.h"
|
||||||
|
|
||||||
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
|
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
|
||||||
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
|
|
||||||
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
|
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
|
||||||
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
|
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
|
||||||
#endif
|
#endif
|
||||||
|
@ -679,6 +679,19 @@ enum iwl_plat_pm_mode {
|
|||||||
* enter/exit (in msecs).
|
* enter/exit (in msecs).
|
||||||
*/
|
*/
|
||||||
#define IWL_TRANS_IDLE_TIMEOUT 2000
|
#define IWL_TRANS_IDLE_TIMEOUT 2000
|
||||||
|
#define IWL_MAX_DEBUG_ALLOCATIONS 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct iwl_dram_data
|
||||||
|
* @physical: page phy pointer
|
||||||
|
* @block: pointer to the allocated block/page
|
||||||
|
* @size: size of the block/page
|
||||||
|
*/
|
||||||
|
struct iwl_dram_data {
|
||||||
|
dma_addr_t physical;
|
||||||
|
void *block;
|
||||||
|
int size;
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct iwl_trans - transport common data
|
* struct iwl_trans - transport common data
|
||||||
@ -713,6 +726,8 @@ enum iwl_plat_pm_mode {
|
|||||||
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
|
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
|
||||||
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
|
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
|
||||||
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
|
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
|
||||||
|
* @num_blocks: number of blocks in fw_mon
|
||||||
|
* @fw_mon: address of the buffers for firmware monitor
|
||||||
* @system_pm_mode: the system-wide power management mode in use.
|
* @system_pm_mode: the system-wide power management mode in use.
|
||||||
* This mode is set dynamically, depending on the WoWLAN values
|
* This mode is set dynamically, depending on the WoWLAN values
|
||||||
* configured from the userspace at runtime.
|
* configured from the userspace at runtime.
|
||||||
@ -764,6 +779,8 @@ struct iwl_trans {
|
|||||||
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
|
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
|
||||||
u32 dbg_dump_mask;
|
u32 dbg_dump_mask;
|
||||||
u8 dbg_dest_reg_num;
|
u8 dbg_dest_reg_num;
|
||||||
|
int num_blocks;
|
||||||
|
struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];
|
||||||
|
|
||||||
enum iwl_plat_pm_mode system_pm_mode;
|
enum iwl_plat_pm_mode system_pm_mode;
|
||||||
enum iwl_plat_pm_mode runtime_pm_mode;
|
enum iwl_plat_pm_mode runtime_pm_mode;
|
||||||
|
@ -691,6 +691,15 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
|
|||||||
return bt_activity >= BT_LOW_TRAFFIC;
|
return bt_activity >= BT_LOW_TRAFFIC;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants)
|
||||||
|
{
|
||||||
|
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
|
||||||
|
(mvm->cfg->non_shared_ant & enabled_ants))
|
||||||
|
return mvm->cfg->non_shared_ant;
|
||||||
|
|
||||||
|
return first_antenna(enabled_ants);
|
||||||
|
}
|
||||||
|
|
||||||
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
|
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
|
||||||
struct ieee80211_tx_info *info, u8 ac)
|
struct ieee80211_tx_info *info, u8 ac)
|
||||||
{
|
{
|
||||||
|
@ -434,23 +434,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||||||
u8 chains_static, chains_dynamic;
|
u8 chains_static, chains_dynamic;
|
||||||
struct cfg80211_chan_def chandef;
|
struct cfg80211_chan_def chandef;
|
||||||
int ret, i;
|
int ret, i;
|
||||||
struct iwl_binding_cmd binding_cmd = {};
|
struct iwl_binding_cmd_v1 binding_cmd = {};
|
||||||
struct iwl_time_quota_cmd quota_cmd = {};
|
struct iwl_time_quota_cmd quota_cmd = {};
|
||||||
struct iwl_time_quota_data *quota;
|
struct iwl_time_quota_data *quota;
|
||||||
u32 status;
|
u32 status;
|
||||||
int size;
|
|
||||||
|
|
||||||
if (fw_has_capa(&mvm->fw->ucode_capa,
|
if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
|
||||||
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
|
return -EINVAL;
|
||||||
size = sizeof(binding_cmd);
|
|
||||||
if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
|
|
||||||
!iwl_mvm_is_cdb_supported(mvm))
|
|
||||||
binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
|
|
||||||
else
|
|
||||||
binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
|
|
||||||
} else {
|
|
||||||
size = IWL_BINDING_CMD_SIZE_V1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* add back the PHY */
|
/* add back the PHY */
|
||||||
if (WARN_ON(!mvmvif->phy_ctxt))
|
if (WARN_ON(!mvmvif->phy_ctxt))
|
||||||
@ -497,7 +487,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||||||
|
|
||||||
status = 0;
|
status = 0;
|
||||||
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
|
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
|
||||||
size, &binding_cmd, &status);
|
IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
|
||||||
|
&status);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
|
IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
@ -1042,7 +1033,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
|
|||||||
* the recording automatically before entering D3. This can
|
* the recording automatically before entering D3. This can
|
||||||
* be removed once the FW starts doing that.
|
* be removed once the FW starts doing that.
|
||||||
*/
|
*/
|
||||||
iwl_fw_dbg_stop_recording(mvm->fwrt.trans);
|
_iwl_fw_dbg_stop_recording(mvm->fwrt.trans, NULL);
|
||||||
|
|
||||||
/* must be last -- this switches firmware state */
|
/* must be last -- this switches firmware state */
|
||||||
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
|
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
|
||||||
|
@ -1727,6 +1727,35 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
|
|||||||
return ret ?: count;
|
return ret ?: count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static ssize_t
|
||||||
|
iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
|
||||||
|
size_t count, loff_t *ppos)
|
||||||
|
{
|
||||||
|
struct iwl_he_monitor_cmd he_mon_cmd = {};
|
||||||
|
u32 aid;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (!iwl_mvm_firmware_running(mvm))
|
||||||
|
return -EIO;
|
||||||
|
|
||||||
|
ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
|
||||||
|
&he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
|
||||||
|
&he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
|
||||||
|
&he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
|
||||||
|
if (ret != 7)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
he_mon_cmd.aid = cpu_to_le16(aid);
|
||||||
|
|
||||||
|
mutex_lock(&mvm->mutex);
|
||||||
|
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
|
||||||
|
DATA_PATH_GROUP, 0), 0,
|
||||||
|
sizeof(he_mon_cmd), &he_mon_cmd);
|
||||||
|
mutex_unlock(&mvm->mutex);
|
||||||
|
|
||||||
|
return ret ?: count;
|
||||||
|
}
|
||||||
|
|
||||||
static ssize_t
|
static ssize_t
|
||||||
iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
|
iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
|
||||||
size_t count, loff_t *ppos)
|
size_t count, loff_t *ppos)
|
||||||
@ -1796,6 +1825,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
|
|||||||
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
|
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
MVM_DEBUGFS_WRITE_FILE_OPS(he_sniffer_params, 32);
|
||||||
|
|
||||||
static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
|
static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
|
||||||
size_t count, loff_t *ppos)
|
size_t count, loff_t *ppos)
|
||||||
{
|
{
|
||||||
@ -1984,6 +2015,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
|||||||
#ifdef CONFIG_ACPI
|
#ifdef CONFIG_ACPI
|
||||||
MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
|
MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
|
||||||
#endif
|
#endif
|
||||||
|
MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200);
|
||||||
|
|
||||||
if (!debugfs_create_bool("enable_scan_iteration_notif",
|
if (!debugfs_create_bool("enable_scan_iteration_notif",
|
||||||
0600,
|
0600,
|
||||||
|
@ -82,6 +82,10 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
|
|||||||
IWL_GEN2_EDCA_TX_FIFO_VI,
|
IWL_GEN2_EDCA_TX_FIFO_VI,
|
||||||
IWL_GEN2_EDCA_TX_FIFO_BE,
|
IWL_GEN2_EDCA_TX_FIFO_BE,
|
||||||
IWL_GEN2_EDCA_TX_FIFO_BK,
|
IWL_GEN2_EDCA_TX_FIFO_BK,
|
||||||
|
IWL_GEN2_TRIG_TX_FIFO_VO,
|
||||||
|
IWL_GEN2_TRIG_TX_FIFO_VI,
|
||||||
|
IWL_GEN2_TRIG_TX_FIFO_BE,
|
||||||
|
IWL_GEN2_TRIG_TX_FIFO_BK,
|
||||||
};
|
};
|
||||||
|
|
||||||
struct iwl_mvm_mac_iface_iterator_data {
|
struct iwl_mvm_mac_iface_iterator_data {
|
||||||
|
@ -554,8 +554,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
|||||||
|
|
||||||
hw->wiphy->max_remain_on_channel_duration = 10000;
|
hw->wiphy->max_remain_on_channel_duration = 10000;
|
||||||
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
|
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
|
||||||
/* we can compensate an offset of up to 3 channels = 15 MHz */
|
|
||||||
hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
|
|
||||||
|
|
||||||
/* Extract MAC address */
|
/* Extract MAC address */
|
||||||
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
|
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
|
||||||
@ -2476,6 +2474,9 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
|
|||||||
|
|
||||||
iwl_mvm_mac_ctxt_remove(mvm, vif);
|
iwl_mvm_mac_ctxt_remove(mvm, vif);
|
||||||
|
|
||||||
|
kfree(mvmvif->ap_wep_key);
|
||||||
|
mvmvif->ap_wep_key = NULL;
|
||||||
|
|
||||||
mutex_unlock(&mvm->mutex);
|
mutex_unlock(&mvm->mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2968,7 +2969,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
|
|||||||
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
|
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
|
||||||
true);
|
true);
|
||||||
|
|
||||||
ret = 0;
|
/* if wep is used, need to set the key for the station now */
|
||||||
|
if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key)
|
||||||
|
ret = iwl_mvm_set_sta_key(mvm, vif, sta,
|
||||||
|
mvmvif->ap_wep_key,
|
||||||
|
STA_KEY_IDX_INVALID);
|
||||||
|
else
|
||||||
|
ret = 0;
|
||||||
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
|
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
|
||||||
new_state == IEEE80211_STA_ASSOC) {
|
new_state == IEEE80211_STA_ASSOC) {
|
||||||
/* disable beacon filtering */
|
/* disable beacon filtering */
|
||||||
@ -3151,8 +3158,15 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
|
|||||||
|
|
||||||
switch (key->cipher) {
|
switch (key->cipher) {
|
||||||
case WLAN_CIPHER_SUITE_TKIP:
|
case WLAN_CIPHER_SUITE_TKIP:
|
||||||
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
|
if (!mvm->trans->cfg->gen2) {
|
||||||
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
|
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
|
||||||
|
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
|
||||||
|
} else if (vif->type == NL80211_IFTYPE_STATION) {
|
||||||
|
key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
|
||||||
|
} else {
|
||||||
|
IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n");
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case WLAN_CIPHER_SUITE_CCMP:
|
case WLAN_CIPHER_SUITE_CCMP:
|
||||||
case WLAN_CIPHER_SUITE_GCMP:
|
case WLAN_CIPHER_SUITE_GCMP:
|
||||||
@ -3167,13 +3181,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
|
|||||||
break;
|
break;
|
||||||
case WLAN_CIPHER_SUITE_WEP40:
|
case WLAN_CIPHER_SUITE_WEP40:
|
||||||
case WLAN_CIPHER_SUITE_WEP104:
|
case WLAN_CIPHER_SUITE_WEP104:
|
||||||
/* For non-client mode, only use WEP keys for TX as we probably
|
if (vif->type == NL80211_IFTYPE_AP) {
|
||||||
* don't have a station yet anyway and would then have to keep
|
struct iwl_mvm_vif *mvmvif =
|
||||||
* track of the keys, linking them to each of the clients/peers
|
iwl_mvm_vif_from_mac80211(vif);
|
||||||
* as they appear. For now, don't do that, for performance WEP
|
|
||||||
* offload doesn't really matter much, but we need it for some
|
mvmvif->ap_wep_key = kmemdup(key,
|
||||||
* other offload features in client mode.
|
sizeof(*key) + key->keylen,
|
||||||
*/
|
GFP_KERNEL);
|
||||||
|
if (!mvmvif->ap_wep_key)
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
if (vif->type != NL80211_IFTYPE_STATION)
|
if (vif->type != NL80211_IFTYPE_STATION)
|
||||||
return 0;
|
return 0;
|
||||||
break;
|
break;
|
||||||
|
@ -471,6 +471,7 @@ struct iwl_mvm_vif {
|
|||||||
netdev_features_t features;
|
netdev_features_t features;
|
||||||
|
|
||||||
struct iwl_probe_resp_data __rcu *probe_resp_data;
|
struct iwl_probe_resp_data __rcu *probe_resp_data;
|
||||||
|
struct ieee80211_key_conf *ap_wep_key;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline struct iwl_mvm_vif *
|
static inline struct iwl_mvm_vif *
|
||||||
@ -1818,6 +1819,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
|
|||||||
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
|
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
|
||||||
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
|
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
|
||||||
enum nl80211_band band);
|
enum nl80211_band band);
|
||||||
|
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
|
||||||
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
|
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
|
||||||
struct ieee80211_tx_info *info, u8 ac);
|
struct ieee80211_tx_info *info, u8 ac);
|
||||||
|
|
||||||
|
@ -584,6 +584,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
|||||||
};
|
};
|
||||||
int err, scan_size;
|
int err, scan_size;
|
||||||
u32 min_backoff;
|
u32 min_backoff;
|
||||||
|
enum iwl_amsdu_size rb_size_default;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We use IWL_MVM_STATION_COUNT to check the validity of the station
|
* We use IWL_MVM_STATION_COUNT to check the validity of the station
|
||||||
@ -694,8 +695,16 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     trans_cfg.op_mode = op_mode;
     trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
     trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+    if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+        rb_size_default = IWL_AMSDU_2K;
+    else
+        rb_size_default = IWL_AMSDU_4K;
+
     switch (iwlwifi_mod_params.amsdu_size) {
     case IWL_AMSDU_DEF:
+        trans_cfg.rx_buf_size = rb_size_default;
+        break;
     case IWL_AMSDU_4K:
         trans_cfg.rx_buf_size = IWL_AMSDU_4K;
         break;
@ -708,16 +717,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     default:
         pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
                iwlwifi_mod_params.amsdu_size);
-        trans_cfg.rx_buf_size = IWL_AMSDU_4K;
-    }
-
-    /* the hardware splits the A-MSDU */
-    if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        trans_cfg.rx_buf_size = IWL_AMSDU_2K;
-        /* TODO: remove when balanced power mode is fw supported */
-        iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
-    } else if (mvm->cfg->mq_rx_supported) {
-        trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+        trans_cfg.rx_buf_size = rb_size_default;
     }
 
     trans->wide_cmd_header = true;
@ -3213,7 +3213,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
     /* These values will be overridden later */
     lq_sta->lq.single_stream_ant_msk =
-        first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
+        iwl_mvm_bt_coex_get_single_ant_msk(mvm, iwl_mvm_get_valid_tx_ant(mvm));
     lq_sta->lq.dual_stream_ant_msk = ANT_AB;
 
     /* as default allow aggregation for all tids */
@ -3576,7 +3576,8 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
     mvmsta = iwl_mvm_sta_from_mac80211(sta);
     mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
 
-    if (num_of_ant(initial_rate->ant) == 1)
+    if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
+        num_of_ant(initial_rate->ant) == 1)
         lq_cmd->single_stream_ant_msk = initial_rate->ant;
 
     lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
@ -283,6 +283,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
             !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
             return 0;
 
+        if (mvm->trans->cfg->gen2 &&
+            !(status & RX_MPDU_RES_STATUS_MIC_OK))
+            stats->flag |= RX_FLAG_MMIC_ERROR;
+
         *crypt_len = IEEE80211_TKIP_IV_LEN;
         /* fall through if TTAK OK */
     case IWL_RX_MPDU_STATUS_SEC_WEP:
@ -294,8 +298,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
             IWL_RX_MPDU_STATUS_SEC_WEP)
             *crypt_len = IEEE80211_WEP_IV_LEN;
 
-        if (pkt_flags & FH_RSCSR_RADA_EN)
+        if (pkt_flags & FH_RSCSR_RADA_EN) {
             stats->flag |= RX_FLAG_ICV_STRIPPED;
+            if (mvm->trans->cfg->gen2)
+                stats->flag |= RX_FLAG_MMIC_STRIPPED;
+        }
 
         return 0;
     case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
@ -1102,7 +1109,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
             le16_encode_bits(offs,
                              IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
         he->data2 |=
-            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+                        IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
         if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
             he->data2 |=
                 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
@ -1150,7 +1158,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
 
     he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
 
-    if (rate_n_flags & RATE_MCS_BF_POS)
+    if (rate_n_flags & RATE_MCS_BF_MSK)
         he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
 
     switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
@ -67,6 +67,14 @@
 #include "sta.h"
 #include "rs.h"
 
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+                                u32 sta_id,
+                                struct ieee80211_key_conf *key, bool mcast,
+                                u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+                                u8 key_offset, bool mfp);
+
 /*
  * New version of ADD_STA_sta command added new fields at the end of the
  * structure, so sending the size of the relevant API's structure is enough to
@ -2096,6 +2104,19 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
     iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                        &cfg, timeout);
 
+    if (mvmvif->ap_wep_key) {
+        u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
+
+        if (key_offset == STA_KEY_IDX_INVALID)
+            return -ENOSPC;
+
+        ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
+                                   mvmvif->ap_wep_key, 1, 0, NULL, 0,
+                                   key_offset, 0);
+        if (ret)
+            return ret;
+    }
+
     return 0;
 }
 
@ -3128,10 +3149,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 
     switch (keyconf->cipher) {
     case WLAN_CIPHER_SUITE_TKIP:
-        if (vif->type == NL80211_IFTYPE_AP) {
-            ret = -EINVAL;
-            break;
-        }
         addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
         /* get phase 1 key from mac80211 */
         ieee80211_get_key_rx_seq(keyconf, 0, &seq);
@ -840,6 +840,36 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
     return 0;
 }
 
+static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+                                           struct ieee80211_sta *sta,
+                                           unsigned int tid)
+{
+    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+    enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
+    u8 ac = tid_to_mac80211_ac[tid];
+    unsigned int txf;
+    int lmac = IWL_LMAC_24G_INDEX;
+
+    if (iwl_mvm_is_cdb_supported(mvm) &&
+        band == NL80211_BAND_5GHZ)
+        lmac = IWL_LMAC_5G_INDEX;
+
+    /* For HE redirect to trigger based fifos */
+    if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+        ac += 4;
+
+    txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+
+    /*
+     * Don't send an AMSDU that will be longer than the TXF.
+     * Add a security margin of 256 for the TX command + headers.
+     * We also want to have the start of the next packet inside the
+     * fifo to be able to send bursts.
+     */
+    return min_t(unsigned int, mvmsta->max_amsdu_len,
+                 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                           struct ieee80211_tx_info *info,
                           struct ieee80211_sta *sta,
@ -852,7 +882,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
     u16 snap_ip_tcp, pad;
     unsigned int dbg_max_amsdu_len;
     netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
-    u8 tid, txf;
+    u8 tid;
 
     snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
         tcp_hdrlen(skb);
@ -891,20 +921,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
         !(mvmsta->amsdu_enabled & BIT(tid)))
         return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 
-    max_amsdu_len = mvmsta->max_amsdu_len;
-
-    /* the Tx FIFO to which this A-MSDU will be routed */
-    txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
-
-    /*
-     * Don't send an AMSDU that will be longer than the TXF.
-     * Add a security margin of 256 for the TX command + headers.
-     * We also want to have the start of the next packet inside the
-     * fifo to be able to send bursts.
-     */
-    max_amsdu_len = min_t(unsigned int, max_amsdu_len,
-                          mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] -
-                          256);
+    max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
 
     if (unlikely(dbg_max_amsdu_len))
         max_amsdu_len = min_t(unsigned int, max_amsdu_len,
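Editor's aside on the A-MSDU hunks above (a rough sketch, not driver code): the new helper limits an A-MSDU to the station's negotiated maximum, but never more than the target TX FIFO size minus a 256-byte margin reserved for the TX command and headers. With assumed numbers:

```c
#include <stdio.h>

/*
 * Illustration of the clamp applied by the new helper. The A-MSDU may not
 * exceed the TX FIFO size minus a 256-byte safety margin, nor the station's
 * own maximum. The values used below are made up for the example.
 */
static unsigned int max_amsdu_size(unsigned int sta_max_amsdu_len,
                                   unsigned int txfifo_size)
{
    unsigned int fifo_limit = txfifo_size - 256;

    return sta_max_amsdu_len < fifo_limit ? sta_max_amsdu_len : fifo_limit;
}

int main(void)
{
    /* e.g. an 8 KiB station limit against a 5 KiB FIFO */
    printf("%u\n", max_amsdu_size(8192, 5120)); /* prints 4864 */
    return 0;
}
```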
@ -546,7 +546,6 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 
     IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
 
-    trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver, table.brd_ver);
     IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
             desc_lookup(table.error_id));
     IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
@ -96,9 +96,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
     /* Configure debug, for integration */
     iwl_pcie_alloc_fw_monitor(trans, 0);
     prph_sc_ctrl->hwm_cfg.hwm_base_addr =
-        cpu_to_le64(trans_pcie->fw_mon_phys);
+        cpu_to_le64(trans->fw_mon[0].physical);
     prph_sc_ctrl->hwm_cfg.hwm_size =
-        cpu_to_le32(trans_pcie->fw_mon_size);
+        cpu_to_le32(trans->fw_mon[0].size);
 
     /* allocate ucode sections in dram and set addresses */
     ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
@ -162,7 +162,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
     struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_context_info *ctxt_info;
     struct iwl_context_info_rbd_cfg *rx_cfg;
-    u32 control_flags = 0;
+    u32 control_flags = 0, rb_size;
     int ret;
 
     ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
@ -177,11 +177,29 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
     /* size is in DWs */
     ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
 
+    switch (trans_pcie->rx_buf_size) {
+    case IWL_AMSDU_2K:
+        rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
+        break;
+    case IWL_AMSDU_4K:
+        rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+        break;
+    case IWL_AMSDU_8K:
+        rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
+        break;
+    case IWL_AMSDU_12K:
+        rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
+        break;
+    default:
+        WARN_ON(1);
+        rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+    }
+
     BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
-    control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
-                    IWL_CTXT_INFO_TFD_FORMAT_LONG |
-                    RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
-                    IWL_CTXT_INFO_RB_CB_SIZE_POS;
+    control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
+                    (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
+                     IWL_CTXT_INFO_RB_CB_SIZE_POS) |
+                    (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
     ctxt_info->control.control_flags = cpu_to_le32(control_flags);
 
     /* initialize RX default queue */
@ -400,18 +400,6 @@ enum iwl_image_response_code {
     IWL_IMAGE_RESP_FAIL = 2,
 };
 
-/**
- * struct iwl_dram_data
- * @physical: page phy pointer
- * @block: pointer to the allocated block/page
- * @size: size of the block/page
- */
-struct iwl_dram_data {
-    dma_addr_t physical;
-    void *block;
-    int size;
-};
-
 /**
  * struct iwl_self_init_dram - dram data used by self init process
  * @fw: lmac and umac dram data
@ -463,9 +451,6 @@ struct iwl_self_init_dram {
  * @reg_lock: protect hw register access
  * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
- * @fw_mon_phys: physical address of the buffer for the firmware monitor
- * @fw_mon_cpu_addr: address of the buffer for the firmware monitor
- * @fw_mon_size: size of the buffer for the firmware monitor
  * @msix_entries: array of MSI-X entries
  * @msix_enabled: true if managed to enable MSI-X
  * @shared_vec_mask: the type of causes the shared vector handles
@ -553,10 +538,6 @@ struct iwl_trans_pcie {
     bool cmd_hold_nic_awake;
     bool ref_cmd_in_flight;
 
-    dma_addr_t fw_mon_phys;
-    void *fw_mon_cpu_addr;
-    u32 fw_mon_size;
-
     struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
     bool msix_enabled;
     u8 shared_vec_mask;
@ -165,7 +165,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
     trans_pcie->is_down = true;
 
     /* Stop dbgc before stopping device */
-    iwl_fw_dbg_stop_recording(trans);
+    _iwl_fw_dbg_stop_recording(trans, NULL);
 
     /* tell the device to stop sending interrupts */
     iwl_disable_interrupts(trans);
@ -185,44 +185,28 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
 
 static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
 {
-    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+    int i;
 
-    if (!trans_pcie->fw_mon_cpu_addr)
-        return;
-
-    dma_free_coherent(trans->dev, trans_pcie->fw_mon_size,
-                      trans_pcie->fw_mon_cpu_addr,
-                      trans_pcie->fw_mon_phys);
-    trans_pcie->fw_mon_cpu_addr = NULL;
-    trans_pcie->fw_mon_phys = 0;
-    trans_pcie->fw_mon_size = 0;
+    for (i = 0; i < trans->num_blocks; i++) {
+        dma_free_coherent(trans->dev, trans->fw_mon[i].size,
+                          trans->fw_mon[i].block,
+                          trans->fw_mon[i].physical);
+        trans->fw_mon[i].block = NULL;
+        trans->fw_mon[i].physical = 0;
+        trans->fw_mon[i].size = 0;
+        trans->num_blocks--;
+    }
 }
 
-void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
+                                            u8 max_power, u8 min_power)
 {
-    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     void *cpu_addr = NULL;
-    dma_addr_t phys;
+    dma_addr_t phys = 0;
     u32 size = 0;
     u8 power;
 
-    if (!max_power) {
-        /* default max_power is maximum */
-        max_power = 26;
-    } else {
-        max_power += 11;
-    }
-
-    if (WARN(max_power > 26,
-             "External buffer size for monitor is too big %d, check the FW TLV\n",
-             max_power))
-        return;
-
-    if (trans_pcie->fw_mon_cpu_addr)
-        return;
-
-    phys = 0;
-    for (power = max_power; power >= 11; power--) {
+    for (power = max_power; power >= min_power; power--) {
         size = BIT(power);
         cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
                                       GFP_KERNEL | __GFP_NOWARN |
@ -245,9 +229,34 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
             (unsigned long)BIT(power - 10),
             (unsigned long)BIT(max_power - 10));
 
-    trans_pcie->fw_mon_cpu_addr = cpu_addr;
-    trans_pcie->fw_mon_phys = phys;
-    trans_pcie->fw_mon_size = size;
+    trans->fw_mon[trans->num_blocks].block = cpu_addr;
+    trans->fw_mon[trans->num_blocks].physical = phys;
+    trans->fw_mon[trans->num_blocks].size = size;
+    trans->num_blocks++;
+}
+
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+{
+    if (!max_power) {
+        /* default max_power is maximum */
+        max_power = 26;
+    } else {
+        max_power += 11;
+    }
+
+    if (WARN(max_power > 26,
+             "External buffer size for monitor is too big %d, check the FW TLV\n",
+             max_power))
+        return;
+
+    /*
+     * This function allocats the default fw monitor.
+     * The optional additional ones will be allocated in runtime
+     */
+    if (trans->num_blocks)
+        return;
+
+    iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
 }
 
 static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
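Editor's aside on the monitor-allocation rework above (a stand-alone sketch, not driver code): the block allocator walks down from max_power to min_power, i.e. it tries the largest power-of-two buffer first and halves the request until an allocation succeeds. The same pattern in plain C, with malloc standing in for dma_alloc_coherent and illustrative sizes:

```c
#include <stdlib.h>
#include <stdio.h>

/*
 * Power-of-two fallback allocation: try 2^max_power bytes first and halve
 * the request until it succeeds or drops below 2^min_power.
 */
static void *alloc_largest_block(unsigned int max_power,
                                 unsigned int min_power, size_t *out_size)
{
    unsigned int power;

    for (power = max_power; power >= min_power; power--) {
        size_t size = (size_t)1 << power;
        void *block = malloc(size);

        if (block) {
            *out_size = size;
            return block;
        }
    }
    *out_size = 0;
    return NULL;
}

int main(void)
{
    size_t size;
    void *mon = alloc_largest_block(26, 11, &size); /* 64 MiB down to 2 KiB */

    printf("got %zu bytes\n", size);
    free(mon);
    return 0;
}
```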
@ -911,7 +920,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 
 void iwl_pcie_apply_destination(struct iwl_trans *trans)
 {
-    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
     int i;
 
@ -962,18 +970,18 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
     }
 
 monitor:
-    if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+    if (dest->monitor_mode == EXTERNAL_MODE && trans->fw_mon[0].size) {
         iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
-                       trans_pcie->fw_mon_phys >> dest->base_shift);
+                       trans->fw_mon[0].physical >> dest->base_shift);
         if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
             iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
-                           (trans_pcie->fw_mon_phys +
-                            trans_pcie->fw_mon_size - 256) >>
+                           (trans->fw_mon[0].physical +
+                            trans->fw_mon[0].size - 256) >>
                            dest->end_shift);
         else
             iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
-                           (trans_pcie->fw_mon_phys +
-                            trans_pcie->fw_mon_size) >>
+                           (trans->fw_mon[0].physical +
+                            trans->fw_mon[0].size) >>
                            dest->end_shift);
     }
 }
@ -981,7 +989,6 @@ monitor:
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                      const struct fw_img *image)
 {
-    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     int ret = 0;
     int first_ucode_section;
 
@ -1011,12 +1018,12 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
         trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
         iwl_pcie_alloc_fw_monitor(trans, 0);
 
-        if (trans_pcie->fw_mon_size) {
+        if (trans->fw_mon[0].size) {
             iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
-                           trans_pcie->fw_mon_phys >> 4);
+                           trans->fw_mon[0].physical >> 4);
             iwl_write_prph(trans, MON_BUFF_END_ADDR,
-                           (trans_pcie->fw_mon_phys +
-                            trans_pcie->fw_mon_size) >> 4);
+                           (trans->fw_mon[0].physical +
+                            trans->fw_mon[0].size) >> 4);
         }
     } else if (trans->dbg_dest_tlv) {
         iwl_pcie_apply_destination(trans);
@ -1243,7 +1250,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
     trans_pcie->is_down = true;
 
     /* Stop dbgc before stopping device */
-    iwl_fw_dbg_stop_recording(trans);
+    _iwl_fw_dbg_stop_recording(trans, NULL);
 
     /* tell the device to stop sending interrupts */
     iwl_disable_interrupts(trans);
|
|||||||
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
|
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
|
||||||
|
{
|
||||||
|
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
|
||||||
|
return 0x00FFFFFF;
|
||||||
|
else
|
||||||
|
return 0x000FFFFF;
|
||||||
|
}
|
||||||
|
|
||||||
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
|
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
|
||||||
{
|
{
|
||||||
|
u32 mask = iwl_trans_pcie_prph_msk(trans);
|
||||||
|
|
||||||
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
|
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
|
||||||
((reg & 0x000FFFFF) | (3 << 24)));
|
((reg & mask) | (3 << 24)));
|
||||||
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
|
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
|
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
|
||||||
u32 val)
|
u32 val)
|
||||||
{
|
{
|
||||||
|
u32 mask = iwl_trans_pcie_prph_msk(trans);
|
||||||
|
|
||||||
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
|
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
|
||||||
((addr & 0x000FFFFF) | (3 << 24)));
|
((addr & mask) | (3 << 24)));
|
||||||
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
|
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
|
||||||
}
|
}
|
||||||
|
|
||||||
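Editor's aside on the hunk above (an illustration under the assumption that only the mask width changes): the helper widens the indirect-PRPH address mask from 20 to 24 bits on 22560-family devices, while the top byte still carries the access-enable code (3 << 24). A small sketch of how the HBUS target-address word is composed, using the same constants as the hunk:

```c
#include <stdio.h>
#include <stdint.h>

/* 22560+ parts decode 24 address bits; older parts only 20. */
static uint32_t prph_addr_word(uint32_t reg, int is_22560_or_later)
{
    uint32_t mask = is_22560_or_later ? 0x00FFFFFF : 0x000FFFFF;

    /* low bits: register address; bits 24-25: access-enable code (3) */
    return (reg & mask) | (3u << 24);
}

int main(void)
{
    printf("old mask: 0x%08X\n", (unsigned int)prph_addr_word(0x00A2B000, 0));
    printf("new mask: 0x%08X\n", (unsigned int)prph_addr_word(0x00A2B000, 1));
    return 0;
}
```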
@ -2840,10 +2859,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
                             struct iwl_fw_error_dump_data **data,
                             u32 monitor_len)
 {
-    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     u32 len = 0;
 
-    if ((trans_pcie->fw_mon_cpu_addr &&
+    if ((trans->num_blocks &&
          trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
         trans->dbg_dest_tlv) {
         struct iwl_fw_error_dump_fw_mon *fw_mon_data;
@ -2871,12 +2889,12 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
             cpu_to_le32(iwl_read_prph(trans, base));
 
         len += sizeof(**data) + sizeof(*fw_mon_data);
-        if (trans_pcie->fw_mon_cpu_addr) {
+        if (trans->num_blocks) {
             memcpy(fw_mon_data->data,
-                   trans_pcie->fw_mon_cpu_addr,
-                   trans_pcie->fw_mon_size);
+                   trans->fw_mon[0].block,
+                   trans->fw_mon[0].size);
 
-            monitor_len = trans_pcie->fw_mon_size;
+            monitor_len = trans->fw_mon[0].size;
         } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
             /*
              * Update pointers to reflect actual values after
@ -2912,36 +2930,15 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
     return len;
 }
 
-static struct iwl_trans_dump_data
-*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
-                          const struct iwl_fw_dbg_trigger_tlv *trigger)
+static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
 {
-    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_fw_error_dump_data *data;
-    struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
-    struct iwl_fw_error_dump_txcmd *txcmd;
-    struct iwl_trans_dump_data *dump_data;
-    u32 len, num_rbs = 0;
-    u32 monitor_len;
-    int i, ptr;
-    bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
-                    !trans->cfg->mq_rx_supported &&
-                    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
-
-    /* transport dump header */
-    len = sizeof(*dump_data);
-
-    /* host commands */
-    len += sizeof(*data) +
-        cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
-
-    /* FW monitor */
-    if (trans_pcie->fw_mon_cpu_addr) {
-        len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
-            trans_pcie->fw_mon_size;
-        monitor_len = trans_pcie->fw_mon_size;
+    if (trans->num_blocks) {
+        *len += sizeof(struct iwl_fw_error_dump_data) +
+            sizeof(struct iwl_fw_error_dump_fw_mon) +
+            trans->fw_mon[0].size;
+        return trans->fw_mon[0].size;
     } else if (trans->dbg_dest_tlv) {
-        u32 base, end, cfg_reg;
+        u32 base, end, cfg_reg, monitor_len;
 
         if (trans->dbg_dest_tlv->version == 1) {
             cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
@ -2971,11 +2968,39 @@ static struct iwl_trans_dump_data
             end += (1 << trans->dbg_dest_tlv->end_shift);
             monitor_len = end - base;
         }
-        len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
-               monitor_len;
-    } else {
-        monitor_len = 0;
+        *len += sizeof(struct iwl_fw_error_dump_data) +
+            sizeof(struct iwl_fw_error_dump_fw_mon) +
+            monitor_len;
+        return monitor_len;
     }
+    return 0;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+                          const struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+    struct iwl_fw_error_dump_data *data;
+    struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+    struct iwl_fw_error_dump_txcmd *txcmd;
+    struct iwl_trans_dump_data *dump_data;
+    u32 len, num_rbs = 0;
+    u32 monitor_len;
+    int i, ptr;
+    bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+                    !trans->cfg->mq_rx_supported &&
+                    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
+
+    /* transport dump header */
+    len = sizeof(*dump_data);
+
+    /* host commands */
+    len += sizeof(*data) +
+        cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+
+    /* FW monitor */
+    monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
 
     if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
         if (!(trans->dbg_dump_mask &
@ -3297,6 +3322,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
     iwl_disable_interrupts(trans);
 
     trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+    if (trans->hw_rev == 0xffffffff) {
+        dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
+        ret = -EIO;
+        goto out_no_pci;
+    }
+
     /*
      * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
      * changed, and now the revision step also includes bit 0-1 (no more
@ -416,6 +416,35 @@ out_err:
     return NULL;
 }
 
+static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
+                                      struct sk_buff *skb,
+                                      struct iwl_tfh_tfd *tfd,
+                                      struct iwl_cmd_meta *out_meta)
+{
+    int i;
+
+    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+        dma_addr_t tb_phys;
+        int tb_idx;
+
+        if (!skb_frag_size(frag))
+            continue;
+
+        tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+                                   skb_frag_size(frag), DMA_TO_DEVICE);
+
+        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+            return -ENOMEM;
+        tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
+                                      skb_frag_size(frag));
+
+        out_meta->tbs |= BIT(tb_idx);
+    }
+
+    return 0;
+}
+
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
                                     struct iwl_txq *txq,
@ -428,7 +457,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
     int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
     struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
     dma_addr_t tb_phys;
-    int i, len, tb1_len, tb2_len;
+    int len, tb1_len, tb2_len;
     void *tb1_addr;
 
     tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
@ -467,24 +496,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
         iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
     }
 
-    /* set up the remaining entries to point to the data */
-    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-        int tb_idx;
-
-        if (!skb_frag_size(frag))
-            continue;
-
-        tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
-                                   skb_frag_size(frag), DMA_TO_DEVICE);
-
-        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-            goto out_err;
-        tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
-                                      skb_frag_size(frag));
-
-        out_meta->tbs |= BIT(tb_idx);
-    }
+    if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+        goto out_err;
 
     trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                          IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@ -526,7 +539,12 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 
     hdr_len = ieee80211_hdrlen(hdr->frame_control);
 
-    if (amsdu)
+    /*
+     * Only build A-MSDUs here if doing so by GSO, otherwise it may be
+     * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
+     * built in the higher layers already.
+     */
+    if (amsdu && skb_shinfo(skb)->gso_size)
         return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
                                             out_meta, hdr_len, len);
 
@ -1097,7 +1097,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
     if (!iwl_queue_used(txq, last_to_free)) {
         IWL_ERR(trans,
-            "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+            "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
             __func__, txq_id, last_to_free,
             trans->cfg->base_params->max_tfd_queue_size,
             txq->write_ptr, txq->read_ptr);
@ -1977,29 +1977,24 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
                              struct iwl_txq *txq, u8 hdr_len,
-                             struct iwl_cmd_meta *out_meta,
-                             struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                             struct iwl_cmd_meta *out_meta)
 {
-    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    u16 tb2_len;
+    u16 head_tb_len;
     int i;
 
     /*
      * Set up TFD's third entry to point directly to remainder
      * of skb's head, if any
      */
-    tb2_len = skb_headlen(skb) - hdr_len;
+    head_tb_len = skb_headlen(skb) - hdr_len;
 
-    if (tb2_len > 0) {
-        dma_addr_t tb2_phys = dma_map_single(trans->dev,
+    if (head_tb_len > 0) {
+        dma_addr_t tb_phys = dma_map_single(trans->dev,
                                              skb->data + hdr_len,
-                                             tb2_len, DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
-            iwl_pcie_tfd_unmap(trans, out_meta, txq,
-                               txq->write_ptr);
+                                             head_tb_len, DMA_TO_DEVICE);
+        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
             return -EINVAL;
-        }
-        iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
+        iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
     }
 
     /* set up the remaining entries to point to the data */
@ -2014,23 +2009,14 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
         tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
                                    skb_frag_size(frag), DMA_TO_DEVICE);
 
-        if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-            iwl_pcie_tfd_unmap(trans, out_meta, txq,
-                               txq->write_ptr);
+        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
             return -EINVAL;
-        }
         tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
                                         skb_frag_size(frag), false);
 
         out_meta->tbs |= BIT(tb_idx);
     }
 
-    trace_iwlwifi_dev_tx(trans->dev, skb,
-                         iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
-                         trans_pcie->tfd_size,
-                         &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
-                         hdr_len);
-    trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
     return 0;
 }
 
@ -2091,7 +2077,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
     u8 *start_hdr;
     struct iwl_tso_hdr_page *hdr_page;
     struct page **page_ptr;
-    int ret;
     struct tso_t tso;
 
     /* if the packet is protected, then it must be CCMP or GCMP */
@ -2177,10 +2162,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
         if (trans_pcie->sw_csum_tx) {
             csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
                                  GFP_ATOMIC);
-            if (!csum_skb) {
-                ret = -ENOMEM;
-                goto out_unmap;
-            }
+            if (!csum_skb)
+                return -ENOMEM;
 
             iwl_compute_pseudo_hdr_csum(iph, tcph,
                                         skb->protocol ==
@ -2201,8 +2184,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                      hdr_tb_len, DMA_TO_DEVICE);
         if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
             dev_kfree_skb(csum_skb);
-            ret = -EINVAL;
-            goto out_unmap;
+            return -EINVAL;
         }
         iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
                                hdr_tb_len, false);
@ -2227,8 +2209,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                          size, DMA_TO_DEVICE);
             if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
                 dev_kfree_skb(csum_skb);
-                ret = -EINVAL;
-                goto out_unmap;
+                return -EINVAL;
             }
 
             iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@ -2262,10 +2243,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
     skb_push(skb, hdr_len + iv_len);
 
     return 0;
 
-out_unmap:
-    iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
-    return ret;
 }
 #else /* CONFIG_INET */
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
@ -2430,9 +2407,26 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                                              out_meta, dev_cmd,
                                              tb1_len)))
             goto out_err;
-    } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
-                                          out_meta, dev_cmd, tb1_len))) {
-        goto out_err;
+    } else {
+        struct sk_buff *frag;
+
+        if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
+                                       out_meta)))
+            goto out_err;
+
+        skb_walk_frags(skb, frag) {
+            if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
+                                           out_meta)))
+                goto out_err;
+        }
+
+        trace_iwlwifi_dev_tx(trans->dev, skb,
+                             iwl_pcie_get_tfd(trans, txq,
+                                              txq->write_ptr),
+                             trans_pcie->tfd_size,
+                             &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+                             hdr_len);
+        trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
     }
 
     /* building the A-MSDU might have changed this data, so memcpy it now */
@ -2477,6 +2471,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
     spin_unlock(&txq->lock);
     return 0;
 out_err:
+    iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
     spin_unlock(&txq->lock);
     return -1;
 }