Third set of iwlwifi patches intended for v5.8
* Update range request API;
* Add ACPI DSM support;
* Support enabling 5.2GHz bands in Indonesia via ACPI;
* Bump FW API version to 56;
* TX queues refactoring started;
* Fix one memory leak;
* Some other small fixes and clean-ups;

Merge tag 'iwlwifi-next-for-kalle-2020-05-29' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
commit 6bb986e940
@ -57,7 +57,7 @@
#include "iwl-prph.h"

/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 55
#define IWL_22000_UCODE_API_MAX 56

/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@ -58,44 +58,121 @@
 *
 *****************************************************************************/

#include <linux/uuid.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
#include "fw/runtime.h"

void *iwl_acpi_get_object(struct device *dev, acpi_string method)
static const guid_t intel_wifi_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
                                                0xA5, 0xB3, 0x1F, 0x73,
                                                0x8E, 0x28, 0x5A, 0xDE);

static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
                               acpi_handle *ret_handle)
{
    acpi_handle root_handle;
    acpi_handle handle;
    struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
    acpi_status status;

    root_handle = ACPI_HANDLE(dev);
    if (!root_handle) {
        IWL_DEBUG_DEV_RADIO(dev,
                            "Could not retrieve root port ACPI handle\n");
        return ERR_PTR(-ENOENT);
                            "ACPI: Could not retrieve root port handle\n");
        return -ENOENT;
    }

    /* Get the method's handle */
    status = acpi_get_handle(root_handle, method, &handle);
    status = acpi_get_handle(root_handle, method, ret_handle);
    if (ACPI_FAILURE(status)) {
        IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method);
        return ERR_PTR(-ENOENT);
        IWL_DEBUG_DEV_RADIO(dev,
                            "ACPI: %s method not found\n", method);
        return -ENOENT;
    }
    return 0;
}

void *iwl_acpi_get_object(struct device *dev, acpi_string method)
{
    struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
    acpi_handle handle;
    acpi_status status;
    int ret;

    ret = iwl_acpi_get_handle(dev, method, &handle);
    if (ret)
        return ERR_PTR(-ENOENT);

    /* Call the method with no arguments */
    status = acpi_evaluate_object(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n",
        IWL_DEBUG_DEV_RADIO(dev,
                            "ACPI: %s method invocation failed (status: 0x%x)\n",
                            method, status);
        return ERR_PTR(-ENOENT);
    }

    return buf.pointer;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_object);

/**
 * Generic function for evaluating a method defined in the device specific
 * method (DSM) interface. The returned acpi object must be freed by the
 * calling function.
 */
void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
                              union acpi_object *args)
{
    union acpi_object *obj;

    obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_wifi_guid, rev, func,
                            args);
    if (!obj) {
        IWL_DEBUG_DEV_RADIO(dev,
                            "ACPI: DSM method invocation failed (rev: %d, func:%d)\n",
                            rev, func);
        return ERR_PTR(-ENOENT);
    }
    return obj;
}

/**
 * Evaluate a DSM with no arguments and a single u8 return value (inside a
 * buffer object), verify and return that value.
 */
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
{
    union acpi_object *obj;
    int ret;

    obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
    if (IS_ERR(obj))
        return -ENOENT;

    if (obj->type != ACPI_TYPE_BUFFER) {
        IWL_DEBUG_DEV_RADIO(dev,
                            "ACPI: DSM method did not return a valid object, type=%d\n",
                            obj->type);
        ret = -EINVAL;
        goto out;
    }

    if (obj->buffer.length != sizeof(u8)) {
        IWL_DEBUG_DEV_RADIO(dev,
                            "ACPI: DSM method returned invalid buffer, length=%d\n",
                            obj->buffer.length);
        ret = -EINVAL;
        goto out;
    }

    ret = obj->buffer.pointer[0];
    IWL_DEBUG_DEV_RADIO(dev,
                        "ACPI: DSM method evaluated: func=%d, ret=%d\n",
                        func, ret);
out:
    ACPI_FREE(obj);
    return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
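For orientation, a minimal sketch of how an op-mode consumer might use the new helper. The wrapper name and the DISABLE_SRD semantics below are illustrative assumptions; this series only wires up DSM_FUNC_ENABLE_INDONESIA_5G2, in iwl_mvm_lari_cfg() further down.

/* Illustrative sketch, not part of the patch: evaluate a rev-0 DSM
 * function with no arguments and treat the single u8 reply as a flag.
 * iwl_mvm_eval_dsm_disable_srd() is a hypothetical name; the assumed
 * convention (1 == requested by the BIOS) mirrors the Indonesia 5G2 case.
 */
static bool iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
{
    int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_FUNC_DISABLE_SRD);

    /* a negative value means the DSM object was missing or malformed */
    if (ret < 0)
        return false;

    return ret == 1;
}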

union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
                                         int data_size, int *tbl_rev)
@ -127,12 +127,23 @@ struct iwl_geo_profile {
    u8 values[ACPI_GEO_TABLE_SIZE];
};

enum iwl_dsm_funcs_rev_0 {
    DSM_FUNC_QUERY = 0,
    DSM_FUNC_DISABLE_SRD = 1,
    DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
};

#ifdef CONFIG_ACPI

struct iwl_fw_runtime;

void *iwl_acpi_get_object(struct device *dev, acpi_string method);

void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
                              union acpi_object *args);

int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);

union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
                                         int data_size, int *tbl_rev);
@ -192,6 +203,17 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
    return ERR_PTR(-ENOENT);
}

static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
                                            int func, union acpi_object *args)
{
    return ERR_PTR(-ENOENT);
}

static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
{
    return -ENOENT;
}

static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                                       union acpi_object *data,
                                                       int data_size,
@ -550,13 +550,11 @@ struct iwl_tof_range_req_ap_entry_v4 {
|
||||
/**
|
||||
* enum iwl_location_cipher - location cipher selection
|
||||
* @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128
|
||||
* @IWL_LOCATION_CIPHER_CCMP_256: CCMP 256
|
||||
* @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128
|
||||
* @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256
|
||||
*/
|
||||
enum iwl_location_cipher {
|
||||
IWL_LOCATION_CIPHER_CCMP_128,
|
||||
IWL_LOCATION_CIPHER_CCMP_256,
|
||||
IWL_LOCATION_CIPHER_GCMP_128,
|
||||
IWL_LOCATION_CIPHER_GCMP_256,
|
||||
};
|
||||
@ -577,7 +575,8 @@ enum iwl_location_cipher {
|
||||
* @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
|
||||
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
|
||||
* the number of measurement iterations (min 2^0 = 1, max 2^14)
|
||||
* @reserved: For alignment and future use
|
||||
* @sta_id: the station id of the AP. Only relevant when associated to the AP,
|
||||
* otherwise should be set to &IWL_MVM_INVALID_STA.
|
||||
* @cipher: pairwise cipher suite for secured measurement.
|
||||
* &enum iwl_location_cipher.
|
||||
* @hltk: HLTK to be used for secured 11az measurement
|
||||
@ -586,7 +585,8 @@ enum iwl_location_cipher {
|
||||
* If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
|
||||
* calibration value that corresponds to the rx bandwidth of the FTM
|
||||
* frame.
|
||||
* @reserved2: For alignment and future use.
|
||||
* @beacon_interval: beacon interval of the AP in TUs. Only required if
|
||||
* &IWL_INITIATOR_AP_FLAGS_TB is set.
|
||||
*/
|
||||
struct iwl_tof_range_req_ap_entry {
|
||||
__le32 initiator_ap_flags;
|
||||
@ -598,13 +598,13 @@ struct iwl_tof_range_req_ap_entry {
|
||||
__le16 burst_period;
|
||||
u8 samples_per_burst;
|
||||
u8 num_of_bursts;
|
||||
u8 reserved;
|
||||
u8 sta_id;
|
||||
u8 cipher;
|
||||
u8 hltk[HLTK_11AZ_LEN];
|
||||
u8 tk[TK_11AZ_LEN];
|
||||
__le16 calib[IWL_TOF_BW_NUM];
|
||||
__le16 reserved2;
|
||||
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_5 */
|
||||
__le16 beacon_interval;
|
||||
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */
|
||||
|
||||
/**
|
||||
* enum iwl_tof_response_mode
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(C) 2018 - 2019 Intel Corporation
|
||||
* Copyright(C) 2018 - 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(C) 2018 - 2019 Intel Corporation
|
||||
* Copyright(C) 2018 - 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -74,6 +74,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
|
||||
*/
|
||||
NVM_ACCESS_COMPLETE = 0x0,
|
||||
|
||||
/**
|
||||
* @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd
|
||||
*/
|
||||
LARI_CONFIG_CHANGE = 0x1,
|
||||
|
||||
/**
|
||||
* @NVM_GET_INFO:
|
||||
* Command is &struct iwl_nvm_get_info,
|
||||
@ -446,4 +451,29 @@ struct iwl_tas_config_cmd {
|
||||
__le32 black_list_size;
|
||||
__le32 black_list_array[IWL_TAS_BLACK_LIST_MAX];
|
||||
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* enum iwl_lari_configs - bit masks for the various LARI config operations
|
||||
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
|
||||
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
|
||||
* @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled
|
||||
* @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in
|
||||
* Indonesia
|
||||
*/
|
||||
enum iwl_lari_config_masks {
|
||||
LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0),
|
||||
LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1),
|
||||
LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2),
|
||||
LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_lari_config_change_cmd - change LARI configuration
|
||||
* @config_bitmap: bit map of the config commands. each bit will trigger a
|
||||
* different predefined FW config operation
|
||||
*/
|
||||
struct iwl_lari_config_change_cmd {
|
||||
__le32 config_bitmap;
|
||||
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
|
||||
|
||||
#endif /* __iwl_fw_api_nvm_reg_h__ */
|
||||
|
@ -5,7 +5,7 @@
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018, 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -18,7 +18,7 @@
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018, 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -93,6 +93,11 @@ enum iwl_prph_scratch_mtr_format {
|
||||
* @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
|
||||
* There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
|
||||
* 3: 256 bit.
|
||||
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored
|
||||
* by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K
|
||||
* appropriately; use the below values for this.
|
||||
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
|
||||
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
|
||||
*/
|
||||
enum iwl_prph_scratch_flags {
|
||||
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
|
||||
@ -103,6 +108,9 @@ enum iwl_prph_scratch_flags {
|
||||
IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
|
||||
IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
|
||||
IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
|
||||
IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20,
|
||||
IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
|
||||
IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -240,6 +240,7 @@ enum iwl_nvm_channel_flags {
|
||||
* @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden
|
||||
* for this regulatory domain (valid only in 5Ghz).
|
||||
* @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
|
||||
* @REG_CAPA_11AX_DISABLED: 11ax is forbidden for this regulatory domain.
|
||||
*/
|
||||
enum iwl_reg_capa_flags {
|
||||
REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
|
||||
@ -250,6 +251,7 @@ enum iwl_reg_capa_flags {
|
||||
REG_CAPA_MCS_9_ALLOWED = BIT(5),
|
||||
REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
|
||||
REG_CAPA_DC_HIGH_ENABLED = BIT(9),
|
||||
REG_CAPA_11AX_DISABLED = BIT(10),
|
||||
};
|
||||
|
||||
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
|
||||
@ -1115,6 +1117,9 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
|
||||
flags |= NL80211_RRF_NO_160MHZ;
|
||||
}
|
||||
|
||||
if (cap_flags & REG_CAPA_11AX_DISABLED)
|
||||
flags |= NL80211_RRF_NO_HE;
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
|
@ -795,6 +795,132 @@ struct iwl_trans_debug {
|
||||
u32 domains_bitmap;
|
||||
};
|
||||
|
||||
struct iwl_dma_ptr {
|
||||
dma_addr_t dma;
|
||||
void *addr;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
struct iwl_cmd_meta {
|
||||
/* only for SYNC commands, iff the reply skb is wanted */
|
||||
struct iwl_host_cmd *source;
|
||||
u32 flags;
|
||||
u32 tbs;
|
||||
};
|
||||
|
||||
/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger, then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE 20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
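A small compile-time restatement of the arithmetic in the comment above; the two _ASSUMED_ constants are illustrative, taken from the comment rather than from the TFD layout definitions.

/* Illustrative sketch only: an 8-byte PN at offset 12 ends at byte 20,
 * so IWL_FIRST_TB_SIZE must be at least 20, and the per-entry buffer is
 * padded out to a 64-byte boundary by IWL_FIRST_TB_SIZE_ALIGN.
 */
#define IWL_ASSUMED_PN_OFFSET 12
#define IWL_ASSUMED_PN_LEN 8

static inline void iwl_first_tb_size_check(void)
{
    BUILD_BUG_ON(IWL_FIRST_TB_SIZE <
                 IWL_ASSUMED_PN_OFFSET + IWL_ASSUMED_PN_LEN);
    BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN % 64);
}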
|
||||
|
||||
struct iwl_pcie_txq_entry {
|
||||
void *cmd;
|
||||
struct sk_buff *skb;
|
||||
/* buffer to free after command completes */
|
||||
const void *free_buf;
|
||||
struct iwl_cmd_meta meta;
|
||||
};
|
||||
|
||||
struct iwl_pcie_first_tb_buf {
|
||||
u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_txq - Tx Queue for DMA
|
||||
* @q: generic Rx/Tx queue descriptor
|
||||
* @tfds: transmit frame descriptors (DMA memory)
|
||||
* @first_tb_bufs: start of command headers, including scratch buffers, for
|
||||
* the writeback -- this is DMA memory and an array holding one buffer
|
||||
* for each command on the queue
|
||||
* @first_tb_dma: DMA address for the first_tb_bufs start
|
||||
* @entries: transmit entries (driver state)
|
||||
* @lock: queue lock
|
||||
* @stuck_timer: timer that fires if queue gets stuck
|
||||
* @trans: pointer back to transport (for timer)
|
||||
* @need_update: indicates need to update read/write index
|
||||
* @ampdu: true if this queue is an ampdu queue for an specific RA/TID
|
||||
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
|
||||
* @frozen: tx stuck queue timer is frozen
|
||||
* @frozen_expiry_remainder: remember how long until the timer fires
|
||||
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
|
||||
* @write_ptr: 1-st empty entry (index) host_w
|
||||
* @read_ptr: last used entry (index) host_r
|
||||
* @dma_addr: physical addr for BD's
|
||||
* @n_window: safe queue window
|
||||
* @id: queue id
|
||||
* @low_mark: low watermark, resume queue if free space more than this
|
||||
* @high_mark: high watermark, stop queue if free space less than this
|
||||
*
|
||||
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
|
||||
* descriptors) and required locking structures.
|
||||
*
|
||||
* Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
|
||||
* always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
|
||||
* there might be HW changes in the future). For the normal TX
|
||||
* queues, n_window, which is the size of the software queue data
|
||||
* is also 256; however, for the command queue, n_window is only
|
||||
* 32 since we don't need so many commands pending. Since the HW
|
||||
* still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
|
||||
* This means that we end up with the following:
|
||||
* HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
|
||||
* SW entries: | 0 | ... | 31 |
|
||||
* where N is a number between 0 and 7. This means that the SW
|
||||
* data is a window overlayed over the HW queue.
|
||||
*/
|
||||
struct iwl_txq {
|
||||
void *tfds;
|
||||
struct iwl_pcie_first_tb_buf *first_tb_bufs;
|
||||
dma_addr_t first_tb_dma;
|
||||
struct iwl_pcie_txq_entry *entries;
|
||||
/* lock for syncing changes on the queue */
|
||||
spinlock_t lock;
|
||||
unsigned long frozen_expiry_remainder;
|
||||
struct timer_list stuck_timer;
|
||||
struct iwl_trans *trans;
|
||||
bool need_update;
|
||||
bool frozen;
|
||||
bool ampdu;
|
||||
int block;
|
||||
unsigned long wd_timeout;
|
||||
struct sk_buff_head overflow_q;
|
||||
struct iwl_dma_ptr bc_tbl;
|
||||
|
||||
int write_ptr;
|
||||
int read_ptr;
|
||||
dma_addr_t dma_addr;
|
||||
int n_window;
|
||||
u32 id;
|
||||
int low_mark;
|
||||
int high_mark;
|
||||
|
||||
bool overflow_tx;
|
||||
};
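To make the HW/SW window overlay described in the comment above concrete, a minimal sketch of the index mapping; the helper name is illustrative (the PCIe code has its own equivalent, iwl_pcie_get_cmd_index()), and it assumes n_window is a power of two as in the 32-entry command-queue case.

/* Illustrative sketch only: fold a 0..TFD_QUEUE_SIZE_MAX-1 hardware
 * descriptor index onto the 0..n_window-1 software entry backing it.
 * For the command queue (n_window == 32) HW indices 0, 32, 64, ...
 * all land on SW entry 0.
 */
static inline int iwl_txq_sw_index(const struct iwl_txq *txq, u32 hw_index)
{
    return hw_index & (txq->n_window - 1);
}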
|
||||
|
||||
/**
|
||||
* struct iwl_trans_txqs - transport tx queues data
|
||||
*
|
||||
* @queue_used - bit mask of used queues
|
||||
* @queue_stopped - bit mask of stopped queues
|
||||
*/
|
||||
struct iwl_trans_txqs {
|
||||
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
|
||||
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
|
||||
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
|
||||
struct {
|
||||
u8 fifo;
|
||||
u8 q_id;
|
||||
unsigned int wdg_timeout;
|
||||
} cmd;
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_trans - transport common data
|
||||
*
|
||||
@ -828,6 +954,7 @@ struct iwl_trans_debug {
|
||||
* @system_pm_mode: the system-wide power management mode in use.
|
||||
* This mode is set dynamically, depending on the WoWLAN values
|
||||
* configured from the userspace at runtime.
|
||||
* @iwl_trans_txqs: transport tx queues data.
|
||||
*/
|
||||
struct iwl_trans {
|
||||
const struct iwl_trans_ops *ops;
|
||||
@ -875,6 +1002,7 @@ struct iwl_trans {
|
||||
enum iwl_plat_pm_mode system_pm_mode;
|
||||
|
||||
const char *name;
|
||||
struct iwl_trans_txqs txqs;
|
||||
|
||||
/* pointer to trans specific struct */
|
||||
/*Ensure that this pointer will always be aligned to sizeof pointer */
|
||||
|
@ -391,9 +391,9 @@ iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
|
||||
}
|
||||
|
||||
static int
|
||||
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
|
||||
struct cfg80211_pmsr_request_peer *peer,
|
||||
struct iwl_tof_range_req_ap_entry_v4 *target)
|
||||
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
|
||||
struct cfg80211_pmsr_request_peer *peer,
|
||||
struct iwl_tof_range_req_ap_entry_v4 *target)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -408,6 +408,38 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct cfg80211_pmsr_request_peer *peer,
|
||||
struct iwl_tof_range_req_ap_entry *target)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
|
||||
&target->format_bw,
|
||||
&target->ctrl_ch_position);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
|
||||
|
||||
if (vif->bss_conf.assoc &&
|
||||
!memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
|
||||
target->sta_id = mvmvif->ap_sta_id;
|
||||
} else {
|
||||
target->sta_id = IWL_MVM_INVALID_STA;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: Beacon interval is currently unknown, so use the common value
|
||||
* of 100 TUs.
|
||||
*/
|
||||
target->beacon_interval = cpu_to_le16(100);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
|
||||
{
|
||||
u32 status;
|
||||
@ -496,7 +528,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
for (i = 0; i < cmd.num_of_ap; i++) {
|
||||
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
|
||||
|
||||
err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
|
||||
err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
@ -521,8 +553,9 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
|
||||
for (i = 0; i < cmd.num_of_ap; i++) {
|
||||
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
|
||||
struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
|
||||
|
||||
err = iwl_mvm_ftm_put_target(mvm, peer, (void *)&cmd.ap[i]);
|
||||
err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
@ -548,6 +581,7 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
|
||||
switch (cmd_ver) {
|
||||
case 9:
|
||||
case 10:
|
||||
err = iwl_mvm_ftm_start_v9(mvm, vif, req);
|
||||
break;
|
||||
case 8:
|
||||
|
@ -988,6 +988,44 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
|
||||
if (ret < 0)
|
||||
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
|
||||
}
|
||||
|
||||
static bool iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
|
||||
{
|
||||
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
|
||||
DSM_FUNC_ENABLE_INDONESIA_5G2);
|
||||
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"Evaluated DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
|
||||
ret);
|
||||
|
||||
return ret == 1;
|
||||
}
|
||||
|
||||
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
|
||||
{
|
||||
int ret;
|
||||
struct iwl_lari_config_change_cmd cmd = {};
|
||||
|
||||
if (iwl_mvm_eval_dsm_indonesia_5g2(mvm))
|
||||
cmd.config_bitmap |=
|
||||
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
|
||||
|
||||
/* apply more config masks here */
|
||||
|
||||
if (cmd.config_bitmap) {
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
|
||||
le32_to_cpu(cmd.config_bitmap));
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WIDE_ID(REGULATORY_AND_NVM_GROUP,
|
||||
LARI_CONFIG_CHANGE),
|
||||
0, sizeof(cmd), &cmd);
|
||||
if (ret < 0)
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
|
||||
ret);
|
||||
}
|
||||
}
|
||||
#else /* CONFIG_ACPI */
|
||||
|
||||
inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
|
||||
@ -1019,6 +1057,10 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
|
||||
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
|
||||
{
|
||||
}
|
||||
|
||||
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_ACPI */
|
||||
|
||||
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
|
||||
@ -1293,6 +1335,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
iwl_mvm_lari_cfg(mvm);
|
||||
/*
|
||||
* RTNL is not taken during Ct-kill, but we don't need to scan/Tx
|
||||
* anyway, so don't init MCC.
|
||||
|
@ -1208,14 +1208,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
|
||||
*/
|
||||
flush_work(&mvm->roc_done_wk);
|
||||
|
||||
iwl_mvm_rm_aux_sta(mvm);
|
||||
|
||||
iwl_mvm_stop_device(mvm);
|
||||
|
||||
iwl_mvm_async_handlers_purge(mvm);
|
||||
/* async_handlers_list is empty and will stay empty: HW is stopped */
|
||||
|
||||
/* the fw is stopped, the aux sta is dead: clean up driver state */
|
||||
iwl_mvm_del_aux_sta(mvm);
|
||||
|
||||
/*
|
||||
* Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
|
||||
* hw (as restart_complete() won't be called in this case) and mac80211
|
||||
|
@ -2093,18 +2093,26 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
|
||||
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
|
||||
if (ret)
|
||||
IWL_WARN(mvm, "Failed sending remove station\n");
|
||||
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
|
||||
{
|
||||
iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
|
||||
}
|
||||
|
||||
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
|
||||
{
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
|
||||
}
|
||||
|
||||
/*
|
||||
* Send the add station command for the vif's broadcast station.
|
||||
* Assumes that the station was already allocated.
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2018 - 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2018 - 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
int tid, u8 queue, bool start);
|
||||
|
||||
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
|
||||
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
|
||||
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);
|
||||
|
||||
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
|
@ -138,9 +138,17 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
|
||||
case IWL_AMSDU_2K:
|
||||
break;
|
||||
case IWL_AMSDU_4K:
|
||||
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
|
||||
break;
|
||||
case IWL_AMSDU_8K:
|
||||
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
|
||||
/* if firmware supports the ext size, tell it */
|
||||
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
|
||||
break;
|
||||
case IWL_AMSDU_12K:
|
||||
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
|
||||
/* if firmware supports the ext size, tell it */
|
||||
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -213,7 +221,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
|
||||
ctxt_info_gen3->tr_idx_arr_size =
|
||||
cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
|
||||
ctxt_info_gen3->mtr_base_addr =
|
||||
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
|
||||
cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
|
||||
ctxt_info_gen3->mcr_base_addr =
|
||||
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
|
||||
ctxt_info_gen3->mtr_size =
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2018 - 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -20,7 +20,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2018 - 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -263,7 +263,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
|
||||
|
||||
/* initialize TX command queue */
|
||||
ctxt_info->hcmd_cfg.cmd_queue_addr =
|
||||
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
|
||||
cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
|
||||
ctxt_info->hcmd_cfg.cmd_queue_size =
|
||||
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
|
||||
|
||||
|
@ -246,12 +246,6 @@ struct iwl_rb_allocator {
|
||||
struct work_struct rx_alloc;
|
||||
};
|
||||
|
||||
struct iwl_dma_ptr {
|
||||
dma_addr_t dma;
|
||||
void *addr;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
/**
|
||||
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
|
||||
* @index -- current index
|
||||
@ -290,107 +284,6 @@ static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
|
||||
(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
|
||||
}
|
||||
|
||||
struct iwl_cmd_meta {
|
||||
/* only for SYNC commands, iff the reply skb is wanted */
|
||||
struct iwl_host_cmd *source;
|
||||
u32 flags;
|
||||
u32 tbs;
|
||||
};
|
||||
|
||||
/*
|
||||
* The FH will write back to the first TB only, so we need to copy some data
|
||||
* into the buffer regardless of whether it should be mapped or not.
|
||||
* This indicates how big the first TB must be to include the scratch buffer
|
||||
* and the assigned PN.
|
||||
* Since PN location is 8 bytes at offset 12, it's 20 now.
|
||||
* If we make it bigger then allocations will be bigger and copy slower, so
|
||||
* that's probably not useful.
|
||||
*/
|
||||
#define IWL_FIRST_TB_SIZE 20
|
||||
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
|
||||
|
||||
struct iwl_pcie_txq_entry {
|
||||
void *cmd;
|
||||
struct sk_buff *skb;
|
||||
/* buffer to free after command completes */
|
||||
const void *free_buf;
|
||||
struct iwl_cmd_meta meta;
|
||||
};
|
||||
|
||||
struct iwl_pcie_first_tb_buf {
|
||||
u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_txq - Tx Queue for DMA
|
||||
* @q: generic Rx/Tx queue descriptor
|
||||
* @tfds: transmit frame descriptors (DMA memory)
|
||||
* @first_tb_bufs: start of command headers, including scratch buffers, for
|
||||
* the writeback -- this is DMA memory and an array holding one buffer
|
||||
* for each command on the queue
|
||||
* @first_tb_dma: DMA address for the first_tb_bufs start
|
||||
* @entries: transmit entries (driver state)
|
||||
* @lock: queue lock
|
||||
* @stuck_timer: timer that fires if queue gets stuck
|
||||
* @trans_pcie: pointer back to transport (for timer)
|
||||
* @need_update: indicates need to update read/write index
|
||||
* @ampdu: true if this queue is an ampdu queue for an specific RA/TID
|
||||
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
|
||||
* @frozen: tx stuck queue timer is frozen
|
||||
* @frozen_expiry_remainder: remember how long until the timer fires
|
||||
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
|
||||
* @write_ptr: 1-st empty entry (index) host_w
|
||||
* @read_ptr: last used entry (index) host_r
|
||||
* @dma_addr: physical addr for BD's
|
||||
* @n_window: safe queue window
|
||||
* @id: queue id
|
||||
* @low_mark: low watermark, resume queue if free space more than this
|
||||
* @high_mark: high watermark, stop queue if free space less than this
|
||||
*
|
||||
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
|
||||
* descriptors) and required locking structures.
|
||||
*
|
||||
* Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
|
||||
* always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
|
||||
* there might be HW changes in the future). For the normal TX
|
||||
* queues, n_window, which is the size of the software queue data
|
||||
* is also 256; however, for the command queue, n_window is only
|
||||
* 32 since we don't need so many commands pending. Since the HW
|
||||
* still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
|
||||
* This means that we end up with the following:
|
||||
* HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
|
||||
* SW entries: | 0 | ... | 31 |
|
||||
* where N is a number between 0 and 7. This means that the SW
|
||||
* data is a window overlayed over the HW queue.
|
||||
*/
|
||||
struct iwl_txq {
|
||||
void *tfds;
|
||||
struct iwl_pcie_first_tb_buf *first_tb_bufs;
|
||||
dma_addr_t first_tb_dma;
|
||||
struct iwl_pcie_txq_entry *entries;
|
||||
spinlock_t lock;
|
||||
unsigned long frozen_expiry_remainder;
|
||||
struct timer_list stuck_timer;
|
||||
struct iwl_trans_pcie *trans_pcie;
|
||||
bool need_update;
|
||||
bool frozen;
|
||||
bool ampdu;
|
||||
int block;
|
||||
unsigned long wd_timeout;
|
||||
struct sk_buff_head overflow_q;
|
||||
struct iwl_dma_ptr bc_tbl;
|
||||
|
||||
int write_ptr;
|
||||
int read_ptr;
|
||||
dma_addr_t dma_addr;
|
||||
int n_window;
|
||||
u32 id;
|
||||
int low_mark;
|
||||
int high_mark;
|
||||
|
||||
bool overflow_tx;
|
||||
};
|
||||
|
||||
static inline dma_addr_t
|
||||
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
|
||||
{
|
||||
@ -561,9 +454,6 @@ struct iwl_trans_pcie {
|
||||
struct dma_pool *bc_pool;
|
||||
|
||||
struct iwl_txq *txq_memory;
|
||||
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
|
||||
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
|
||||
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
|
||||
|
||||
/* PCI bus related data */
|
||||
struct pci_dev *pci_dev;
|
||||
@ -577,10 +467,7 @@ struct iwl_trans_pcie {
|
||||
|
||||
u8 page_offs, dev_cmd_offs;
|
||||
|
||||
u8 cmd_queue;
|
||||
u8 def_rx_queue;
|
||||
u8 cmd_fifo;
|
||||
unsigned int cmd_q_wdg_timeout;
|
||||
u8 n_no_reclaim_cmds;
|
||||
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
|
||||
u8 max_tbs;
|
||||
@ -983,9 +870,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
|
||||
static inline void iwl_wake_queue(struct iwl_trans *trans,
|
||||
struct iwl_txq *txq)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
|
||||
if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
|
||||
iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
|
||||
}
|
||||
@ -994,9 +879,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
|
||||
static inline void iwl_stop_queue(struct iwl_trans *trans,
|
||||
struct iwl_txq *txq)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
|
||||
if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
|
||||
iwl_op_mode_queue_full(trans->op_mode, txq->id);
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
|
||||
} else
|
||||
|
@ -1284,7 +1284,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
|
||||
int i)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
|
||||
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
|
||||
bool page_stolen = false;
|
||||
int max_len = trans_pcie->rx_buf_bytes;
|
||||
u32 offset = 0;
|
||||
@ -1671,9 +1671,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
|
||||
}
|
||||
|
||||
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
|
||||
if (!trans_pcie->txq[i])
|
||||
if (!trans->txqs.txq[i])
|
||||
continue;
|
||||
del_timer(&trans_pcie->txq[i]->stuck_timer);
|
||||
del_timer(&trans->txqs.txq[i]->stuck_timer);
|
||||
}
|
||||
|
||||
/* The STATUS_FW_ERROR bit is set in this function. This must happen
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2018 - 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -20,7 +20,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2018 - 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Allocate or reset and init all Tx and Command queues */
|
||||
if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size))
|
||||
if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
|
||||
return -ENOMEM;
|
||||
|
||||
/* enable shadow regs in HW */
|
||||
@ -262,8 +262,9 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
|
||||
iwl_pcie_reset_ict(trans);
|
||||
|
||||
/* make sure all queues are not stopped/used */
|
||||
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
|
||||
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
|
||||
memset(trans->txqs.queue_stopped, 0,
|
||||
sizeof(trans->txqs.queue_stopped));
|
||||
memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
|
||||
|
||||
/* now that we got alive we can free the fw image & the context info.
|
||||
* paging memory cannot be freed included since FW will still use it
|
||||
|
@ -5,10 +5,9 @@
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -28,10 +27,9 @@
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -1495,14 +1493,10 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
|
||||
int ret;
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
/*
|
||||
* Family IWL_DEVICE_FAMILY_AX210 and above persist mode is set by FW.
|
||||
*/
|
||||
if (!reset && trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
|
||||
if (!reset)
|
||||
/* Enable persistence mode to avoid reset */
|
||||
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
|
||||
}
|
||||
|
||||
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
|
||||
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
|
||||
@ -1910,9 +1904,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
trans_pcie->cmd_queue = trans_cfg->cmd_queue;
|
||||
trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
|
||||
trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
|
||||
trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
|
||||
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
|
||||
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
|
||||
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
|
||||
trans_pcie->n_no_reclaim_cmds = 0;
|
||||
else
|
||||
@ -2205,11 +2199,10 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
|
||||
unsigned long txqs,
|
||||
bool freeze)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int queue;
|
||||
|
||||
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
|
||||
struct iwl_txq *txq = trans_pcie->txq[queue];
|
||||
struct iwl_txq *txq = trans->txqs.txq[queue];
|
||||
unsigned long now;
|
||||
|
||||
spin_lock_bh(&txq->lock);
|
||||
@ -2257,13 +2250,12 @@ next_queue:
|
||||
|
||||
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
|
||||
struct iwl_txq *txq = trans_pcie->txq[i];
|
||||
struct iwl_txq *txq = trans->txqs.txq[i];
|
||||
|
||||
if (i == trans_pcie->cmd_queue)
|
||||
if (i == trans->txqs.cmd.q_id)
|
||||
continue;
|
||||
|
||||
spin_lock_bh(&txq->lock);
|
||||
@ -2332,7 +2324,6 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
|
||||
|
||||
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq;
|
||||
unsigned long now = jiffies;
|
||||
bool overflow_tx;
|
||||
@ -2342,11 +2333,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
|
||||
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
|
||||
return -ENODEV;
|
||||
|
||||
if (!test_bit(txq_idx, trans_pcie->queue_used))
|
||||
if (!test_bit(txq_idx, trans->txqs.queue_used))
|
||||
return -EINVAL;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
|
||||
txq = trans_pcie->txq[txq_idx];
|
||||
txq = trans->txqs.txq[txq_idx];
|
||||
|
||||
spin_lock_bh(&txq->lock);
|
||||
overflow_tx = txq->overflow_tx ||
|
||||
@ -2394,7 +2385,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
|
||||
|
||||
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int cnt;
|
||||
int ret = 0;
|
||||
|
||||
@ -2403,9 +2393,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
|
||||
cnt < trans->trans_cfg->base_params->num_of_queues;
|
||||
cnt++) {
|
||||
|
||||
if (cnt == trans_pcie->cmd_queue)
|
||||
if (cnt == trans->txqs.cmd.q_id)
|
||||
continue;
|
||||
if (!test_bit(cnt, trans_pcie->queue_used))
|
||||
if (!test_bit(cnt, trans->txqs.queue_used))
|
||||
continue;
|
||||
if (!(BIT(cnt) & txq_bm))
|
||||
continue;
|
||||
@ -2579,13 +2569,12 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
|
||||
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
|
||||
struct iwl_dbgfs_tx_queue_state *state = v;
|
||||
struct iwl_trans *trans = priv->trans;
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq = trans_pcie->txq[state->pos];
|
||||
struct iwl_txq *txq = trans->txqs.txq[state->pos];
|
||||
|
||||
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
|
||||
(unsigned int)state->pos,
|
||||
!!test_bit(state->pos, trans_pcie->queue_used),
|
||||
!!test_bit(state->pos, trans_pcie->queue_stopped));
|
||||
!!test_bit(state->pos, trans->txqs.queue_used),
|
||||
!!test_bit(state->pos, trans->txqs.queue_stopped));
|
||||
if (txq)
|
||||
seq_printf(seq,
|
||||
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
|
||||
@ -2595,7 +2584,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
|
||||
else
|
||||
seq_puts(seq, "(unallocated)");
|
||||
|
||||
if (state->pos == trans_pcie->cmd_queue)
|
||||
if (state->pos == trans->txqs.cmd.q_id)
|
||||
seq_puts(seq, " (HCMD)");
|
||||
seq_puts(seq, "\n");
|
||||
|
||||
@ -3271,7 +3260,7 @@ static struct iwl_trans_dump_data
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_fw_error_dump_data *data;
|
||||
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
|
||||
struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
|
||||
struct iwl_fw_error_dump_txcmd *txcmd;
|
||||
struct iwl_trans_dump_data *dump_data;
|
||||
u32 len, num_rbs = 0, monitor_len = 0;
|
||||
|
@ -64,7 +64,6 @@
|
||||
*/
|
||||
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int txq_id;
|
||||
|
||||
/*
|
||||
@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
|
||||
* queues. This happens when we have an rfkill interrupt.
|
||||
* Since we stop Tx altogether - mark the queues as stopped.
|
||||
*/
|
||||
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
|
||||
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
|
||||
memset(trans->txqs.queue_stopped, 0,
|
||||
sizeof(trans->txqs.queue_stopped));
|
||||
memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
|
||||
|
||||
/* Unmap DMA from host system and free skb's */
|
||||
for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
|
||||
if (!trans_pcie->txq[txq_id])
|
||||
for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
|
||||
if (!trans->txqs.txq[txq_id])
|
||||
continue;
|
||||
iwl_pcie_gen2_txq_unmap(trans, txq_id);
|
||||
}
|
||||
@ -716,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_cmd_meta *out_meta;
|
||||
struct iwl_txq *txq = trans_pcie->txq[txq_id];
|
||||
struct iwl_txq *txq = trans->txqs.txq[txq_id];
|
||||
u16 cmd_len;
|
||||
int idx;
|
||||
void *tfd;
|
||||
@ -725,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||
"queue %d out of range", txq_id))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
|
||||
if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
|
||||
"TX on unused queue %d\n", txq_id))
|
||||
return -EINVAL;
|
||||
|
||||
@ -819,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
|
||||
struct iwl_host_cmd *cmd)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
|
||||
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
|
||||
struct iwl_device_cmd *out_cmd;
|
||||
struct iwl_cmd_meta *out_meta;
|
||||
unsigned long flags;
|
||||
@ -931,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
|
||||
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
|
||||
out_cmd->hdr_wide.reserved = 0;
|
||||
out_cmd->hdr_wide.sequence =
|
||||
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
|
||||
cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
|
||||
INDEX_TO_SEQ(txq->write_ptr));
|
||||
|
||||
cmd_pos = sizeof(struct iwl_cmd_header_wide);
|
||||
@ -979,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
|
||||
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
|
||||
iwl_get_cmd_string(trans, cmd->id), group_id,
|
||||
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
|
||||
cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
|
||||
cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
|
||||
|
||||
/* start the TFD with the minimum copy bytes */
|
||||
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
|
||||
@ -1056,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
|
||||
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
|
||||
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
|
||||
int cmd_idx;
|
||||
int ret;
|
||||
|
||||
@ -1175,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
|
||||
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq = trans_pcie->txq[txq_id];
|
||||
struct iwl_txq *txq = trans->txqs.txq[txq_id];
|
||||
|
||||
spin_lock_bh(&txq->lock);
|
||||
while (txq->write_ptr != txq->read_ptr) {
|
||||
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
|
||||
txq_id, txq->read_ptr);
|
||||
|
||||
if (txq_id != trans_pcie->cmd_queue) {
|
||||
if (txq_id != trans->txqs.cmd.q_id) {
|
||||
int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
|
||||
struct sk_buff *skb = txq->entries[idx].skb;
|
||||
|
||||
@ -1240,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
|
||||
*/
|
||||
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq;
|
||||
int i;
|
||||
|
||||
@ -1248,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
|
||||
"queue %d out of range", txq_id))
|
||||
return;
|
||||
|
||||
txq = trans_pcie->txq[txq_id];
|
||||
txq = trans->txqs.txq[txq_id];
|
||||
|
||||
if (WARN_ON(!txq))
|
||||
return;
|
||||
@ -1256,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
|
||||
iwl_pcie_gen2_txq_unmap(trans, txq_id);
|
||||
|
||||
/* De-alloc array of command/tx buffers */
|
||||
if (txq_id == trans_pcie->cmd_queue)
|
||||
if (txq_id == trans->txqs.cmd.q_id)
|
||||
for (i = 0; i < txq->n_window; i++) {
|
||||
kzfree(txq->entries[i].cmd);
|
||||
kzfree(txq->entries[i].free_buf);
|
||||
@ -1265,9 +1264,9 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
|
||||
|
||||
iwl_pcie_gen2_txq_free_memory(trans, txq);
|
||||
|
||||
trans_pcie->txq[txq_id] = NULL;
|
||||
trans->txqs.txq[txq_id] = NULL;
|
||||
|
||||
clear_bit(txq_id, trans_pcie->queue_used);
|
||||
clear_bit(txq_id, trans->txqs.queue_used);
|
||||
}
|
||||
|
||||
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
|
||||
@ -1327,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
|
||||
struct iwl_txq *txq,
|
||||
struct iwl_host_cmd *hcmd)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_tx_queue_cfg_rsp *rsp;
|
||||
int ret, qid;
|
||||
u32 wr_ptr;
|
||||
@ -1342,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
|
||||
qid = le16_to_cpu(rsp->queue_number);
|
||||
wr_ptr = le16_to_cpu(rsp->write_pointer);
|
||||
|
||||
if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
|
||||
if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
|
||||
WARN_ONCE(1, "queue index %d unsupported", qid);
|
||||
ret = -EIO;
|
||||
goto error_free_resp;
|
||||
}
|
||||
|
||||
if (test_and_set_bit(qid, trans_pcie->queue_used)) {
|
||||
if (test_and_set_bit(qid, trans->txqs.queue_used)) {
|
||||
WARN_ONCE(1, "queue %d already used", qid);
|
||||
ret = -EIO;
|
||||
goto error_free_resp;
|
||||
}
|
||||
|
||||
txq->id = qid;
|
||||
trans_pcie->txq[qid] = txq;
|
||||
trans->txqs.txq[qid] = txq;
|
||||
wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
|
||||
|
||||
/* Place first TFD at index corresponding to start sequence number */
|
||||
@ -1413,8 +1411,6 @@ error:
|
||||
|
||||
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
|
||||
"queue %d out of range", queue))
|
||||
return;
|
||||
@ -1425,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
|
||||
* allow the op_mode to call txq_disable after it already called
|
||||
* stop_device.
|
||||
*/
|
||||
if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
|
||||
if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
|
||||
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
|
||||
"queue %d not used", queue);
|
||||
return;
|
||||
@ -1433,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
|
||||
|
||||
iwl_pcie_gen2_txq_unmap(trans, queue);
|
||||
|
||||
iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
|
||||
trans_pcie->txq[queue] = NULL;
|
||||
iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
|
||||
trans->txqs.txq[queue] = NULL;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
|
||||
}
|
||||
|
||||
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int i;
|
||||
|
||||
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
|
||||
memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
|
||||
|
||||
/* Free all TX queues */
|
||||
for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
|
||||
if (!trans_pcie->txq[i])
|
||||
for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
|
||||
if (!trans->txqs.txq[i])
|
||||
continue;
|
||||
|
||||
iwl_pcie_gen2_txq_free(trans, i);
|
||||
@@ -1457,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)

int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
-struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *queue;
int ret;

/* alloc and init the tx queue */
-if (!trans_pcie->txq[txq_id]) {
+if (!trans->txqs.txq[txq_id]) {
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue) {
IWL_ERR(trans, "Not enough memory for tx queue\n");
return -ENOMEM;
}
-trans_pcie->txq[txq_id] = queue;
+trans->txqs.txq[txq_id] = queue;
ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
}
} else {
-queue = trans_pcie->txq[txq_id];
+queue = trans->txqs.txq[txq_id];
}

ret = iwl_pcie_txq_init(trans, queue, queue_size,
-(txq_id == trans_pcie->cmd_queue));
+(txq_id == trans->txqs.cmd.q_id));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
-trans_pcie->txq[txq_id]->id = txq_id;
-set_bit(txq_id, trans_pcie->queue_used);
+trans->txqs.txq[txq_id]->id = txq_id;
+set_bit(txq_id, trans->txqs.queue_used);

return 0;
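Every hunk in this series applies the same mechanical change: per-queue bookkeeping that used to live in the PCIe-private struct iwl_trans_pcie (the queue_used/queue_stopped bitmaps, the txq pointer array and the command-queue parameters) is now reached through the new trans->txqs grouping on the generic transport. A minimal sketch of the grouping these hunks assume is shown below; the field names follow the diff, but the exact array sizes and the authoritative definition live in iwl-trans.h:

/* Sketch only -- the grouping implied by the hunks, not the verbatim header. */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct {
		u8 fifo;		/* command-queue FIFO */
		u8 q_id;		/* command-queue id, replaces cmd_queue */
		unsigned int wdg_timeout;
	} cmd;
};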
@@ -183,8 +183,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
-struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
-struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+struct iwl_trans *trans = txq->trans;

spin_lock(&txq->lock);
/* check if triggered erroneously */
@@ -262,7 +261,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,

WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

-if (txq_id != trans_pcie->cmd_queue)
+if (txq_id != trans->txqs.cmd.q_id)
sta_id = tx_cmd->sta_id;

bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -280,7 +279,6 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
-struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;

@@ -293,7 +291,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
-txq_id != trans_pcie->cmd_queue &&
+txq_id != trans->txqs.cmd.q_id &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -324,13 +322,12 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
-struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;

for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
-struct iwl_txq *txq = trans_pcie->txq[i];
+struct iwl_txq *txq = trans->txqs.txq[i];

-if (!test_bit(i, trans_pcie->queue_used))
+if (!test_bit(i, trans->txqs.queue_used))
continue;

spin_lock_bh(&txq->lock);
@@ -535,7 +532,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
tfd_sz = trans_pcie->tfd_size * slots_num;

timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
-txq->trans_pcie = trans_pcie;
+txq->trans = trans;

txq->n_window = slots_num;

@@ -661,14 +658,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];

spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);

-if (txq_id != trans_pcie->cmd_queue) {
+if (txq_id != trans->txqs.cmd.q_id) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

if (WARN_ON_ONCE(!skb))
@@ -683,7 +680,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
unsigned long flags;

spin_lock_irqsave(&trans_pcie->reg_lock, flags);
-if (txq_id == trans_pcie->cmd_queue)
+if (txq_id == trans->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
@@ -712,7 +709,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;

@@ -722,7 +719,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);

/* De-alloc array of command/tx buffers */
-if (txq_id == trans_pcie->cmd_queue)
+if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
@@ -761,8 +758,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

/* make sure all queue are not stopped/used */
-memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
-memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+memset(trans->txqs.queue_stopped, 0,
+sizeof(trans->txqs.queue_stopped));
+memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -784,9 +782,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

-iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
-trans_pcie->cmd_fifo,
-trans_pcie->cmd_q_wdg_timeout);
+iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
+trans->txqs.cmd.fifo,
+trans->txqs.cmd.wdg_timeout);

/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -822,7 +820,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)

for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];
if (trans->trans_cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -898,8 +896,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
-memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
-memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+memset(trans->txqs.queue_stopped, 0,
+sizeof(trans->txqs.queue_stopped));
+memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

/* This can happen: start_hw, stop_device */
if (!trans_pcie->txq_memory)
@@ -923,7 +922,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

/* Tx queues */
if (trans_pcie->txq_memory) {
@@ -931,7 +930,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
-trans_pcie->txq[txq_id] = NULL;
+trans->txqs.txq[txq_id] = NULL;
}
}

@@ -992,7 +991,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
-bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -1000,14 +999,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
-trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
-ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
-trans_pcie->txq[txq_id]->id = txq_id;
+trans->txqs.txq[txq_id]->id = txq_id;
}

return 0;
@@ -1046,7 +1045,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
-bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -1054,7 +1053,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
-ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
@@ -1068,7 +1067,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
-trans_pcie->txq[txq_id]->dma_addr >> 8);
+trans->txqs.txq[txq_id]->dma_addr >> 8);
}

iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -1113,18 +1112,18 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];
int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;

/* This function is not meant to release cmd queue*/
-if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
return;

spin_lock_bh(&txq->lock);

-if (!test_bit(txq_id, trans_pcie->queue_used)) {
+if (!test_bit(txq_id, trans->txqs.queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
txq_id, ssn);
goto out;
@@ -1176,7 +1175,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);

if (iwl_queue_space(trans, txq) > txq->low_mark &&
-test_bit(txq_id, trans_pcie->queue_stopped)) {
+test_bit(txq_id, trans->txqs.queue_stopped)) {
struct sk_buff_head overflow_skbs;

__skb_queue_head_init(&overflow_skbs);
@@ -1229,8 +1228,7 @@ out:
/* Set wr_ptr of specific device and txq */
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
-struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];

spin_lock_bh(&txq->lock);

@@ -1290,7 +1288,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];
unsigned long flags;
int nfreed = 0;
u16 r;
@@ -1302,7 +1300,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)

if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
-WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
+WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
trans->trans_cfg->base_params->max_tfd_queue_size,
@@ -1364,11 +1362,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];
int fifo = -1;
bool scd_bug = false;

-if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+if (test_and_set_bit(txq_id, trans->txqs.queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
@@ -1377,7 +1375,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;

/* Disable the scheduler prior configuring the cmd queue */
-if (txq_id == trans_pcie->cmd_queue &&
+if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, 0);

@@ -1385,7 +1383,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
iwl_scd_txq_set_inactive(trans, txq_id);

/* Set this queue as a chain-building queue unless it is CMD */
-if (txq_id != trans_pcie->cmd_queue)
+if (txq_id != trans->txqs.cmd.q_id)
iwl_scd_txq_set_chain(trans, txq_id);

if (cfg->aggregate) {
@@ -1455,7 +1453,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);

/* enable the scheduler for this queue (only) */
-if (txq_id == trans_pcie->cmd_queue &&
+if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));

@@ -1474,8 +1472,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
-struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[txq_id];
+struct iwl_txq *txq = trans->txqs.txq[txq_id];

txq->ampdu = !shared_mode;
}
@@ -1488,8 +1485,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};

-trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
-trans_pcie->txq[txq_id]->frozen = false;
+trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+trans->txqs.txq[txq_id]->frozen = false;

/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -1497,7 +1494,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
-if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", txq_id);
return;
@@ -1511,7 +1508,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}

iwl_pcie_txq_unmap(trans, txq_id);
-trans_pcie->txq[txq_id]->ampdu = false;
+trans->txqs.txq[txq_id]->ampdu = false;

IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -1531,7 +1528,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1657,7 +1654,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
-cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));

cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1665,7 +1662,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
-cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;

@@ -1716,7 +1713,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
-cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1816,14 +1813,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
* in the queue management code. */
-if (WARN(txq_id != trans_pcie->cmd_queue,
+if (WARN(txq_id != trans->txqs.cmd.q_id,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1895,7 +1892,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;

@@ -2129,7 +2126,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
-struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+struct iwl_trans_pcie *trans_pcie =
+IWL_TRANS_GET_PCIE_TRANS(txq->trans);
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -2332,9 +2330,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;

-txq = trans_pcie->txq[txq_id];
+txq = trans->txqs.txq[txq_id];

-if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;

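The practical effect of the conversion is that code which only needs TX-queue state can work on the generic struct iwl_trans and no longer has to reach through the PCIe-private struct. A hedged sketch of the resulting access pattern follows; iwl_example_queue_id() and its error handling are illustrative, not taken from the patch:

/* Illustrative only, assuming the trans->txqs grouping sketched earlier. */
static int iwl_example_queue_id(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	/* Queue bookkeeping now lives on the generic transport ... */
	if (!txq || !test_bit(txq_id, trans->txqs.queue_used))
		return -EINVAL;

	/* ... so no IWL_TRANS_GET_PCIE_TRANS() cast is needed here. */
	return txq->id;
}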