First set of iwlwifi patches for 4.21

* PCI IDs for some new 9000-series cards;
* Improve antenna usage on connection problems;
* Some improvements in the debugging code;
* Other clean-ups and small fixes;

Merge tag 'iwlwifi-next-for-kalle-2018-11-11' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
commit 12d56175c8
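The recurring cleanup in this series replaces open-coded tests of
fwrt->fw->dbg.dump_mask with the iwl_fw_dbg_type_on() helper that the diff
adds further down. A minimal before/after sketch of the pattern (illustrative
only; dump_rxf() is a placeholder, not a driver function):

    /* before: open-coded mask test at every call site */
    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF))
            dump_rxf();

    /* after: the inline helper introduced in this series */
    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
            dump_rxf();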
@@ -240,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
 		return;
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
 		/* Pull RXF1 */
 		iwl_fwrt_dump_rxf(fwrt, dump_data,
 				  cfg->lmac[0].rxfifo1_size, 0, 0);
@@ -254,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
 				  LMAC2_PRPH_OFFSET, 2);
 	}
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
 		/* Pull TXF data from LMAC1 */
 		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
 			/* Mark the number of TXF we're pulling now */
@@ -279,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
 		}
 	}
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
 	    fw_has_capa(&fwrt->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 		/* Pull UMAC internal TXF data from all TXFs */
@@ -603,7 +603,7 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
 	u32 fifo_len = 0;
 	int i;
 
-	if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
+	if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
 		goto dump_txf;
 
 	/* Count RXF2 size */
@@ -614,7 +614,7 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
 		ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
 
 dump_txf:
-	if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
+	if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
 		goto dump_internal_txf;
 
 	/* Count TXF sizes */
@@ -627,7 +627,7 @@ dump_txf:
 	}
 
 dump_internal_txf:
-	if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+	if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
 	      fw_has_capa(&fwrt->fw->ucode_capa,
 			  IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
 		goto out;
@@ -639,6 +639,32 @@ out:
 	return fifo_len;
 }
 
+static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
+			    struct iwl_fw_error_dump_data **data)
+{
+	int i;
+
+	IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
+	for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
+		struct iwl_fw_error_dump_paging *paging;
+		struct page *pages =
+			fwrt->fw_paging_db[i].fw_paging_block;
+		dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+		(*data)->len = cpu_to_le32(sizeof(*paging) +
+					   PAGING_BLOCK_SIZE);
+		paging = (void *)(*data)->data;
+		paging->index = cpu_to_le32(i);
+		dma_sync_single_for_cpu(fwrt->trans->dev, addr,
+					PAGING_BLOCK_SIZE,
+					DMA_BIDIRECTIONAL);
+		memcpy(paging->data, page_address(pages),
+		       PAGING_BLOCK_SIZE);
+		(*data) = iwl_fw_error_next_data(*data);
+	}
+}
+
 static struct iwl_fw_error_dump_file *
 _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 		   struct iwl_fw_dump_ptrs *fw_error_dump)
@@ -655,13 +681,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 	u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
 	u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
 				0 : fwrt->trans->cfg->dccm2_len;
-	bool monitor_dump_only = false;
 	int i;
 
-	if (fwrt->dump.trig &&
-	    fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
-		monitor_dump_only = true;
-
 	/* SRAM - include stack CCM if driver knows the values for it */
 	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
 		const struct fw_img *img;
@@ -680,22 +701,22 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 
 		/* Make room for PRPH registers */
 		if (!fwrt->trans->cfg->gen2 &&
-		    fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
+		    iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
 			prph_len += iwl_fw_get_prph_len(fwrt);
 
 		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
-		    fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
+		    iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
 			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
 	}
 
 	file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
 		file_len += sizeof(*dump_data) + sizeof(*dump_info);
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG))
 		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
 		size_t hdr_len = sizeof(*dump_data) +
 				 sizeof(struct iwl_fw_error_dump_mem);
 
@@ -712,10 +733,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 	}
 
 	/* Make room for fw's virtual image pages, if it exists */
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
-	    !fwrt->trans->cfg->gen2 &&
-	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
-	    fwrt->fw_paging_db[0].fw_paging_block)
+	if (iwl_fw_dbg_is_paging_enabled(fwrt))
 		file_len += fwrt->num_of_paging_blk *
 			(sizeof(*dump_data) +
 			 sizeof(struct iwl_fw_error_dump_paging) +
@@ -727,12 +745,12 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 	}
 
 	/* If we only want a monitor dump, reset the file length */
-	if (monitor_dump_only) {
+	if (fwrt->dump.monitor_only) {
 		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
 			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
 	}
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
 	    fwrt->dump.desc)
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    fwrt->dump.desc->len;
@@ -746,7 +764,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
 	dump_data = (void *)dump_file->data;
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
 		dump_data->len = cpu_to_le32(sizeof(*dump_info));
 		dump_info = (void *)dump_data->data;
@@ -767,7 +785,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
 		/* Dump shared memory configuration */
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
 		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
@@ -804,7 +822,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 		iwl_read_radio_regs(fwrt, &dump_data);
 	}
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
 	    fwrt->dump.desc) {
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
@@ -817,10 +835,10 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 	}
 
 	/* In case we only want monitor dump, skip to dump trasport data */
-	if (monitor_dump_only)
+	if (fwrt->dump.monitor_only)
 		goto out;
 
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
 		const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
 			fwrt->fw->dbg.mem_tlv;
 
@@ -865,30 +883,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
 	}
 
 	/* Dump fw's virtual image */
-	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
-	    !fwrt->trans->cfg->gen2 &&
-	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
-	    fwrt->fw_paging_db[0].fw_paging_block) {
-		IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
-		for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
-			struct iwl_fw_error_dump_paging *paging;
-			struct page *pages =
-				fwrt->fw_paging_db[i].fw_paging_block;
-			dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
-
-			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
-			dump_data->len = cpu_to_le32(sizeof(*paging) +
-						     PAGING_BLOCK_SIZE);
-			paging = (void *)dump_data->data;
-			paging->index = cpu_to_le32(i);
-			dma_sync_single_for_cpu(fwrt->trans->dev, addr,
-						PAGING_BLOCK_SIZE,
-						DMA_BIDIRECTIONAL);
-			memcpy(paging->data, page_address(pages),
-			       PAGING_BLOCK_SIZE);
-			dump_data = iwl_fw_error_next_data(dump_data);
-		}
-	}
+	if (iwl_fw_dbg_is_paging_enabled(fwrt))
+		iwl_dump_paging(fwrt, &dump_data);
 
 	if (prph_len) {
 		iwl_dump_prph(fwrt->trans, &dump_data,
@@ -932,7 +928,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	}
 
 	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
-						       fwrt->dump.trig);
+						       fwrt->dump.monitor_only);
 	file_len = le32_to_cpu(dump_file->file_len);
 	fw_error_dump->fwrt_len = file_len;
 	if (fw_error_dump->trans_ptr) {
@@ -998,7 +994,8 @@ void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
 IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
 
 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
-			    const struct iwl_fw_dump_desc *desc, void *trigger,
+			    const struct iwl_fw_dump_desc *desc,
+			    bool monitor_only,
 			    unsigned int delay)
 {
 	/*
@@ -1028,7 +1025,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
 		 le32_to_cpu(desc->trig_desc.type));
 
 	fwrt->dump.desc = desc;
-	fwrt->dump.trig = trigger;
+	fwrt->dump.monitor_only = monitor_only;
 
 	schedule_delayed_work(&fwrt->dump.wk, delay);
 
@@ -1043,6 +1040,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
 {
 	struct iwl_fw_dump_desc *desc;
 	unsigned int delay = 0;
+	bool monitor_only = false;
 
 	if (trigger) {
 		u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
@@ -1059,6 +1057,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
 
 		trigger->occurrences = cpu_to_le16(occurrences);
 		delay = le16_to_cpu(trigger->trig_dis_ms);
+		monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
 	}
 
 	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
@@ -1070,7 +1069,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
 	desc->trig_desc.type = cpu_to_le32(trig);
 	memcpy(desc->trig_desc.data, str, len);
 
-	return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
+	return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
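The hunks above stop passing the trigger TLV pointer down the dump path: the
only thing the lower layers needed from it was the MONITOR_ONLY mode bit, so
it is computed once and carried as a bool. A sketch of the resulting flow,
using only names that appear in this diff:

    /* at collection time, derive the flag from the trigger */
    monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
    iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);

    /* later, the dump worker forwards the stored bool to the transport */
    iwl_trans_dump_data(fwrt->trans, fwrt->dump.monitor_only);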
@@ -101,13 +101,12 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
 	if (fwrt->dump.desc != &iwl_dump_desc_assert)
 		kfree(fwrt->dump.desc);
 	fwrt->dump.desc = NULL;
-	fwrt->dump.trig = NULL;
 }
 
 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
 			    const struct iwl_fw_dump_desc *desc,
-			    void *trigger, unsigned int delay);
+			    bool monitor_only, unsigned int delay);
 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
 		       enum iwl_fw_dbg_trigger trig,
 		       const char *str, size_t len,
@@ -310,12 +309,25 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 
 void iwl_fw_error_dump_wk(struct work_struct *work);
 
+static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type)
+{
+	return (fwrt->fw->dbg.dump_mask & BIT(type));
+}
+
 static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
 {
 	return fw_has_capa(&fwrt->fw->ucode_capa,
 			   IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
 	       fwrt->trans->cfg->d3_debug_data_length &&
-	       fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+	       iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
 }
 
+static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
+{
+	return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
+	       !fwrt->trans->cfg->gen2 &&
+	       fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
+	       fwrt->fw_paging_db[0].fw_paging_block;
+}
+
 void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
@@ -131,7 +131,7 @@ struct iwl_fw_runtime {
 	/* debug */
 	struct {
 		const struct iwl_fw_dump_desc *desc;
-		const struct iwl_fw_dbg_trigger_tlv *trig;
+		bool monitor_only;
 		struct delayed_work wk;
 
 		u8 conf;
@@ -8,6 +8,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -394,6 +396,7 @@ enum aux_misc_master1_en {
 #define AUX_MISC_MASTER1_SMPHR_STATUS	0xA20800
 #define RSA_ENABLE			0xA24B08
 #define PREG_AUX_BUS_WPROT_0		0xA04CC0
+#define PREG_PRPH_WPROT_0		0xA04CE0
 #define SB_CPU_1_STATUS			0xA01E30
 #define SB_CPU_2_STATUS			0xA01E34
 #define UMAG_SB_CPU_1_STATUS		0xA038C0
@@ -420,4 +423,8 @@ enum {
 #define UREG_CHICK		(0xA05C00)
 #define UREG_CHICK_MSI_ENABLE	BIT(24)
 #define UREG_CHICK_MSIX_ENABLE	BIT(25)
+
+#define HPM_DEBUG		0xA03440
+#define PERSISTENCE_BIT		BIT(12)
+#define PREG_WFPM_ACCESS	BIT(12)
 #endif /* __iwl_prph_h__ */
@@ -602,8 +602,7 @@ struct iwl_trans_ops {
 	void (*resume)(struct iwl_trans *trans);
 
 	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
-						 const struct iwl_fw_dbg_trigger_tlv
-						 *trigger);
+						 bool monitor_only);
 };
 
 /**
@@ -897,12 +896,11 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
 }
 
 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans,
-		    const struct iwl_fw_dbg_trigger_tlv *trigger)
+iwl_trans_dump_data(struct iwl_trans *trans, bool monitor_only)
 {
 	if (!trans->ops->dump_data)
 		return NULL;
-	return trans->ops->dump_data(trans, trigger);
+	return trans->ops->dump_data(trans, monitor_only);
 }
 
 static inline struct iwl_device_cmd *
@@ -1956,7 +1956,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 		set_bit(STATUS_FW_ERROR, &mvm->trans->status);
 		iwl_mvm_dump_nic_error_log(mvm);
 		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-					NULL, 0);
+					false, 0);
 		ret = 1;
 		goto err;
 	}
@@ -1299,10 +1299,11 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
 	int len;
 
 	len = scnprintf(buf, sizeof(buf) - 1,
-			"traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+			"traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
 			!!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
 			!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
-			!!(mvmvif->low_latency & LOW_LATENCY_VCMD));
+			!!(mvmvif->low_latency & LOW_LATENCY_VCMD),
+			!!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -1440,15 +1441,6 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
-static const char * const chanwidths[] = {
-	[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
-	[NL80211_CHAN_WIDTH_20] = "ht20",
-	[NL80211_CHAN_WIDTH_40] = "ht40",
-	[NL80211_CHAN_WIDTH_80] = "vht80",
-	[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
-	[NL80211_CHAN_WIDTH_160] = "vht160",
-};
-
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
 	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -965,11 +965,8 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
 	tx->tx_flags = cpu_to_le32(tx_flags);
 
 	if (!fw_has_capa(&mvm->fw->ucode_capa,
-			 IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
-		mvm->mgmt_last_antenna_idx =
-			iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
-					     mvm->mgmt_last_antenna_idx);
-	}
+			 IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION))
+		iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
 
 	tx->rate_n_flags =
 		cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
@@ -809,6 +809,21 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
 		sta = NULL;
 
+	/* If there is no sta, and it's not offchannel - send through AP */
+	if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+	    info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+		struct iwl_mvm_vif *mvmvif =
+			iwl_mvm_vif_from_mac80211(info->control.vif);
+		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+
+		if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+			/* mac80211 holds rcu read lock */
+			sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+			if (IS_ERR_OR_NULL(sta))
+				goto drop;
+		}
+	}
+
 	if (sta) {
 		if (iwl_mvm_defer_tx(mvm, sta, skb))
 			return;
@@ -2379,6 +2394,12 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 	/* must be set before quota calculations */
 	mvmvif->ap_ibss_active = true;
 
+	if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+		iwl_mvm_vif_set_low_latency(mvmvif, true,
+					    LOW_LATENCY_VIF_TYPE);
+		iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id);
+	}
+
 	/* power updated needs to be done before quotas */
 	iwl_mvm_power_update_mac(mvm);
 
@@ -2441,6 +2462,12 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 	mvmvif->ap_ibss_active = false;
 	mvm->ap_last_beacon_gp2 = 0;
 
+	if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+		iwl_mvm_vif_set_low_latency(mvmvif, false,
+					    LOW_LATENCY_VIF_TYPE);
+		iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id);
+	}
+
 	iwl_mvm_bt_coex_vif_change(mvm);
 
 	iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
@@ -303,11 +303,13 @@ enum iwl_bt_force_ant_mode {
  * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
  * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
  * @LOW_LATENCY_VCMD: low latency mode set from vendor command
+ * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
  */
 enum iwl_mvm_low_latency_cause {
 	LOW_LATENCY_TRAFFIC = BIT(0),
 	LOW_LATENCY_DEBUGFS = BIT(1),
 	LOW_LATENCY_VCMD = BIT(2),
+	LOW_LATENCY_VIF_TYPE = BIT(3),
 };
 
 /**
@@ -844,7 +846,6 @@ struct iwl_mvm {
 	u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
 
 	struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
-	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
 	struct work_struct add_stream_wk; /* To add streams to queues */
 
 	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
@@ -1521,6 +1522,11 @@ static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
 	       mvm->fw->valid_rx_ant;
 }
 
+static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
+{
+	*ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant);
+}
+
 static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
 {
 	u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
@@ -1846,6 +1852,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 /* get SystemLowLatencyMode - only needed for beacon threshold? */
 bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency,
+				  u16 mac_id);
 
 /* get VMACLowLatencyMode */
 static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
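The new iwl_mvm_toggle_tx_ant() helper above wraps the existing round-robin
iwl_mvm_next_antenna() so that all its call sites (beacon TX, scan, and now
per-station data TX during association) share one idiom. A usage sketch,
taken from the TX-response handling later in this diff:

    /* illustrative only: after a failed management frame, rotate the
     * antenna used for management TX */
    if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
        ieee80211_is_mgmt(hdr->frame_control))
            iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);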
@@ -676,7 +676,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
 	INIT_LIST_HEAD(&mvm->async_handlers_list);
 	spin_lock_init(&mvm->time_event_lock);
-	spin_lock_init(&mvm->queue_info_lock);
 
 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -846,6 +845,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	iwl_mvm_tof_init(mvm);
 
+	iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
 	return op_mode;
 
 out_unregister:
@@ -1108,11 +1109,7 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->hw_queue_to_mac80211[hw_queue];
-	spin_unlock_bh(&mvm->queue_info_lock);
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
 	iwl_mvm_stop_mac_queues(mvm, mq);
 }
@@ -1138,11 +1135,7 @@ void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->hw_queue_to_mac80211[hw_queue];
-	spin_unlock_bh(&mvm->queue_info_lock);
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
 	iwl_mvm_start_mac_queues(mvm, mq);
 }
@@ -1240,7 +1233,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 	 */
 	if (!mvm->fw_restart && fw_error) {
 		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-					NULL, 0);
+					false, 0);
 	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;
 
@@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
 {
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
 	u8 supp = 0;
 
+	if (he_cap && he_cap->has_he)
+		return 0;
+
 	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
 		supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
 	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
@@ -1422,12 +1422,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	/* update aggregation data for monitor sake on default queue */
 	if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
 		bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
-		u64 he_phy_data;
-
-		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-			he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
-		else
-			he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
 
 		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
 		rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -205,9 +205,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
 {
 	u32 tx_ant;
 
-	mvm->scan_last_antenna_idx =
-		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
-				     mvm->scan_last_antenna_idx);
+	iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
 	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
 
 	if (band == NL80211_BAND_2GHZ && !no_cck)
@@ -319,9 +319,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	sta_id = mvm->queue_info[queue].ra_sta_id;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	rcu_read_lock();
 
@@ -372,25 +370,17 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
 		return -EINVAL;
 
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		spin_lock_bh(&mvm->queue_info_lock);
-
 		if (remove_mac_queue)
 			mvm->hw_queue_to_mac80211[queue] &=
 				~BIT(mac80211_queue);
 
-		spin_unlock_bh(&mvm->queue_info_lock);
-
 		iwl_trans_txq_free(mvm->trans, queue);
 
 		return 0;
 	}
 
-	spin_lock_bh(&mvm->queue_info_lock);
-
-	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
-		spin_unlock_bh(&mvm->queue_info_lock);
+	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
 		return 0;
-	}
 
 	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
@@ -426,10 +416,8 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
 			    mvm->hw_queue_to_mac80211[queue]);
 
 	/* If the queue is still enabled - nothing left to do in this func */
-	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
-		spin_unlock_bh(&mvm->queue_info_lock);
+	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
 		return 0;
-	}
 
 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
 	cmd.tid = mvm->queue_info[queue].txq_tid;
@@ -448,8 +436,6 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
 	mvm->queue_info[queue].reserved = false;
 
-	spin_unlock_bh(&mvm->queue_info_lock);
-
 	iwl_trans_txq_disable(mvm->trans, queue, false);
 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
 				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
@@ -474,10 +460,8 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	sta_id = mvm->queue_info[queue].ra_sta_id;
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 					lockdep_is_held(&mvm->mutex));
@@ -516,10 +500,8 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	sta_id = mvm->queue_info[queue].ra_sta_id;
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	rcu_read_lock();
 
@@ -545,6 +527,16 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 
 	rcu_read_unlock();
 
+	/*
+	 * The TX path may have been using this TXQ_ID from the tid_data,
+	 * so make sure it's no longer running so that we can safely reuse
+	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
+	 * above, but nothing guarantees we've stopped using them. Thus,
+	 * without this, we could get to iwl_mvm_disable_txq() and remove
+	 * the queue while still sending frames to it.
+	 */
+	synchronize_net();
+
 	return disable_agg_tids;
 }
 
@@ -562,11 +554,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
 	sta_id = mvm->queue_info[queue].ra_sta_id;
 	tid = mvm->queue_info[queue].txq_tid;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	same_sta = sta_id == new_sta_id;
 
@@ -610,7 +600,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 	 * by the inactivity checker.
 	 */
 	lockdep_assert_held(&mvm->mutex);
-	lockdep_assert_held(&mvm->queue_info_lock);
 
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
@@ -696,10 +685,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
 	 * we need to check if the numerical value of X is LARGER than of Y.
 	 */
-	spin_lock_bh(&mvm->queue_info_lock);
 	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
-		spin_unlock_bh(&mvm->queue_info_lock);
-
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "No redirection needed on TXQ #%d\n",
 				    queue);
@@ -711,7 +697,6 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 	cmd.tid = mvm->queue_info[queue].txq_tid;
 	mq = mvm->hw_queue_to_mac80211[queue];
 	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
@@ -737,9 +722,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
 
 	/* Update the TID "owner" of the queue */
-	spin_lock_bh(&mvm->queue_info_lock);
 	mvm->queue_info[queue].txq_tid = tid;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
 
@@ -748,9 +731,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
 
 	/* Update AC marking of the queue */
-	spin_lock_bh(&mvm->queue_info_lock);
 	mvm->queue_info[queue].mac80211_ac = ac;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	/*
 	 * Mark queue as shared in transport if shared
@@ -773,7 +754,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
 {
 	int i;
 
-	lockdep_assert_held(&mvm->queue_info_lock);
+	lockdep_assert_held(&mvm->mutex);
 
 	/* This should not be hit with new TX path */
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -853,11 +834,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
 {
 	bool enable_queue = true;
 
-	spin_lock_bh(&mvm->queue_info_lock);
-
 	/* Make sure this TID isn't already enabled */
 	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
-		spin_unlock_bh(&mvm->queue_info_lock);
 		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
 			queue, tid);
 		return false;
@@ -893,8 +871,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
 			    queue, mvm->queue_info[queue].tid_bitmap,
 			    mvm->hw_queue_to_mac80211[queue]);
 
-	spin_unlock_bh(&mvm->queue_info_lock);
-
 	return enable_queue;
 }
 
@@ -949,9 +925,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
 		return;
@@ -968,9 +942,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
 		return;
 	}
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	mvm->queue_info[queue].txq_tid = tid;
-	spin_unlock_bh(&mvm->queue_info_lock);
 	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
 			    queue, tid);
 }
@@ -992,10 +964,8 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	sta_id = mvm->queue_info[queue].ra_sta_id;
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	/* Find TID for queue, and make sure it is the only one on the queue */
 	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
@@ -1052,9 +1022,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
 		}
 	}
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
-	spin_unlock_bh(&mvm->queue_info_lock);
 }
 
 /*
@@ -1073,7 +1041,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
 	int tid;
 
 	lockdep_assert_held(&mvmsta->lock);
-	lockdep_assert_held(&mvm->queue_info_lock);
+	lockdep_assert_held(&mvm->mutex);
 
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return false;
@@ -1174,8 +1142,6 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
 	if (iwl_mvm_has_new_tx_api(mvm))
 		return -ENOSPC;
 
-	spin_lock_bh(&mvm->queue_info_lock);
-
 	rcu_read_lock();
 
 	/* we skip the CMD queue below by starting at 1 */
@@ -1230,12 +1196,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
 
 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-		/* this isn't so nice, but works OK due to the way we loop */
-		spin_unlock(&mvm->queue_info_lock);
-
-		/* and we need this locking order */
-		spin_lock(&mvmsta->lock);
-		spin_lock(&mvm->queue_info_lock);
+		spin_lock_bh(&mvmsta->lock);
 		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
 						   inactive_tid_bitmap,
 						   &unshare_queues,
@@ -1243,11 +1204,10 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
 		if (ret >= 0 && free_queue < 0)
 			free_queue = ret;
-		/* only unlock sta lock - we still need the queue info lock */
-		spin_unlock(&mvmsta->lock);
+		spin_unlock_bh(&mvmsta->lock);
 	}
 
 	rcu_read_unlock();
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	/* Reconfigure queues requiring reconfiguation */
 	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
@@ -1296,8 +1256,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	tfd_queue_mask = mvmsta->tfd_queue_msk;
 	spin_unlock_bh(&mvmsta->lock);
 
-	spin_lock_bh(&mvm->queue_info_lock);
-
 	/*
 	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
 	 * exists
@@ -1327,12 +1285,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 						IWL_MVM_DQA_MIN_DATA_QUEUE,
 						IWL_MVM_DQA_MAX_DATA_QUEUE);
 	if (queue < 0) {
-		spin_unlock_bh(&mvm->queue_info_lock);
-
 		/* try harder - perhaps kill an inactive queue */
 		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
-
-		spin_lock_bh(&mvm->queue_info_lock);
 	}
 
 	/* No free queue - we'll have to share */
@@ -1353,8 +1307,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	if (queue > 0 && !shared_queue)
 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
 
-	spin_unlock_bh(&mvm->queue_info_lock);
-
 	/* This shouldn't happen - out of queues */
 	if (WARN_ON(queue <= 0)) {
 		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
@@ -1556,8 +1508,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 	/* run the general cleanup/unsharing of queues */
 	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
 
-	spin_lock_bh(&mvm->queue_info_lock);
-
 	/* Make sure we have free resources for this STA */
 	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
 	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
@@ -1569,19 +1519,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 					IWL_MVM_DQA_MIN_DATA_QUEUE,
 					IWL_MVM_DQA_MAX_DATA_QUEUE);
 	if (queue < 0) {
-		spin_unlock_bh(&mvm->queue_info_lock);
 		/* try again - this time kick out a queue if needed */
 		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
 		if (queue < 0) {
 			IWL_ERR(mvm, "No available queues for new station\n");
 			return -ENOSPC;
 		}
-		spin_lock_bh(&mvm->queue_info_lock);
 	}
 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
 
-	spin_unlock_bh(&mvm->queue_info_lock);
-
 	mvmsta->reserved_queue = queue;
 
 	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
@@ -1822,6 +1768,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	if (iwl_mvm_has_tlc_offload(mvm))
 		iwl_mvm_rs_add_sta(mvm, mvm_sta);
 
+	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+
 update_fw:
 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
 	if (ret)
@@ -2004,18 +1952,14 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
 		 * should be manually marked as free again
 		 */
-		spin_lock_bh(&mvm->queue_info_lock);
 		status = &mvm->queue_info[reserved_txq].status;
 		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
 			 (*status != IWL_MVM_QUEUE_FREE),
 			 "sta_id %d reserved txq %d status %d",
-			 sta_id, reserved_txq, *status)) {
-			spin_unlock_bh(&mvm->queue_info_lock);
+			 sta_id, reserved_txq, *status))
 			return -EINVAL;
-		}
 
 		*status = IWL_MVM_QUEUE_FREE;
-		spin_unlock_bh(&mvm->queue_info_lock);
 	}
 
 	if (vif->type == NL80211_IFTYPE_STATION &&
@@ -2873,8 +2817,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EIO;
 	}
 
-	spin_lock(&mvm->queue_info_lock);
-
 	/*
 	 * Note the possible cases:
 	 *  1. An enabled TXQ - TXQ needs to become agg'ed
@@ -2889,7 +2831,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (txq_id < 0) {
 		ret = txq_id;
 		IWL_ERR(mvm, "Failed to allocate agg queue\n");
-		goto release_locks;
+		goto out;
 	}
 
 	/* TXQ hasn't yet been enabled, so mark it only as reserved */
@@ -2900,11 +2842,9 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can't start tid %d agg on shared queue!\n",
 				    tid);
-		goto release_locks;
+		goto out;
 	}
 
-	spin_unlock(&mvm->queue_info_lock);
-
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "AGG for tid %d will be on queue #%d\n",
 			    tid, txq_id);
@@ -2935,10 +2875,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	}
 
 	ret = 0;
-	goto out;
 
-release_locks:
-	spin_unlock(&mvm->queue_info_lock);
 out:
 	spin_unlock_bh(&mvmsta->lock);
 
@@ -3007,9 +2944,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	queue_status = mvm->queue_info[queue].status;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	/* Maybe there is no need to even alloc a queue... */
 	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
@@ -3055,9 +2990,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	}
 
 	/* No need to mark as reserved */
-	spin_lock_bh(&mvm->queue_info_lock);
 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 out:
 	/*
@@ -3083,10 +3016,11 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
 {
 	u16 txq_id = tid_data->txq_id;
 
+	lockdep_assert_held(&mvm->mutex);
+
 	if (iwl_mvm_has_new_tx_api(mvm))
 		return;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	/*
 	 * The TXQ is marked as reserved only if no traffic came through yet
 	 * This means no traffic has been sent on this TID (agg'd or not), so
@@ -3098,8 +3032,6 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
 		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
 	}
-
-	spin_unlock_bh(&mvm->queue_info_lock);
 }
 
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
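The hunks above remove queue_info_lock entirely: every writer of
mvm->queue_info[] already runs under the big mvm->mutex, so the dedicated
spinlock only added lock-ordering headaches, and the lockdep assertions are
switched to the mutex. A minimal sketch of the new invariant
(example_queue_helper is hypothetical, not a driver function):

    /* queue-state helpers now assume the mvm mutex instead of a
     * dedicated spinlock (see the lockdep changes above) */
    static int example_queue_helper(struct iwl_mvm *mvm, int queue)
    {
            lockdep_assert_held(&mvm->mutex);       /* was queue_info_lock */
            return mvm->queue_info[queue].txq_tid;
    }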
@@ -397,6 +397,9 @@ struct iwl_mvm_rxq_dup_data {
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
  * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
+ * @tx_ant: the index of the antenna to use for data tx to this station. Only
+ *	used during connection establishment (e.g. for the 4 way handshake
+ *	exchange).
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
  * in the structure for use by driver. This structure is placed in that
@@ -439,6 +442,7 @@ struct iwl_mvm_sta {
 	u8 agg_tids;
 	u8 sleep_tx_count;
 	u8 avg_energy;
+	u8 tx_ant;
 };
 
 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -302,13 +302,30 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 					offload_assist));
 }
 
+static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
+			      struct ieee80211_tx_info *info,
+			      struct ieee80211_sta *sta, __le16 fc)
+{
+	if (info->band == NL80211_BAND_2GHZ &&
+	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+
+	if (sta && ieee80211_is_data(fc)) {
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
+	}
+
+	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+}
+
 static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
 			       struct ieee80211_tx_info *info,
 			       struct ieee80211_sta *sta)
 {
 	int rate_idx;
 	u8 rate_plcp;
-	u32 rate_flags;
+	u32 rate_flags = 0;
 
 	/* HT rate doesn't make sense for a non data frame */
 	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
@@ -332,13 +349,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
 	/* Get PLCP rate for tx_cmd->rate_n_flags */
 	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
 
-	if (info->band == NL80211_BAND_2GHZ &&
-	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
-	else
-		rate_flags =
-			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
-
 	/* Set CCK flag as needed */
 	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
 		rate_flags |= RATE_MCS_CCK_MSK;
@@ -346,6 +356,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
 	return (u32)rate_plcp | rate_flags;
 }
 
+static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
+				       struct ieee80211_tx_info *info,
+				       struct ieee80211_sta *sta, __le16 fc)
+{
+	return iwl_mvm_get_tx_rate(mvm, info, sta) |
+		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
+}
+
 /*
  * Sets the fields in the Tx cmd that are rate related
  */
@@ -373,20 +391,21 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 	 */
 
 	if (ieee80211_is_data(fc) && sta) {
-		tx_cmd->initial_rate_index = 0;
-		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
-		return;
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
+			tx_cmd->initial_rate_index = 0;
+			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+			return;
+		}
 	} else if (ieee80211_is_back_req(fc)) {
 		tx_cmd->tx_flags |=
 			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
 	}
 
-	mvm->mgmt_last_antenna_idx =
-		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
-				     mvm->mgmt_last_antenna_idx);
-
 	/* Set the rate in the TX cmd */
-	tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+	tx_cmd->rate_n_flags =
+		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
 }
 
 static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -491,6 +510,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 	u16 offload_assist = 0;
 	u32 rate_n_flags = 0;
 	u16 flags = 0;
+	struct iwl_mvm_sta *mvmsta = sta ?
+		iwl_mvm_sta_from_mac80211(sta) : NULL;
 
 	if (ieee80211_is_data_qos(hdr->frame_control)) {
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -510,10 +531,16 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 		if (!info->control.hw_key)
 			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
 
-		/* For data packets rate info comes from the fw */
-		if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+		/*
+		 * For data packets rate info comes from the fw. Only
+		 * set rate/antenna during connection establishment.
+		 */
+		if (sta && (!ieee80211_is_data(hdr->frame_control) ||
+			    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
 			flags |= IWL_TX_FLAGS_CMD_RATE;
-			rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+			rate_n_flags =
+				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+							    hdr->frame_control);
 		}
 
 		if (mvm->trans->cfg->device_family >=
@@ -1160,11 +1187,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	 * If we have timed-out TIDs - schedule the worker that will
 	 * reconfig the queues and update them
 	 *
-	 * Note that the mvm->queue_info_lock isn't being taken here in
-	 * order to not serialize the TX flow. This isn't dangerous
-	 * because scheduling mvm->add_stream_wk can't ruin the state,
-	 * and if we DON'T schedule it due to some race condition then
-	 * next TX we get here we will.
+	 * Note that the no lock is taken here in order to not serialize
+	 * the TX flow. This isn't dangerous because scheduling
+	 * mvm->add_stream_wk can't ruin the state, and if we DON'T
+	 * schedule it due to some race condition then next TX we get
+	 * here we will.
 	 */
 	if (unlikely(mvm->queue_info[txq_id].status ==
 		     IWL_MVM_QUEUE_SHARED &&
@@ -1501,6 +1528,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			break;
 		}
 
+		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+		    ieee80211_is_mgmt(hdr->frame_control))
+			iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
 		/*
 		 * If we are freeing multiple frames, mark all the frames
 		 * but the first one as acked, since they were acknowledged
@@ -1600,6 +1631,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			iwl_mvm_tx_airtime(mvm, mvmsta,
 					   le16_to_cpu(tx_resp->wireless_media_time));
 
+		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
+			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
+
 		if (sta->wme && tid != IWL_MGMT_TID) {
 			struct iwl_mvm_tid_data *tid_data =
 				&mvmsta->tid_data[tid];
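These tx.c hunks implement the "improve antenna usage on connection problems"
item from the commit message: until a station reaches
IEEE80211_STA_AUTHORIZED, the driver picks the rate and antenna itself
instead of letting the firmware do it, and a failed frame rotates that
station's antenna. A sketch of the flow, using only names from the diff:

    /* 1) while not yet authorized, the driver sets the rate/antenna */
    rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc);

    /* 2) on a failed TX response, rotate that station's antenna so the
     * next handshake frame (e.g. 4-way handshake) tries the other one */
    if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
        mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
            iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);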
@@ -285,6 +285,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
 	return last_idx;
 }
 
+#define FW_SYSASSERT_CPU_MASK 0xf0000000
 static const struct {
 	const char *name;
 	u8 num;
@@ -301,6 +302,9 @@ static const struct {
 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
 	{ "NMI_INTERRUPT_HOST", 0x66 },
+	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
@@ -312,7 +316,7 @@ static const char *desc_lookup(u32 num)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
-		if (advanced_lookup[i].num == num)
+		if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
 			return advanced_lookup[i].name;
 
 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
@@ -618,13 +622,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
-	spin_lock_bh(&mvm->queue_info_lock);
 	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
-		 "Trying to reconfig unallocated queue %d\n", queue)) {
-		spin_unlock_bh(&mvm->queue_info_lock);
+		 "Trying to reconfig unallocated queue %d\n", queue))
 		return -ENXIO;
-	}
-	spin_unlock_bh(&mvm->queue_info_lock);
 
 	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
 
@@ -768,6 +768,29 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
 	return result;
 }
 
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
+				  bool low_latency, u16 mac_id)
+{
+	struct iwl_mac_low_latency_cmd cmd = {
+		.mac_id = cpu_to_le32(mac_id)
+	};
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+		return;
+
+	if (low_latency) {
+		/* currently we don't care about the direction */
+		cmd.low_latency_rx = 1;
+		cmd.low_latency_tx = 1;
+	}
+
+	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
+						 MAC_CONF_GROUP, 0),
+				 0, sizeof(cmd), &cmd))
+		IWL_ERR(mvm, "Failed to send low latency command\n");
+}
+
 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			       bool low_latency,
 			       enum iwl_mvm_low_latency_cause cause)
@@ -786,24 +809,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (low_latency == prev)
 		return 0;
 
-	if (fw_has_capa(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
-		struct iwl_mac_low_latency_cmd cmd = {
-			.mac_id = cpu_to_le32(mvmvif->id)
-		};
-
-		if (low_latency) {
-			/* currently we don't care about the direction */
-			cmd.low_latency_rx = 1;
-			cmd.low_latency_tx = 1;
-		}
-
-		res = iwl_mvm_send_cmd_pdu(mvm,
-					   iwl_cmd_id(LOW_LATENCY_CMD,
-						      MAC_CONF_GROUP, 0),
-					   0, sizeof(cmd), &cmd);
-		if (res)
-			IWL_ERR(mvm, "Failed to send low latency command\n");
-	}
+	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
 
 	res = iwl_mvm_update_quotas(mvm, false, NULL);
 	if (res)
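The desc_lookup() change above masks off the top nibble of a firmware
SYSASSERT code before the table lookup: that nibble only encodes which CPU
raised the assert, so LMAC/UMAC-prefixed codes still resolve to a name. A
worked example with a hypothetical assert value:

    /* e.g. a hypothetical assert code 0x20000070: the top nibble only
     * says which CPU asserted, so mask it off before the lookup */
    u32 num = 0x20000070;
    num &= ~FW_SYSASSERT_CPU_MASK;      /* -> 0x00000070 */
    /* desc_lookup() now matches "NMI_INTERRUPT_LMAC_FATAL" (0x70) */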
@@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+	{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
@@ -1729,6 +1729,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 hpm;
 	int err;
 
 	lockdep_assert_held(&trans_pcie->mutex);
@@ -1739,6 +1740,17 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 		return err;
 	}
 
+	hpm = iwl_trans_read_prph(trans, HPM_DEBUG);
+	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
+		if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) &
+		    PREG_WFPM_ACCESS) {
+			IWL_ERR(trans,
+				"Error, can not clear persistence bit\n");
+			return -EPERM;
+		}
+		iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT);
+	}
+
 	iwl_trans_pcie_sw_reset(trans);
 
 	err = iwl_pcie_apm_init(trans);
@@ -2978,7 +2990,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
 
 static struct iwl_trans_dump_data
 *iwl_trans_pcie_dump_data(struct iwl_trans *trans,
-			  const struct iwl_fw_dbg_trigger_tlv *trigger)
+			  bool monitor_only)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_fw_error_dump_data *data;
@@ -3002,7 +3014,7 @@ static struct iwl_trans_dump_data
 	/* FW monitor */
 	monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
 
-	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+	if (monitor_only) {
 		if (!(trans->dbg_dump_mask &
 		      BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
 			return NULL;
@@ -1228,8 +1228,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
 	/* Place first TFD at index corresponding to start sequence number */
 	txq->read_ptr = wr_ptr;
 	txq->write_ptr = wr_ptr;
-	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
-			   (txq->write_ptr) | (qid << 16));
+
 	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
 	iwl_free_resp(hcmd);