iwlwifi: move all bus-independent TX functions to common code
After moving out all TX fields that are not related to the PCIe bus, it's time to move the code itself to a common place. We also rename the pcie parts of the function names to txq.

Signed-off-by: Mordechay Goodstein <mordechay.goodstein@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20200930161256.3947a5276003.I3fe1bec2b25a965a49532df288f47b8b59eb1500@changeid
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
commit 0cd1ad2d7f
parent 0739a7d70e
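For orientation while reading the hunks below: the helpers keep their signatures, only the iwl_pcie_/iwl_queue_ prefixes become iwl_txq_ and the declarations move to queue/tx.h. A minimal, hypothetical caller (not part of this patch), built only from helpers visible in the diff:

#include "queue/tx.h"

/* Illustrative only: advance the read pointer with the common wrap helper
 * and wake the queue once enough space has been freed.
 */
static void example_advance_and_wake(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

	if (iwl_txq_space(trans, txq) > txq->low_mark)
		iwl_wake_queue(trans, txq);
}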
@@ -13,6 +13,7 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += queue/tx.o

iwlwifi-objs += fw/img.o fw/notif-wait.o
iwlwifi-objs += fw/dbg.o
@@ -66,6 +66,7 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
#include "queue/tx.h"
#include <linux/dmapool.h>

struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
@@ -150,11 +151,29 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,

	WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);

	trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans->txqs.tso_hdr_page) {
		kmem_cache_destroy(trans->dev_cmd_pool);
		return NULL;
	}

	return trans;
}

void iwl_trans_free(struct iwl_trans *trans)
{
	int i;

	for_each_possible_cpu(i) {
		struct iwl_tso_hdr_page *p =
			per_cpu_ptr(trans->txqs.tso_hdr_page, i);

		if (p->page)
			__free_page(p->page);
	}

	free_percpu(trans->txqs.tso_hdr_page);

	kmem_cache_destroy(trans->dev_cmd_pool);
}
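The hunk above hands ownership of the per-CPU TSO header pages to the common transport code: alloc_percpu() gives every possible CPU its own struct iwl_tso_hdr_page slot, and a CPU may lazily attach a page to it, so teardown has to visit all possible CPUs and free any attached page before releasing the per-CPU area. A condensed sketch of that lifecycle, illustrative only and mirroring the code above:

struct iwl_tso_hdr_page __percpu *pages;
int cpu;

pages = alloc_percpu(struct iwl_tso_hdr_page);	/* zeroed slot per CPU */
if (!pages)
	return NULL;

/* ... later, on teardown ... */
for_each_possible_cpu(cpu) {
	struct iwl_tso_hdr_page *p = per_cpu_ptr(pages, cpu);

	if (p->page)	/* only CPUs that actually built TSO headers have one */
		__free_page(p->page);
}
free_percpu(pages);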
@@ -928,6 +928,7 @@ struct iwl_trans_txqs {
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	struct {
		u8 fifo;
@@ -73,7 +73,7 @@ static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
	if (!result)
		return NULL;

-	if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+	if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
		void *old = result;
		dma_addr_t oldphys = *phys;
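The renamed iwl_txq_crosses_4g_boundary() helper (its definition appears in the new queue/tx.h further down) only has to compare the upper 32 bits of the start and end addresses. A worked example, illustrative only:

/* phys = 0x0fffff000, len = 0x2000:
 * upper_32_bits(0x0fffff000)          = 0x0
 * upper_32_bits(0x0fffff000 + 0x2000) = 0x1
 * -> the buffer would straddle a 4 GiB boundary, so the allocation is redone.
 */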
@@ -79,6 +79,7 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "queue/tx.h"

/*
 * RX related structures and functions
@@ -240,16 +241,6 @@ struct iwl_rb_allocator {
	struct work_struct rx_alloc;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
@@ -268,28 +259,6 @@ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
		sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
@@ -427,8 +396,6 @@ struct iwl_trans_pcie {

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
@@ -566,19 +533,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
 * TX / HCMD
 ******************************************************/
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -589,14 +544,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
@@ -787,20 +738,6 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans->txqs.tfd.size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
@@ -852,37 +789,6 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -949,23 +855,12 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans *trans,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
@@ -974,28 +869,10 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */
@@ -1359,7 +1359,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
-		cmd_index = iwl_pcie_get_cmd_index(txq, index);
+		cmd_index = iwl_txq_get_cmd_index(txq, index);

		if (rxq->id == trans_pcie->def_rx_queue)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
@@ -162,7 +162,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
-		iwl_pcie_gen2_tx_stop(trans);
+		iwl_txq_gen2_tx_stop(trans);
		iwl_pcie_rx_stop(trans);
	}

@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
		return -ENOMEM;

	/* Allocate or reset and init all Tx and Command queues */
-	if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
+	if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
		return -ENOMEM;

	/* enable shadow regs in HW */
@@ -1955,7 +1955,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
	iwl_pcie_synchronize_irqs(trans);

	if (trans->trans_cfg->gen2)
-		iwl_pcie_gen2_tx_free(trans);
+		iwl_txq_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);
@@ -1979,15 +1979,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)

	iwl_pcie_free_fw_monitor(trans);

	for_each_possible_cpu(i) {
		struct iwl_tso_hdr_page *p =
			per_cpu_ptr(trans_pcie->tso_hdr_page, i);

		if (p->page)
			__free_page(p->page);
	}

	free_percpu(trans_pcie->tso_hdr_page);
	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}
@@ -2280,36 +2271,6 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)

#define IWL_FLUSH_WAIT_MS 2000

void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
				       struct iwl_trans_rxq_dma_data *data)
{
@@ -2378,7 +2339,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"fail to flush all tx fifo queues Q %d\n", txq_idx);
-		iwl_trans_pcie_log_scd_error(trans, txq);
+		iwl_txq_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

@@ -3339,7 +3300,7 @@ static struct iwl_trans_dump_data
		spin_lock_bh(&cmdq->lock);
		ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
-			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
			u8 tfdidx;
			u32 caplen, cmdlen;

@@ -3362,7 +3323,7 @@ static struct iwl_trans_dump_data
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

-			ptr = iwl_queue_dec_wrap(trans, ptr);
+			ptr = iwl_txq_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

@@ -3481,13 +3442,13 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {

	.send_cmd = iwl_trans_pcie_gen2_send_hcmd,

-	.tx = iwl_trans_pcie_gen2_tx,
+	.tx = iwl_txq_gen2_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.set_q_ptrs = iwl_trans_pcie_set_q_ptrs,

-	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
-	.txq_free = iwl_trans_pcie_dyn_txq_free,
+	.txq_alloc = iwl_txq_dyn_alloc,
+	.txq_free = iwl_txq_dyn_free,
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -3534,11 +3495,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->tso_hdr_page) {
		ret = -ENOMEM;
		goto out_no_pci;
	}
	trans_pcie->debug_rfkill = -1;

	if (!cfg_trans->base_params->pcie_l1_allowed) {
@@ -3671,7 +3627,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	free_percpu(trans_pcie->tso_hdr_page);
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_trans:
	iwl_trans_free(trans);
(File diff suppressed because it is too large.)
@@ -102,60 +102,6 @@
 *
 ***************************************************/

int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
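The mask arithmetic above (now iwl_txq_space() in the common code) works because max_tfd_queue_size is a power of two, so the subtraction wraps correctly even after write_ptr has wrapped past read_ptr. A worked example, illustrative only, assuming n_window == max_tfd_queue_size == 256:

/* write_ptr = 5, read_ptr = 250:
 * used  = (5 - 250) & 255 = 11
 * max   = 256 - 1         = 255	(one slot reserved so "full" and
 *					 "empty" stay distinguishable)
 * space = max - used      = 244
 */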
/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_pcie_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
@@ -180,24 +126,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
@@ -402,7 +330,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_txq *txq, int index)
{
	int i, num_tbs;
-	void *tfd = iwl_pcie_get_tfd(trans, txq, index);
+	void *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -459,7 +387,7 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
-	int idx = iwl_pcie_get_cmd_index(txq, rd_ptr);
+	int idx = iwl_txq_get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

@@ -514,125 +442,6 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
	return num_tbs;
}

int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
		       int slots_num, bool cmd_queue)
{
	size_t tfd_sz = trans->txqs.tfd.size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	if (trans->trans_cfg->use_tfh)
		tfd_sz = trans->txqs.tfd.size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

void iwl_pcie_free_tso_page(struct iwl_trans *trans,
			    struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		next = *(void **)(page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -668,10 +477,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
			if (WARN_ON_ONCE(!skb))
				continue;

-			iwl_pcie_free_tso_page(trans, skb);
+			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
-		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;
@@ -996,8 +805,8 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
-		ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
-					 slots_num, cmd_queue);
+		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
+				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
@@ -1049,8 +858,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
-		ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
-					slots_num, cmd_queue);
+		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
+				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
@@ -1108,8 +917,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
-	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
-	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+	int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+	int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release cmd queue*/
@@ -1132,9 +941,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
-	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
+	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

-	if (!iwl_queue_used(txq, last_to_free)) {
+	if (!iwl_txq_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
@@ -1148,14 +957,14 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,

	for (;
	     read_ptr != tfd_num;
-	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
-	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
+	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

-		iwl_pcie_free_tso_page(trans, skb);
+		iwl_txq_free_tso_page(trans, skb);

		__skb_queue_tail(skbs, skb);

@@ -1169,7 +978,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,

	iwl_pcie_txq_progress(txq);

-	if (iwl_queue_space(trans, txq) > txq->low_mark &&
+	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

@@ -1203,13 +1012,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,

			/*
			 * Note that we can very well be overflowing again.
-			 * In that case, iwl_queue_space will be small again
+			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

-		if (iwl_queue_space(trans, txq) > txq->low_mark)
+		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
@@ -1290,11 +1099,11 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)

	lockdep_assert_held(&txq->lock);

-	idx = iwl_pcie_get_cmd_index(txq, idx);
-	r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+	idx = iwl_txq_get_cmd_index(txq, idx);
+	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
-	    (!iwl_queue_used(txq, idx))) {
+	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
@@ -1303,9 +1112,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
		return;
	}

-	for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
-	     r = iwl_queue_inc_wrap(trans, r)) {
-		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
+	     r = iwl_txq_inc_wrap(trans, r)) {
+		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
@@ -1617,7 +1426,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,

	spin_lock_bh(&txq->lock);

-	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
@@ -1626,7 +1435,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
		goto free_dup_buf;
	}

-	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

@@ -1709,7 +1518,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
-			       iwl_pcie_get_first_tb_dma(txq, idx),
+			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
@@ -1773,7 +1582,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
	}

	/* Increment and update queue's write index */
-	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1818,7 +1627,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,

	spin_lock_bh(&txq->lock);

-	cmd_index = iwl_pcie_get_cmd_index(txq, index);
+	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
@@ -2045,51 +1854,6 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() in tx-gen2.c)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	get_page(p->page);
	return p;
}
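get_page_hdr() reserves the last sizeof(void *) bytes of each TSO header page for a chaining pointer, and that chain is exactly what iwl_pcie_free_tso_page() (now iwl_txq_free_tso_page()) walks when an skb is reclaimed. A condensed sketch of the walk, illustrative only and mirroring the code moved out above:

/* Follow the chain stored at the tail of each page. */
struct page *p = *page_ptr;	/* head kept in skb->cb at trans->txqs.page_offs */

while (p) {
	struct page *next =
		*(void **)(page_address(p) + PAGE_SIZE - sizeof(void *));

	__free_page(p);
	p = next;
}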
static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
@@ -2132,7 +1896,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
+			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

@@ -2355,11 +2119,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,

	spin_lock(&txq->lock);

-	if (iwl_queue_space(trans, txq) < txq->high_mark) {
-		iwl_stop_queue(trans, txq);
+	if (iwl_txq_space(trans, txq) < txq->high_mark) {
+		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
+		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2392,7 +2156,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

-	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

@@ -2442,8 +2206,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     iwl_pcie_get_tfd(trans, txq,
-					      txq->write_ptr),
+			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);
@@ -2476,7 +2239,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

-	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_pcie_tfd_get_num_tbs(trans, tfd));
@@ -2499,7 +2262,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	}

	/* Tell device the write index *just past* this latest filled TFD */
-	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);
drivers/net/wireless/intel/iwlwifi/queue/tx.c (new file, 1375 lines; diff suppressed because it is too large)

drivers/net/wireless/intel/iwlwifi/queue/tx.h (new file, 188 lines):
@@ -0,0 +1,188 @@
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans,
		      __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size,
		      unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
#endif /* __iwl_trans_queue_tx_h__ */
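Because max_tfd_queue_size is a power of two, the wrap helpers above reduce to a single AND with the mask. For example (illustrative only), with max_tfd_queue_size = 256:

/* iwl_txq_inc_wrap(trans, 255) == (255 + 1) & 255 == 0
 * iwl_txq_dec_wrap(trans, 0)   == (0 - 1)   & 255 == 255
 */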