Merge branch 'for-linville' of git://github.com/kvalo/ath
commit 946951e141
@@ -637,6 +637,7 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
		ath10k_pci_wake(ar);
		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;
		ath10k_pci_sleep(ar);
	}
	read_index = src_ring->hw_index;
@@ -950,10 +951,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,

	ath10k_pci_wake(ar);
	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;
	ath10k_pci_sleep(ar);

	src_ring->per_transfer_context = (void **)ptr;
@@ -1035,8 +1038,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,

	ath10k_pci_wake(ar);
	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;
	ath10k_pci_sleep(ar);

	dest_ring->per_transfer_context = (void **)ptr;
@@ -38,6 +38,7 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
#define ATH10K_NUM_CHANS 38

/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
@@ -285,6 +286,7 @@ struct ath10k {
	u32 hw_max_tx_power;
	u32 ht_cap_info;
	u32 vht_cap_info;
	u32 num_rf_chains;

	struct targetdef *targetdef;
	struct hostdef *hostdef;
@@ -374,6 +376,12 @@ struct ath10k {

	struct work_struct restart_work;

	/* cycle count is reported twice for each visited channel during scan.
	 * access protected by data_lock */
	u32 survey_last_rx_clear_count;
	u32 survey_last_cycle_count;
	struct survey_info survey[ATH10K_NUM_CHANS];

#ifdef CONFIG_ATH10K_DEBUGFS
	struct ath10k_debug debug;
#endif
@@ -804,6 +804,37 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
	return false;
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
@@ -815,6 +846,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
	u8 *fw_desc;
	int i, j;
	int ret;
	int ip_summed;

	memset(&info, 0, sizeof(info));

@@ -889,6 +921,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				continue;
			}

			/* The skb is not yet processed and it may be
			 * reallocated. Since the offload is in the original
			 * skb extract the checksum now and assign it later */
			ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -914,6 +951,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

			info.skb->ip_summed = ip_summed;

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
					info.skb->data, info.skb->len);
			ath10k_process_rx(htt->ar, &info);
@@ -980,6 +1019,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
@@ -465,6 +465,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
	flags1 = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;

@@ -1406,9 +1406,9 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
		skb->len - ieee80211_hdrlen(hdr->frame_control));
	skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
		skb->data, (void *)qos_ctl - (void *)skb->data);
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}

static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1925,6 +1925,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,

	mutex_lock(&ar->conf_mutex);

	memset(arvif, 0, sizeof(*arvif));

	arvif->ar = ar;
	arvif->vif = vif;

@@ -2338,6 +2340,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
			arg.ssids[i].len = req->ssids[i].ssid_len;
			arg.ssids[i].ssid = req->ssids[i].ssid;
		}
	} else {
		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
	}

	if (req->n_channels) {
@@ -2934,6 +2938,41 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw)
	mutex_unlock(&ar->conf_mutex);
}

static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
			     struct survey_info *survey)
{
	struct ath10k *ar = hw->priv;
	struct ieee80211_supported_band *sband;
	struct survey_info *ar_survey = &ar->survey[idx];
	int ret = 0;

	mutex_lock(&ar->conf_mutex);

	sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
	if (sband && idx >= sband->n_channels) {
		idx -= sband->n_channels;
		sband = NULL;
	}

	if (!sband)
		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];

	if (!sband || idx >= sband->n_channels) {
		ret = -ENOENT;
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	memcpy(survey, ar_survey, sizeof(*survey));
	spin_unlock_bh(&ar->data_lock);

	survey->channel = &sband->channels[idx];

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

static const struct ieee80211_ops ath10k_ops = {
	.tx = ath10k_tx,
	.start = ath10k_start,
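ath10k_get_survey() interprets idx against a flat channel ordering: 2 GHz channels come first and 5 GHz channels follow, which is why idx is reduced by the 2 GHz channel count before the 5 GHz band is consulted. freq_to_idx() in wmi.c (later in this commit) fills ar->survey[] using the same ordering. A small standalone sketch of that convention, with channel counts chosen purely for illustration:

#include <stdio.h>

/* Illustration only: the flat survey[] index space shared by
 * ath10k_get_survey() and freq_to_idx(). Channel counts are assumptions. */
int main(void)
{
	int n_2ghz = 14;	/* assumed 2 GHz channel count */
	int n_5ghz = 24;	/* assumed 5 GHz channel count */
	int idx;

	for (idx = 0; idx < n_2ghz + n_5ghz; idx++) {
		if (idx < n_2ghz)
			printf("survey[%2d] -> 2 GHz channel index %d\n",
			       idx, idx);
		else
			printf("survey[%2d] -> 5 GHz channel index %d\n",
			       idx, idx - n_2ghz);
	}
	return 0;
}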
@@ -2955,6 +2994,7 @@ static const struct ieee80211_ops ath10k_ops = {
	.flush = ath10k_flush,
	.tx_last_beacon = ath10k_tx_last_beacon,
	.restart_complete = ath10k_restart_complete,
	.get_survey = ath10k_get_survey,
#ifdef CONFIG_PM
	.suspend = ath10k_suspend,
	.resume = ath10k_resume,
@@ -3076,9 +3116,15 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
	.max	= 8,
	.types	= BIT(NL80211_IFTYPE_STATION)
		| BIT(NL80211_IFTYPE_P2P_CLIENT)
		| BIT(NL80211_IFTYPE_P2P_GO)
		| BIT(NL80211_IFTYPE_AP)
	}
	},
	{
	.max	= 3,
	.types	= BIT(NL80211_IFTYPE_P2P_GO)
	},
	{
	.max	= 7,
	.types	= BIT(NL80211_IFTYPE_AP)
	},
};

static const struct ieee80211_iface_combination ath10k_if_comb = {
@@ -3093,19 +3139,18 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
	struct ieee80211_sta_vht_cap vht_cap = {0};
	u16 mcs_map;
	int i;

	vht_cap.vht_supported = 1;
	vht_cap.cap = ar->vht_cap_info;

	/* FIXME: check dynamically how many streams board supports */
	mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
		  IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
		  IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
	mcs_map = 0;
	for (i = 0; i < 8; i++) {
		if (i < ar->num_rf_chains)
			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
		else
			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
	}

	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
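The replacement loop packs one 2-bit MCS field per spatial stream into the 16-bit map, marking the first num_rf_chains streams as MCS 0-9 capable and the remaining streams as unsupported (in mac80211's ieee80211.h, IEEE80211_VHT_MCS_SUPPORT_0_9 is 2 and IEEE80211_VHT_MCS_NOT_SUPPORTED is 3). A standalone sketch of the arithmetic for a hypothetical three-chain board:

#include <stdio.h>

/* Per-stream field values as used by struct ieee80211_vht_mcs_info:
 * two bits per spatial stream, eight streams packed into a u16. */
#define VHT_MCS_SUPPORT_0_9	2
#define VHT_MCS_NOT_SUPPORTED	3

int main(void)
{
	unsigned int num_rf_chains = 3;	/* example chain count */
	unsigned short mcs_map = 0;
	int i;

	/* Same construction as ath10k_create_vht_cap(): the first
	 * num_rf_chains streams advertise MCS 0-9, the rest are marked
	 * not supported. */
	for (i = 0; i < 8; i++) {
		if (i < num_rf_chains)
			mcs_map |= VHT_MCS_SUPPORT_0_9 << (i * 2);
		else
			mcs_map |= VHT_MCS_NOT_SUPPORTED << (i * 2);
	}

	/* For three chains this prints 0xffea: streams 0-2 -> MCS 0-9,
	 * streams 3-7 -> not supported. */
	printf("mcs_map = 0x%04x\n", (unsigned int)mcs_map);
	return 0;
}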
@@ -3168,7 +3213,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
	for (i = 0; i < ar->num_rf_chains; i++)
		ht_cap.mcs.rx_mask[i] = 0xFF;

	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
@@ -3310,6 +3355,8 @@ int ath10k_mac_register(struct ath10k *ar)
	ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
	ar->hw->wiphy->n_iface_combinations = 1;

	ar->hw->netdev_features = NETIF_F_HW_CSUM;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
@@ -32,7 +32,7 @@
#include "ce.h"
#include "pci.h"

unsigned int ath10k_target_ps;
static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

@@ -56,6 +56,8 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
@@ -1254,10 +1256,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
	}
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
@@ -1267,6 +1284,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1740,8 +1759,15 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_intr(ar);
	if (ret) {
		ath10k_err("could not start interrupt handling (%d)\n", ret);
		goto err;
	}

	/*
	 * Bring the target up cleanly.
	 *
@@ -1756,15 +1782,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)

	ret = ath10k_pci_reset_target(ar);
	if (ret)
		goto err;
		goto err_irq;

	if (ath10k_target_ps) {
		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
	} else {
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
		ath10k_do_pci_wake(ar);
	}

	ret = ath10k_pci_ce_init(ar);
	if (ret)
@@ -1785,16 +1807,22 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
err_ce:
	ath10k_pci_ce_deinit(ar);
err_ps:
	if (!ath10k_target_ps)
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err_irq:
	ath10k_pci_stop_intr(ar);
err:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_stop_intr(ar);

	ath10k_pci_ce_deinit(ar);
	if (!ath10k_target_ps)
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}

@@ -1990,8 +2018,13 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret)
	if (ret) {
		ath10k_warn("request_irq(%d) failed %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);

		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
@@ -2239,6 +2272,9 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
		case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}
@@ -2274,6 +2310,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
@@ -2358,22 +2397,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,

	ar_pci->cacheline_sz = dma_get_cache_alignment();

	ret = ath10k_pci_start_intr(ar);
	if (ret) {
		ath10k_err("could not start interrupt handling (%d)\n", ret);
		goto err_iomap;
	}

	ret = ath10k_core_register(ar);
	if (ret) {
		ath10k_err("could not register driver core (%d)\n", ret);
		goto err_intr;
		goto err_iomap;
	}

	return 0;

err_intr:
	ath10k_pci_stop_intr(ar);
err_iomap:
	pci_iounmap(pdev, mem);
err_master:
@@ -2410,7 +2441,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);
	ath10k_pci_stop_intr(ar);

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ar_pci->mem);
@@ -153,6 +153,7 @@ struct service_to_pipe {
enum ath10k_pci_features {
	ATH10K_PCI_FEATURE_MSI_X = 0,
	ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
	ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,

	/* keep last */
	ATH10K_PCI_FEATURE_COUNT
@@ -335,20 +336,22 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
	return ioread32(ar_pci->mem + offset);
}

extern unsigned int ath10k_target_ps;

void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);

static inline void ath10k_pci_wake(struct ath10k *ar)
{
	if (ath10k_target_ps)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_wake(ar);
}

static inline void ath10k_pci_sleep(struct ath10k *ar)
{
	if (ath10k_target_ps)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}

@@ -390,9 +390,82 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
	return 0;
}

static int freq_to_idx(struct ath10k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
	struct wmi_chan_info_event *ev;
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx;

	ev = (struct wmi_chan_info_event *)skb->data;

	err_code = __le32_to_cpu(ev->err_code);
	freq = __le32_to_cpu(ev->freq);
	cmd_flags = __le32_to_cpu(ev->cmd_flags);
	noise_floor = __le32_to_cpu(ev->noise_floor);
	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
	cycle_count = __le32_to_cpu(ev->cycle_count);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	spin_lock_bh(&ar->data_lock);

	if (!ar->scan.in_progress) {
		ath10k_warn("chan info event without a scan request?\n");
		goto exit;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		/* During scanning chan info is reported twice for each
		 * visited channel. The reported cycle count is global
		 * and per-channel cycle count must be calculated */

		cycle_count -= ar->survey_last_cycle_count;
		rx_clear_count -= ar->survey_last_rx_clear_count;

		survey = &ar->survey[idx];
		survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
		survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
		survey->noise = noise_floor;
		survey->filled = SURVEY_INFO_CHANNEL_TIME |
				 SURVEY_INFO_CHANNEL_TIME_RX |
				 SURVEY_INFO_NOISE_DBM;
	}

	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
@@ -868,6 +941,13 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
		(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(ev->phy_capability);
	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);

	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->ath_common.regulatory.current_rd =
		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
@@ -892,7 +972,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
	}

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->sw_version_1),
		   __le32_to_cpu(ev->abi_version),
@@ -901,7 +981,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
		   __le32_to_cpu(ev->vht_cap_info),
		   __le32_to_cpu(ev->vht_supp_mcs),
		   __le32_to_cpu(ev->sys_cap_info),
		   __le32_to_cpu(ev->num_mem_reqs));
		   __le32_to_cpu(ev->num_mem_reqs),
		   __le32_to_cpu(ev->num_rf_chains));

	complete(&ar->wmi.service_ready);
}
@@ -2931,6 +2931,11 @@ struct wmi_chan_info_event {
	__le32 cycle_count;
} __packed;

#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)

/* FIXME: empirically extrapolated */
#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)

/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
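WMI_CHAN_INFO_MSEC() is what ath10k_wmi_event_chan_info() (earlier in this commit) uses to turn per-channel cycle-count deltas into survey times. Dividing by 76595 treats the counter as ticking roughly 76595 times per millisecond, i.e. about 76.6 MHz; the FIXME marks that divisor as empirically extrapolated, so the clock rate is an inference rather than a documented value. A small worked example of the conversion:

#include <stdio.h>

/* Same macro as in the hunk above: cycle counts to milliseconds, assuming
 * roughly 76595 counter ticks per millisecond (an empirical divisor). */
#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)

int main(void)
{
	/* Example deltas between two chan info reports for one channel. */
	unsigned int cycle_count = 7659500;	/* -> 100 ms on channel  */
	unsigned int rx_clear_count = 1531900;	/* -> 20 ms receive time */

	printf("channel_time    = %u ms\n", WMI_CHAN_INFO_MSEC(cycle_count));
	printf("channel_time_rx = %u ms\n", WMI_CHAN_INFO_MSEC(rx_clear_count));
	return 0;
}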
@@ -1836,6 +1836,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)

	clear_bit(WMI_READY, &ar->flag);

	if (ar->fw_recovery.enable)
		del_timer_sync(&ar->fw_recovery.hb_timer);

	/*
	 * After wmi_shudown all WMI events will be dropped. We
	 * need to cleanup the buffers allocated in AP mode and
@@ -29,6 +29,9 @@ struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
	struct ath6kl_sta *conn = NULL;
	u8 i, max_conn;

	if (is_zero_ether_addr(node_addr))
		return NULL;

	max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;

	for (i = 0; i < max_conn; i++) {