Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6

Conflicts:
    drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
    drivers/net/wireless/rtlwifi/pci.c
commit c48b1f729a
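The iwlwifi/iwlegacy hunks that follow all make the same substitution: the driver-private struct iwl_switch_rxon (a switch_in_progress bool plus a channel copy, updated under priv->mutex) is replaced by an atomic STATUS_CHANNEL_SWITCH_PENDING bit in priv->status and a bare __le16 switch_channel field. A minimal sketch of that pattern, using the standard kernel bitops and a hypothetical, reduced iwl_priv_sketch that keeps only the two fields involved:

#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define STATUS_CHANNEL_SWITCH_PENDING 18    /* same bit number the header hunk adds */

/* hypothetical, reduced stand-in for struct iwl_priv: only the fields the
 * channel-switch bookkeeping touches */
struct iwl_priv_sketch {
    unsigned long status;       /* atomic status bits (set_bit/test_bit/clear_bit) */
    __le16 switch_channel;      /* channel of the switch currently in flight */
};

/* arm the switch: mark it pending and remember the target channel */
static void sketch_switch_begin(struct iwl_priv_sketch *priv, u16 ch)
{
    set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
    priv->switch_channel = cpu_to_le16(ch);
}

/* complete the switch exactly once; no mutex is needed around the flag */
static bool sketch_switch_finish(struct iwl_priv_sketch *priv)
{
    return test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
}

Because the flag lives in priv->status, iwl_legacy_chswitch_done() no longer needs to take priv->mutex just to clear it, which is what the chswitch_done hunk below does.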
@@ -1216,10 +1216,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
      * receive commit_rxon request
      * abort any previous channel switch if still in process
      */
-    if (priv->switch_rxon.switch_in_progress &&
-        (priv->switch_rxon.channel != ctx->staging.channel)) {
+    if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+        (priv->switch_channel != ctx->staging.channel)) {
         IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-            le16_to_cpu(priv->switch_rxon.channel));
+            le16_to_cpu(priv->switch_channel));
         iwl_legacy_chswitch_done(priv, false);
     }
 
@@ -1402,9 +1402,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
         return rc;
     }
 
-    priv->switch_rxon.channel = cmd.channel;
-    priv->switch_rxon.switch_in_progress = true;
-
     return iwl_legacy_send_cmd_pdu(priv,
             REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
 }
@@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
     if (test_bit(STATUS_EXIT_PENDING, &priv->status))
         return;
 
-    if (priv->switch_rxon.switch_in_progress) {
+    if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
         ieee80211_chswitch_done(ctx->vif, is_success);
-        mutex_lock(&priv->mutex);
-        priv->switch_rxon.switch_in_progress = false;
-        mutex_unlock(&priv->mutex);
-    }
 }
 EXPORT_SYMBOL(iwl_legacy_chswitch_done);
 
@@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
     struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
     struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
 
-    if (priv->switch_rxon.switch_in_progress) {
-        if (!le32_to_cpu(csa->status) &&
-            (csa->channel == priv->switch_rxon.channel)) {
-            rxon->channel = csa->channel;
-            ctx->staging.channel = csa->channel;
-            IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-                  le16_to_cpu(csa->channel));
-            iwl_legacy_chswitch_done(priv, true);
-        } else {
-            IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
-                  le16_to_cpu(csa->channel));
-            iwl_legacy_chswitch_done(priv, false);
-        }
+    if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+        return;
+
+    if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+        rxon->channel = csa->channel;
+        ctx->staging.channel = csa->channel;
+        IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+                  le16_to_cpu(csa->channel));
+        iwl_legacy_chswitch_done(priv, true);
+    } else {
+        IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+            le16_to_cpu(csa->channel));
+        iwl_legacy_chswitch_done(priv, false);
     }
 }
 EXPORT_SYMBOL(iwl_legacy_rx_csa);
@@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
 #define STATUS_SCAN_HW        15
 #define STATUS_POWER_PMI    16
 #define STATUS_FW_ERROR        17
-
+#define STATUS_CHANNEL_SWITCH_PENDING 18
 
 static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
 {
@@ -854,17 +854,6 @@ struct traffic_stats {
 #endif
 };
 
-/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @switch_in_progress: channel switch in progress
- * @channel: new channel
- */
-struct iwl_switch_rxon {
-    bool switch_in_progress;
-    __le16 channel;
-};
-
 /*
  * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
  * to perform continuous uCode event logging operation if enabled
@@ -1115,7 +1104,7 @@ struct iwl_priv {
 
     struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
 
-    struct iwl_switch_rxon switch_rxon;
+    __le16 switch_channel;
 
     /* 1st responses from initialize and runtime uCode images.
      * _4965's initialize alive response contains some calibration data. */
@@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
         goto out;
 
     if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-        test_bit(STATUS_SCANNING, &priv->status))
+        test_bit(STATUS_SCANNING, &priv->status) ||
+        test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
         goto out;
 
     if (!iwl_legacy_is_associated_ctx(ctx))
         goto out;
 
-    /* channel switch in progress */
-    if (priv->switch_rxon.switch_in_progress == true)
-        goto out;
-
     if (priv->cfg->ops->lib->set_channel_switch) {
 
         ch = channel->hw_value;
@@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
              * at this point, staging_rxon has the
              * configuration for channel switch
              */
-            if (priv->cfg->ops->lib->set_channel_switch(priv,
-                                                         ch_switch))
-                priv->switch_rxon.switch_in_progress = false;
+            set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+            priv->switch_channel = cpu_to_le16(ch);
+            if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+                clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+                          &priv->status);
+                priv->switch_channel = 0;
+                ieee80211_chswitch_done(ctx->vif, false);
+            }
         }
     }
 out:
     mutex_unlock(&priv->mutex);
-    if (!priv->switch_rxon.switch_in_progress)
-        ieee80211_chswitch_done(ctx->vif, false);
     IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
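The two iwl4965_mac_channel_switch hunks above also change the ordering: the pending bit and the target channel are recorded before the firmware command is issued, and rolled back, reporting failure to mac80211 right away, if ->set_channel_switch() returns an error; the old post-unlock "if (!switch_in_progress) ieee80211_chswitch_done(false)" fallback goes away. A sketch of that arm-then-roll-back shape, reusing iwl_priv_sketch from the note above, with a hypothetical sketch_send_chswitch_cmd() standing in for the firmware call:

#include <net/mac80211.h>    /* ieee80211_chswitch_done(), struct ieee80211_vif */
#include <linux/errno.h>

/* hypothetical stand-in for priv->cfg->ops->lib->set_channel_switch();
 * a non-zero return means the command was not accepted */
static int sketch_send_chswitch_cmd(struct iwl_priv_sketch *priv, u16 ch)
{
    return -EIO;
}

static void sketch_mac_channel_switch(struct iwl_priv_sketch *priv,
                                      struct ieee80211_vif *vif, u16 ch)
{
    /* optimistically mark the switch pending before talking to the firmware */
    set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
    priv->switch_channel = cpu_to_le16(ch);

    if (sketch_send_chswitch_cmd(priv, ch)) {
        /* undo the pending state and tell mac80211 immediately */
        clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
        priv->switch_channel = 0;
        ieee80211_chswitch_done(vif, false);
    }
}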
@@ -670,6 +670,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                              &rx_status,
                              (u8 *) pdesc, skb);
 
+            new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+            if (unlikely(!new_skb)) {
+                RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+                         DBG_DMESG,
+                         ("can't alloc skb for rx\n"));
+                goto done;
+            }
+
+            pci_unmap_single(rtlpci->pdev,
+                             *((dma_addr_t *) skb->cb),
+                             rtlpci->rxbuffersize,
+                             PCI_DMA_FROMDEVICE);
+
             skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
                              false,
                              HW_DESC_RXPKT_LEN));
@@ -686,21 +699,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
             hdr = rtl_get_hdr(skb);
             fc = rtl_get_fc(skb);
 
-            /* try for new buffer - if allocation fails, drop
-             * frame and reuse old buffer
-             */
-            new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
-            if (unlikely(!new_skb)) {
-                RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-                         DBG_DMESG,
-                         ("can't alloc skb for rx\n"));
-                goto done;
-            }
-            pci_unmap_single(rtlpci->pdev,
-                             *((dma_addr_t *) skb->cb),
-                             rtlpci->rxbuffersize,
-                             PCI_DMA_FROMDEVICE);
-
             if (!stats.crc && !stats.hwerror) {
                 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                        sizeof(rx_status));
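The two pci.c hunks are the rtlwifi side of the conflict: allocating the replacement skb and unmapping the just-received DMA buffer move to the top of the per-descriptor handling, so an allocation failure now drops the frame before the old mapping is torn down and the existing buffer is simply reused. A hedged sketch of that ordering, with struct rtl_pci reduced to a hypothetical two-field stand-in and the rest of the rx loop elided:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* hypothetical, reduced stand-in for struct rtl_pci */
struct rtl_pci_sketch {
    struct pci_dev *pdev;
    unsigned int rxbuffersize;
};

/* Allocate the replacement buffer first; only if that succeeds is the old
 * DMA mapping torn down.  Returns the skb to process, or NULL to drop the
 * frame while keeping the old buffer mapped for reuse. */
static struct sk_buff *sketch_rx_swap_buffer(struct rtl_pci_sketch *rtlpci,
                                             struct sk_buff *skb,
                                             struct sk_buff **new_skb)
{
    *new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
    if (unlikely(!*new_skb))
        return NULL;

    pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
                     rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
    return skb;
}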
@@ -965,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
     mutex_lock(&sdata->u.ibss.mtx);
 
+    sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+    memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+    sdata->u.ibss.ssid_len = 0;
+
     active_ibss = ieee80211_sta_active_ibss(sdata);
 
     if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -999,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
     kfree_skb(skb);
 
     skb_queue_purge(&sdata->skb_queue);
-    memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
-    sdata->u.ibss.ssid_len = 0;
 
     del_timer_sync(&sdata->u.ibss.timer);
 
@@ -232,9 +232,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
         WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
     }
 
-    ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                IEEE80211_QUEUE_STOP_REASON_CSA);
-
     /* channel_type change automatically detected */
     ieee80211_hw_config(local, 0);
 
@@ -248,9 +245,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
         rcu_read_unlock();
     }
 
-    ieee80211_wake_queues_by_reason(&sdata->local->hw,
-                IEEE80211_QUEUE_STOP_REASON_CSA);
-
     ht_opmode = le16_to_cpu(hti->operation_mode);
 
     /* if bss configuration changed store the new one */
@@ -3406,11 +3406,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
     i = 0;
     if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
         nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
-            request->ssids[i].ssid_len = nla_len(attr);
-            if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
+            if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                 err = -EINVAL;
                 goto out_free;
             }
+            request->ssids[i].ssid_len = nla_len(attr);
             memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
             i++;
         }
@@ -3572,12 +3572,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
     if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
         nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
                     tmp) {
-            request->ssids[i].ssid_len = nla_len(attr);
-            if (request->ssids[i].ssid_len >
-                IEEE80211_MAX_SSID_LEN) {
+            if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                 err = -EINVAL;
                 goto out_free;
             }
+            request->ssids[i].ssid_len = nla_len(attr);
             memcpy(request->ssids[i].ssid, nla_data(attr),
                    nla_len(attr));
             i++;
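Both nl80211 hunks make the same correction: nla_len() is checked against IEEE80211_MAX_SSID_LEN before the length is stored in request->ssids[i].ssid_len, so a rejected SSID can no longer leave a bogus length behind on the -EINVAL path. The check-then-store order in isolation, as a small hypothetical helper (sketch_copy_ssid() is not part of the patch; only the ordering matters):

#include <linux/errno.h>
#include <linux/ieee80211.h>    /* IEEE80211_MAX_SSID_LEN */
#include <linux/string.h>
#include <net/cfg80211.h>       /* struct cfg80211_ssid */
#include <net/netlink.h>        /* nla_len(), nla_data() */

/* hypothetical helper: validate the attribute before writing anything into dst */
static int sketch_copy_ssid(struct cfg80211_ssid *dst, const struct nlattr *attr)
{
    if (nla_len(attr) > IEEE80211_MAX_SSID_LEN)
        return -EINVAL;

    dst->ssid_len = nla_len(attr);
    memcpy(dst->ssid, nla_data(attr), nla_len(attr));
    return 0;
}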