2010-10-23 16:15:41 +00:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
2012-01-06 21:16:33 +00:00
|
|
|
* Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
|
2010-10-23 16:15:41 +00:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of version 2 of the GNU General Public License as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
|
|
* more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along with
|
|
|
|
* this program; if not, write to the Free Software Foundation, Inc.,
|
|
|
|
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
|
|
|
|
*
|
|
|
|
* The full GNU General Public License is included in this distribution in the
|
|
|
|
* file called LICENSE.
|
|
|
|
*
|
|
|
|
* Contact Information:
|
|
|
|
* Intel Linux Wireless <ilw@linux.intel.com>
|
|
|
|
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
|
|
|
*
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
#include "iwl-dev.h"
|
|
|
|
#include "iwl-agn.h"
|
|
|
|
#include "iwl-core.h"
|
|
|
|
#include "iwl-agn-calib.h"
|
2011-07-08 15:46:16 +00:00
|
|
|
#include "iwl-trans.h"
|
2011-08-26 06:10:36 +00:00
|
|
|
#include "iwl-shared.h"
|
2010-10-23 16:15:41 +00:00
|
|
|
|
|
|
|
static int iwlagn_disable_bss(struct iwl_priv *priv,
|
|
|
|
struct iwl_rxon_context *ctx,
|
|
|
|
struct iwl_rxon_cmd *send)
|
|
|
|
{
|
|
|
|
__le32 old_filter = send->filter_flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
iwlagn: bus layer chooses its transport layer
Remove iwl_transport_register which was a W/A. The bus layer knows what
transport to use. So now, the bus layer gives the upper layer a pointer to the
iwl_trans_ops struct that it wants to use. The upper layer then, allocates the
desired transport layer using iwl_trans_ops->alloc function.
As a result of this, priv->trans, no longer exists, priv holds a pointer to
iwl_shared, which holds a pointer to iwl_trans. This required to change all the
calls to the transport layer from upper layer. While we were at it, trans_X
inlines have been renamed to iwl_trans_X to avoid confusions, which of course
required to rename the functions inside the transport layer because of
conflicts in names. So the static API functions inside the transport layer
implementation have been renamed to iwl_trans_pcie_X.
Until now, the IRQ / Tasklet were initialized in iwl_transport_layer. This is
confusing since the registration doesn't mean to request IRQ, so I added a
handler for that.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2011-08-26 06:10:48 +00:00
|
|
|
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
|
2011-07-08 15:46:14 +00:00
|
|
|
CMD_SYNC, sizeof(*send), send);
|
2010-10-23 16:15:41 +00:00
|
|
|
|
|
|
|
send->filter_flags = old_filter;
|
|
|
|
|
|
|
|
if (ret)
|
2011-11-10 14:55:02 +00:00
|
|
|
IWL_DEBUG_QUIET_RFKILL(priv,
|
|
|
|
"Error clearing ASSOC_MSK on BSS (%d)\n", ret);
|
2010-10-23 16:15:41 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Deactivate the PAN context on the device.
 *
 * Sends the context's RXON with the association bit cleared and
 * dev_type forced to RXON_DEV_TYPE_P2P, then waits (up to HZ jiffies,
 * i.e. about one second) for the firmware to confirm with a
 * REPLY_WIPAN_DEACTIVATION_COMPLETE notification.  The notification
 * wait is registered *before* the command is sent so the completion
 * cannot be missed; it is explicitly removed if the command itself
 * fails.  @send's filter_flags and dev_type are restored before
 * returning, so the caller's staging copy is unmodified.
 */
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;

	/* register for the deactivation-complete event before sending */
	iwl_init_notification_wait(priv->shrd, &disable_wait,
				   REPLY_WIPAN_DEACTIVATION_COMPLETE,
				   NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
				     CMD_SYNC, sizeof(*send), send);

	/* restore caller's buffer regardless of the outcome */
	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		/* command never reached the device; drop the pending wait */
		iwl_remove_notification(priv->shrd, &disable_wait);
	} else {
		ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}
|
|
|
|
|
2011-05-27 15:40:26 +00:00
|
|
|
static int iwlagn_disconn_pan(struct iwl_priv *priv,
|
|
|
|
struct iwl_rxon_context *ctx,
|
|
|
|
struct iwl_rxon_cmd *send)
|
|
|
|
{
|
|
|
|
__le32 old_filter = send->filter_flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
iwlagn: bus layer chooses its transport layer
Remove iwl_transport_register which was a W/A. The bus layer knows what
transport to use. So now, the bus layer gives the upper layer a pointer to the
iwl_trans_ops struct that it wants to use. The upper layer then, allocates the
desired transport layer using iwl_trans_ops->alloc function.
As a result of this, priv->trans, no longer exists, priv holds a pointer to
iwl_shared, which holds a pointer to iwl_trans. This required to change all the
calls to the transport layer from upper layer. While we were at it, trans_X
inlines have been renamed to iwl_trans_X to avoid confusions, which of course
required to rename the functions inside the transport layer because of
conflicts in names. So the static API functions inside the transport layer
implementation have been renamed to iwl_trans_pcie_X.
Until now, the IRQ / Tasklet were initialized in iwl_transport_layer. This is
confusing since the registration doesn't mean to request IRQ, so I added a
handler for that.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2011-08-26 06:10:48 +00:00
|
|
|
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
|
2011-07-08 15:46:14 +00:00
|
|
|
sizeof(*send), send);
|
2011-05-27 15:40:26 +00:00
|
|
|
|
|
|
|
send->filter_flags = old_filter;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-11-11 02:25:58 +00:00
|
|
|
static void iwlagn_update_qos(struct iwl_priv *priv,
|
|
|
|
struct iwl_rxon_context *ctx)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!ctx->is_active)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ctx->qos_data.def_qos_parm.qos_flags = 0;
|
|
|
|
|
|
|
|
if (ctx->qos_data.qos_active)
|
|
|
|
ctx->qos_data.def_qos_parm.qos_flags |=
|
|
|
|
QOS_PARAM_FLG_UPDATE_EDCA_MSK;
|
|
|
|
|
|
|
|
if (ctx->ht.enabled)
|
|
|
|
ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
|
|
|
|
|
2011-11-10 14:55:23 +00:00
|
|
|
IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
|
2010-11-11 02:25:58 +00:00
|
|
|
ctx->qos_data.qos_active,
|
|
|
|
ctx->qos_data.def_qos_parm.qos_flags);
|
|
|
|
|
iwlagn: bus layer chooses its transport layer
Remove iwl_transport_register which was a W/A. The bus layer knows what
transport to use. So now, the bus layer gives the upper layer a pointer to the
iwl_trans_ops struct that it wants to use. The upper layer then, allocates the
desired transport layer using iwl_trans_ops->alloc function.
As a result of this, priv->trans, no longer exists, priv holds a pointer to
iwl_shared, which holds a pointer to iwl_trans. This required to change all the
calls to the transport layer from upper layer. While we were at it, trans_X
inlines have been renamed to iwl_trans_X to avoid confusions, which of course
required to rename the functions inside the transport layer because of
conflicts in names. So the static API functions inside the transport layer
implementation have been renamed to iwl_trans_pcie_X.
Until now, the IRQ / Tasklet were initialized in iwl_transport_layer. This is
confusing since the registration doesn't mean to request IRQ, so I added a
handler for that.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2011-08-26 06:10:48 +00:00
|
|
|
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
|
2010-11-11 02:25:58 +00:00
|
|
|
sizeof(struct iwl_qosparam_cmd),
|
|
|
|
&ctx->qos_data.def_qos_parm);
|
|
|
|
if (ret)
|
2011-11-10 14:55:02 +00:00
|
|
|
IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
|
2010-11-11 02:25:58 +00:00
|
|
|
}
|
|
|
|
|
2010-10-23 16:15:42 +00:00
|
|
|
static int iwlagn_update_beacon(struct iwl_priv *priv,
|
|
|
|
struct ieee80211_vif *vif)
|
|
|
|
{
|
2011-08-26 06:10:44 +00:00
|
|
|
lockdep_assert_held(&priv->shrd->mutex);
|
2010-10-23 16:15:42 +00:00
|
|
|
|
|
|
|
dev_kfree_skb(priv->beacon_skb);
|
|
|
|
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
|
|
|
|
if (!priv->beacon_skb)
|
|
|
|
return -ENOMEM;
|
|
|
|
return iwlagn_send_beacon_cmd(priv);
|
|
|
|
}
|
|
|
|
|
2011-04-19 23:52:57 +00:00
|
|
|
/*
 * Send a lightweight RXON_ASSOC update instead of a full RXON commit.
 *
 * Compares every field carried by the RXON_ASSOC command between the
 * staging and active RXON; if nothing changed, no command is sent at
 * all.  Otherwise the relevant staging fields are copied into an
 * iwl_rxon_assoc_cmd and sent asynchronously.  Returns 0 when the
 * command was skipped or successfully queued, or the queueing error.
 */
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	int ret = 0;
	struct iwl_rxon_assoc_cmd rxon_assoc;
	const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
	const struct iwl_rxon_cmd *rxon2 = &ctx->active;

	/* nothing RXON_ASSOC covers has changed -> skip the command */
	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->ofdm_ht_triple_stream_basic_rates ==
	     rxon2->ofdm_ht_triple_stream_basic_rates) &&
	    (rxon1->acquisition_data == rxon2->acquisition_data) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	/* copy the staging values field by field; reserved fields are
	 * explicitly zeroed since rxon_assoc is stack-allocated */
	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved1 = 0;
	rxon_assoc.reserved2 = 0;
	rxon_assoc.reserved3 = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
	    ctx->staging.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
	    ctx->staging.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
		 ctx->staging.ofdm_ht_triple_stream_basic_rates;
	rxon_assoc.acquisition_data = ctx->staging.acquisition_data;

	/* fire and forget: CMD_ASYNC, no completion handling needed */
	ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
				     CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
	return ret;
}
|
|
|
|
|
2011-04-19 23:52:59 +00:00
|
|
|
/*
 * Take the context out of the associated state and re-sync host state.
 *
 * For the BSS context a plain un-assoc RXON is sent; for PAN the full
 * deactivation sequence is used (disable, RXON timing, disconnect).
 * Since an un-assoc RXON wipes the uCode station table and WEP keys,
 * stations and default WEP keys are restored afterwards, and the
 * active RXON copy is updated from staging on success.
 */
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
	} else {
		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
		if (ret)
			return ret;
		if (ctx->vif) {
			/* timing must be programmed before disconnecting */
			ret = iwl_send_rxon_timing(priv, ctx);
			if (ret) {
				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
				return ret;
			}
			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
		}
	}
	if (ret)
		return ret;

	/*
	 * Un-assoc RXON clears the station table and WEP
	 * keys, so we have to restore those afterwards.
	 */
	iwl_clear_ucode_stations(priv, ctx);
	/* update -- might need P2P now */
	iwl_update_bcast_station(priv, ctx);
	iwl_restore_stations(priv, ctx);
	ret = iwl_restore_default_wep_keys(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
		return ret;
	}

	/* device accepted the un-assoc config: mirror it as active */
	memcpy(active, &ctx->staging, sizeof(*active));
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Send the associated RXON and bring the context fully up.
 *
 * Ordering matters throughout: RXON timing must precede the associated
 * RXON (for BSS), QoS is refreshed because a prior un-assoc RXON may
 * have cleared it, AP beacons are uploaded before the commit while IBSS
 * beacons go after, and TX power must be re-sent since a tune may have
 * invalidated it.  On success the active RXON mirrors staging.
 */
static int iwlagn_rxon_connect(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	/* RXON timing must be before associated RXON */
	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwl_send_rxon_timing(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
			return ret;
		}
	}
	/* QoS info may be cleared by previous un-assoc RXON */
	iwlagn_update_qos(priv, ctx);

	/*
	 * We'll run into this code path when beaconing is
	 * enabled, but then we also need to send the beacon
	 * to the device.
	 */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
		ret = iwlagn_update_beacon(priv, ctx->vif);
		if (ret) {
			IWL_ERR(priv,
				"Error sending required beacon (%d)!\n",
				ret);
			return ret;
		}
	}

	priv->start_calib = 0;
	/*
	 * Apply the new configuration.
	 *
	 * Associated RXON doesn't clear the station table in uCode,
	 * so we don't need to restore stations etc. after this.
	 */
	ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
		      sizeof(struct iwl_rxon_cmd), &ctx->staging);
	if (ret) {
		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
		return ret;
	}
	memcpy(active, &ctx->staging, sizeof(*active));

	iwl_reprogram_ap_sta(priv, ctx);

	/* IBSS beacon needs to be sent after setting assoc */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
		if (iwlagn_update_beacon(priv, ctx->vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	iwl_init_sensitivity(priv);

	/*
	 * If we issue a new RXON command which required a tune then
	 * we must send a new TXPOWER command or we won't be able to
	 * Tx any frames.
	 *
	 * It's expected we set power here if channel is changing.
	 */
	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	/* honour a config-requested SMPS mode once associated (station only) */
	if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
	    cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode)
		ieee80211_request_smps(ctx->vif,
				       cfg(priv)->ht_params->smps_mode);

	return 0;
}
|
|
|
|
|
2011-07-07 15:27:41 +00:00
|
|
|
/*
 * Program the BSS/PAN time-slot split (WIPAN parameters) into firmware.
 *
 * Computes slot widths for the two contexts based on which interfaces
 * exist, whether a remain-on-channel session is set up, beacon/DTIM
 * intervals, and whether either context is idle/unassociated or a HW
 * scan is running (those cases bias nearly all time to the busy
 * context, leaving the other IWL_MIN_SLOT_TIME).  No-op on single-mode
 * devices or when the PAN context is inactive.
 * Must be called with priv->shrd->mutex held.
 */
int iwlagn_set_pan_params(struct iwl_priv *priv)
{
	struct iwl_wipan_params_cmd cmd;
	struct iwl_rxon_context *ctx_bss, *ctx_pan;
	int slot0 = 300, slot1 = 0;
	int ret;

	/* single-context (BSS-only) hardware never needs PAN parameters */
	if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS))
		return 0;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	lockdep_assert_held(&priv->shrd->mutex);

	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];

	/*
	 * If the PAN context is inactive, then we don't need
	 * to update the PAN parameters, the last thing we'll
	 * have done before it goes inactive is making the PAN
	 * parameters be WLAN-only.
	 */
	if (!ctx_pan->is_active)
		return 0;

	memset(&cmd, 0, sizeof(cmd));

	/* only 2 slots are currently allowed */
	cmd.num_slots = 2;

	cmd.slots[0].type = 0; /* BSS */
	cmd.slots[1].type = 1; /* PAN */

	if (priv->hw_roc_setup) {
		/* both contexts must be used for this to happen */
		slot1 = IWL_MIN_SLOT_TIME;
		slot0 = 3000;
	} else if (ctx_bss->vif && ctx_pan->vif) {
		/* both interfaces up: split the beacon interval between them */
		int bcnint = ctx_pan->beacon_int;
		int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;

		/* should be set, but seems unused?? */
		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);

		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
		    bcnint &&
		    bcnint != ctx_bss->beacon_int) {
			IWL_ERR(priv,
				"beacon intervals don't match (%d, %d)\n",
				ctx_bss->beacon_int, ctx_pan->beacon_int);
		} else
			bcnint = max_t(int, bcnint,
				       ctx_bss->beacon_int);
		if (!bcnint)
			bcnint = DEFAULT_BEACON_INTERVAL;
		slot0 = bcnint / 2;
		slot1 = bcnint - slot0;

		/* during scan, or while a context is neither idle nor
		 * associated, give almost all time to the other context */
		if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
		    (!ctx_bss->vif->bss_conf.idle &&
		     !ctx_bss->vif->bss_conf.assoc)) {
			slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		} else if (!ctx_pan->vif->bss_conf.idle &&
			   !ctx_pan->vif->bss_conf.assoc) {
			slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot0 = IWL_MIN_SLOT_TIME;
		}
	} else if (ctx_pan->vif) {
		/* PAN only: BSS gets no time unless a HW scan is running */
		slot0 = 0;
		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
					ctx_pan->beacon_int;
		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);

		if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
			slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		}
	}

	cmd.slots[0].width = cpu_to_le16(slot0);
	cmd.slots[1].width = cpu_to_le16(slot1);

	ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
			sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);

	return ret;
}
|
|
|
|
|
2010-10-23 16:15:41 +00:00
|
|
|
/**
 * iwlagn_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data. This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 *
 * The connect/disconnect flow should be as the following:
 *
 * 1. make sure send RXON command with association bit unset if not connect
 *	this should include the channel and the band for the candidate
 *	to be connected to
 * 2. Add Station before RXON association with the AP
 * 3. RXON_timing has to send before RXON for connection
 * 4. full RXON command - associated bit set
 * 5. use RXON_ASSOC command to update any flags changes
 *
 * Must be called with priv->shrd->mutex held.  Returns 0 on success,
 * -EINVAL on shutdown or invalid RXON, -EBUSY when firmware is dead,
 * or the error propagated from a failed sub-step.
 */
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_rxon_cmd *active = (void *)&ctx->active;
	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
	int ret;

	lockdep_assert_held(&priv->shrd->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return -EINVAL;

	if (!iwl_is_alive(priv->shrd))
		return -EBUSY;

	/* This function hardcodes a bunch of dual-mode assumptions */
	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	if (!ctx->is_active)
		return 0;

	/* override BSSID if necessary due to preauth */
	if (ctx->preauth_bssid)
		memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	/*
	 * force CTS-to-self frames protection if RTS-CTS is not preferred
	 * one aggregation protection method
	 */
	if (!(cfg(priv)->ht_params &&
	      cfg(priv)->ht_params->use_rts_for_aggregation))
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;

	/* short slot for short-slot BSSes and anything off the 2.4 GHz band */
	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

	iwl_print_rx_config_cmd(priv, ctx->ctxid);
	ret = iwl_check_rxon_cmd(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
	    (priv->switch_channel != ctx->staging.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
			      le16_to_cpu(priv->switch_channel));
		iwl_chswitch_done(priv, false);
	}

	/*
	 * If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration.
	 */
	if (!iwl_full_rxon_required(priv, ctx)) {
		ret = iwlagn_send_rxon_assoc(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active, &ctx->staging, sizeof(*active));
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if after settings changed.
		 */
		iwl_set_tx_power(priv, priv->tx_power_next, false);

		/* make sure we are in the right PS state */
		iwl_power_update_mode(priv, true);

		return 0;
	}

	iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto);

	IWL_DEBUG_INFO(priv,
		       "Going to commit RXON\n"
		       " * with%s RXON_FILTER_ASSOC_MSK\n"
		       " * channel = %d\n"
		       " * bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(ctx->staging.channel),
		       ctx->staging.bssid_addr);

	/*
	 * Always clear associated first, but with the correct config.
	 * This is required as for example station addition for the
	 * AP station must be done after the BSSID is set to correctly
	 * set up filters in the device.
	 */
	ret = iwlagn_rxon_disconn(priv, ctx);
	if (ret)
		return ret;

	/* PAN slot parameters must be refreshed before (re)associating */
	ret = iwlagn_set_pan_params(priv);
	if (ret)
		return ret;

	if (new_assoc)
		return iwlagn_rxon_connect(priv, ctx);

	return 0;
}
|
|
|
|
|
2011-12-02 16:19:18 +00:00
|
|
|
void iwlagn_config_ht40(struct ieee80211_conf *conf,
|
|
|
|
struct iwl_rxon_context *ctx)
|
|
|
|
{
|
|
|
|
if (conf_is_ht40_minus(conf)) {
|
|
|
|
ctx->ht.extension_chan_offset =
|
|
|
|
IEEE80211_HT_PARAM_CHA_SEC_BELOW;
|
|
|
|
ctx->ht.is_40mhz = true;
|
|
|
|
} else if (conf_is_ht40_plus(conf)) {
|
|
|
|
ctx->ht.extension_chan_offset =
|
|
|
|
IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
|
|
|
|
ctx->ht.is_40mhz = true;
|
|
|
|
} else {
|
|
|
|
ctx->ht.extension_chan_offset =
|
|
|
|
IEEE80211_HT_PARAM_CHA_SEC_NONE;
|
|
|
|
ctx->ht.is_40mhz = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
/*
 * iwlagn_mac_config - mac80211 ->config callback
 *
 * Applies hardware configuration changes requested by mac80211 (SM PS,
 * channel, power save, TX power), then commits any RXON changes that
 * resulted.  Bails out early (without error) while the device is shutting
 * down, scanning, or not yet ready.
 *
 * Returns 0 on success or -EINVAL for an invalid channel; errors from
 * the power-update path are only logged, not returned.
 */
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	const struct iwl_channel_info *ch_info;
	int ret = 0;

	IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed);

	mutex_lock(&priv->shrd->mutex);

	/* device is going away - nothing to configure */
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		goto out;

	/* defer configuration while a scan is in progress */
	if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
		goto out;
	}

	if (!iwl_is_ready(priv->shrd)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		for_each_context(priv, ctx)
			iwlagn_set_rxon_chain(priv, ctx);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		unsigned long flags;

		ch_info = iwl_get_channel_info(priv, channel->band,
					       channel->hw_value);
		if (!is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto out;
		}

		/* staging RXON data is protected by the shared spinlock */
		spin_lock_irqsave(&priv->shrd->lock, flags);

		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf))
				ctx->ht.enabled = conf_is_ht(conf);

			if (ctx->ht.enabled) {
				/* if HT40 is used, it should not change
				 * after associated except channel switch */
				if (!ctx->ht.is_40mhz ||
						!iwl_is_associated_ctx(ctx))
					iwlagn_config_ht40(conf, ctx);
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if (le16_to_cpu(ctx->staging.channel) !=
			    channel->hw_value)
				ctx->staging.flags = 0;

			iwl_set_rxon_channel(priv, channel, ctx);
			iwl_set_rxon_ht(priv, &priv->current_ht_config);

			iwl_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		spin_unlock_irqrestore(&priv->shrd->lock, flags);

		/* broadcast stations are per-band; refresh after a move */
		iwl_update_bcast_stations(priv);

		/*
		 * The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists.
		 */
		iwl_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
			IEEE80211_CONF_CHANGE_IDLE)) {
		/* failure here is non-fatal: log and keep going */
		ret = iwl_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_set_tx_power(priv, conf->power_level, false);
	}

	/* push any staging RXON that now differs from the active one */
	for_each_context(priv, ctx) {
		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			continue;
		iwlagn_commit_rxon(priv, ctx);
	}
 out:
	mutex_unlock(&priv->shrd->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
|
|
|
|
|
|
|
|
static void iwlagn_check_needed_chains(struct iwl_priv *priv,
|
|
|
|
struct iwl_rxon_context *ctx,
|
|
|
|
struct ieee80211_bss_conf *bss_conf)
|
|
|
|
{
|
|
|
|
struct ieee80211_vif *vif = ctx->vif;
|
|
|
|
struct iwl_rxon_context *tmp;
|
|
|
|
struct ieee80211_sta *sta;
|
|
|
|
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
|
2011-02-25 11:24:11 +00:00
|
|
|
struct ieee80211_sta_ht_cap *ht_cap;
|
2010-10-23 16:15:41 +00:00
|
|
|
bool need_multiple;
|
|
|
|
|
2011-08-26 06:10:44 +00:00
|
|
|
lockdep_assert_held(&priv->shrd->mutex);
|
2010-10-23 16:15:41 +00:00
|
|
|
|
|
|
|
switch (vif->type) {
|
|
|
|
case NL80211_IFTYPE_STATION:
|
|
|
|
rcu_read_lock();
|
|
|
|
sta = ieee80211_find_sta(vif, bss_conf->bssid);
|
2011-02-25 11:24:11 +00:00
|
|
|
if (!sta) {
|
2010-10-23 16:15:41 +00:00
|
|
|
/*
|
|
|
|
* If at all, this can only happen through a race
|
|
|
|
* when the AP disconnects us while we're still
|
|
|
|
* setting up the connection, in that case mac80211
|
|
|
|
* will soon tell us about that.
|
|
|
|
*/
|
|
|
|
need_multiple = false;
|
2011-02-25 11:24:11 +00:00
|
|
|
rcu_read_unlock();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ht_cap = &sta->ht_cap;
|
|
|
|
|
|
|
|
need_multiple = true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the peer advertises no support for receiving 2 and 3
|
|
|
|
* stream MCS rates, it can't be transmitting them either.
|
|
|
|
*/
|
|
|
|
if (ht_cap->mcs.rx_mask[1] == 0 &&
|
|
|
|
ht_cap->mcs.rx_mask[2] == 0) {
|
|
|
|
need_multiple = false;
|
|
|
|
} else if (!(ht_cap->mcs.tx_params &
|
|
|
|
IEEE80211_HT_MCS_TX_DEFINED)) {
|
|
|
|
/* If it can't TX MCS at all ... */
|
|
|
|
need_multiple = false;
|
|
|
|
} else if (ht_cap->mcs.tx_params &
|
|
|
|
IEEE80211_HT_MCS_TX_RX_DIFF) {
|
|
|
|
int maxstreams;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* But if it can receive them, it might still not
|
|
|
|
* be able to transmit them, which is what we need
|
|
|
|
* to check here -- so check the number of streams
|
|
|
|
* it advertises for TX (if different from RX).
|
|
|
|
*/
|
|
|
|
|
|
|
|
maxstreams = (ht_cap->mcs.tx_params &
|
|
|
|
IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
|
|
|
|
maxstreams >>=
|
|
|
|
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
|
|
|
|
maxstreams += 1;
|
|
|
|
|
|
|
|
if (maxstreams <= 1)
|
|
|
|
need_multiple = false;
|
2010-10-23 16:15:41 +00:00
|
|
|
}
|
2011-02-25 11:24:11 +00:00
|
|
|
|
2010-10-23 16:15:41 +00:00
|
|
|
rcu_read_unlock();
|
|
|
|
break;
|
|
|
|
case NL80211_IFTYPE_ADHOC:
|
|
|
|
/* currently */
|
|
|
|
need_multiple = false;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* only AP really */
|
|
|
|
need_multiple = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->ht_need_multiple_chains = need_multiple;
|
|
|
|
|
|
|
|
if (!need_multiple) {
|
|
|
|
/* check all contexts */
|
|
|
|
for_each_context(priv, tmp) {
|
|
|
|
if (!tmp->vif)
|
|
|
|
continue;
|
|
|
|
if (tmp->ht_need_multiple_chains) {
|
|
|
|
need_multiple = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ht_conf->single_chain_sufficient = !need_multiple;
|
|
|
|
}
|
|
|
|
|
2011-07-08 15:46:29 +00:00
|
|
|
static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
|
|
|
|
{
|
|
|
|
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
|
|
|
|
iwl_is_any_associated(priv)) {
|
|
|
|
struct iwl_calib_chain_noise_reset_cmd cmd;
|
|
|
|
|
|
|
|
/* clear data for chain noise calibration algorithm */
|
|
|
|
data->chain_noise_a = 0;
|
|
|
|
data->chain_noise_b = 0;
|
|
|
|
data->chain_noise_c = 0;
|
|
|
|
data->chain_signal_a = 0;
|
|
|
|
data->chain_signal_b = 0;
|
|
|
|
data->chain_signal_c = 0;
|
|
|
|
data->beacon_count = 0;
|
|
|
|
|
|
|
|
memset(&cmd, 0, sizeof(cmd));
|
|
|
|
iwl_set_calib_hdr(&cmd.hdr,
|
2011-07-13 15:38:57 +00:00
|
|
|
priv->phy_calib_chain_noise_reset_cmd);
|
iwlagn: bus layer chooses its transport layer
Remove iwl_transport_register which was a W/A. The bus layer knows what
transport to use. So now, the bus layer gives the upper layer a pointer to the
iwl_trans_ops struct that it wants to use. The upper layer then, allocates the
desired transport layer using iwl_trans_ops->alloc function.
As a result of this, priv->trans, no longer exists, priv holds a pointer to
iwl_shared, which holds a pointer to iwl_trans. This required to change all the
calls to the transport layer from upper layer. While we were at it, trans_X
inlines have been renamed to iwl_trans_X to avoid confusions, which of course
required to rename the functions inside the transport layer because of
conflicts in names. So the static API functions inside the transport layer
implementation have been renamed to iwl_trans_pcie_X.
Until now, the IRQ / Tasklet were initialized in iwl_transport_layer. This is
confusing since the registration doesn't mean to request IRQ, so I added a
handler for that.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2011-08-26 06:10:48 +00:00
|
|
|
ret = iwl_trans_send_cmd_pdu(trans(priv),
|
2011-07-08 15:46:29 +00:00
|
|
|
REPLY_PHY_CALIBRATION_CMD,
|
|
|
|
CMD_SYNC, sizeof(cmd), &cmd);
|
|
|
|
if (ret)
|
|
|
|
IWL_ERR(priv,
|
|
|
|
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
|
|
|
|
data->state = IWL_CHAIN_NOISE_ACCUMULATE;
|
|
|
|
IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
/*
 * iwlagn_bss_info_changed - mac80211 ->bss_info_changed callback
 *
 * Folds BSS configuration changes (association state, QoS, preamble,
 * HT operation, CTS protection, beaconing, IBSS membership) into the
 * context's staging RXON, then commits the RXON if anything changed
 * (or unconditionally on a beacon-interval change).  On a fresh
 * association it also kicks off power management and the chain noise
 * calibration.  Entirely serialized by the shared mutex.
 */
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;
	bool force = false;

	mutex_lock(&priv->shrd->mutex);

	if (unlikely(!iwl_is_ready(priv->shrd))) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->shrd->mutex);
		return;
	}

	/* interface may have been removed in the meantime */
	if (unlikely(!ctx->vif)) {
		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
		mutex_unlock(&priv->shrd->mutex);
		return;
	}

	/* beacon interval changes require a commit even if staging == active */
	if (changes & BSS_CHANGED_BEACON_INT)
		force = true;

	if (changes & BSS_CHANGED_QOS) {
		ctx->qos_data.qos_active = bss_conf->qos;
		iwlagn_update_qos(priv, ctx);
	}

	/* mirror AID and short-preamble setting into the staging RXON */
	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		} else {
			/*
			 * If we disassociate while there are pending
			 * frames, just wake up the queues and let the
			 * frames "escape" ... This shouldn't really
			 * be happening to start with, but we should
			 * not get stuck in this case either since it
			 * can happen if userspace gets confused.
			 */
			if (ctx->last_tx_rejected) {
				ctx->last_tx_rejected = false;
				iwl_trans_wake_any_queue(trans(priv),
							 ctx->ctxid,
							 "Disassoc: flush queue");
			}
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

			/* rekey material is only tracked for the BSS context */
			if (ctx->ctxid == IWL_RXON_CTX_BSS)
				priv->have_rekey_data = false;
		}

		iwlagn_bt_coex_rssi_monitor(priv);
	}

	if (ctx->ht.enabled) {
		ctx->ht.protection = bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_PROTECTION;
		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
		iwlagn_check_needed_chains(priv, ctx, bss_conf);
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
	}

	iwlagn_set_rxon_chain(priv, ctx);

	/* TGG protection only applies on the 2.4 GHz band */
	if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

	if (bss_conf->use_cts_prot)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
	else
		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		if (vif->bss_conf.enable_beacon) {
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = ctx;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = NULL;
		}
	}

	/*
	 * If the ucode decides to do beacon filtering before
	 * association, it will lose beacons that are needed
	 * before sending frames out on passive channels. This
	 * causes association failures on those channels. Enable
	 * receiving beacons in such cases.
	 */

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!bss_conf->assoc)
			ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			ctx->staging.filter_flags &=
				~RXON_FILTER_BCON_AWARE_MSK;
	}

	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwlagn_commit_rxon(priv, ctx);

	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
		/*
		 * The chain noise calibration will enable PM upon
		 * completion. If calibration has already been run
		 * then we need to enable power management here.
		 */
		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
			iwl_power_update_mode(priv, false);

		/* Enable RX differential gain and sensitivity calibrations */
		if (!priv->disable_chain_noise_cal)
			iwlagn_chain_noise_reset(priv);
		priv->start_calib = 1;
		WARN_ON(ctx->preauth_bssid);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = iwlagn_manage_ibss_station(priv, vif,
						 bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
	    priv->beacon_ctx) {
		if (iwlagn_update_beacon(priv, vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	}

	mutex_unlock(&priv->shrd->mutex);
}
|
2010-11-10 17:56:38 +00:00
|
|
|
|
|
|
|
void iwlagn_post_scan(struct iwl_priv *priv)
|
|
|
|
{
|
|
|
|
struct iwl_rxon_context *ctx;
|
|
|
|
|
2011-06-03 14:54:14 +00:00
|
|
|
/*
|
|
|
|
* We do not commit power settings while scan is pending,
|
|
|
|
* do it now if the settings changed.
|
|
|
|
*/
|
|
|
|
iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
|
|
|
|
iwl_set_tx_power(priv, priv->tx_power_next, false);
|
|
|
|
|
2010-11-10 17:56:38 +00:00
|
|
|
/*
|
|
|
|
* Since setting the RXON may have been deferred while
|
|
|
|
* performing the scan, fire one off if needed
|
|
|
|
*/
|
|
|
|
for_each_context(priv, ctx)
|
|
|
|
if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
|
|
|
|
iwlagn_commit_rxon(priv, ctx);
|
|
|
|
|
2011-07-01 14:59:26 +00:00
|
|
|
iwlagn_set_pan_params(priv);
|
2010-11-10 17:56:38 +00:00
|
|
|
}
|