wireless-drivers-next patches for 5.2
Nothing really special standing out this time, iwlwifi being the most active driver. Major changes: iwlwifi * send NO_DATA events so they can be captured in radiotap * support for multiple BSSID * support for some new FW API versions * support new hardware * debugfs cleanups by Greg-KH qtnfmac * allow each MAC to specify its own regulatory rules -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABAgAGBQJcuHgsAAoJEG4XJFUm622bfo8H/3uRRxsQBHGg6e3NpELaxpNV IfrPDtvxyfILzIepBBhnZYUY0OrlTHKfMmzFBD9FFMojsxBYddnLZ/0iKUNKfwLm KzToW/64YJ784dc+tw85gjh8I3MB+RRoD0l01M1HuOkzQ4hDNEGK3IsMHsBs/oTZ huiqTYsTxStOj53vOpQiBFZ1pYBtvGLMxBdSepDcR27bgT1gwriynCSkSNglDH8z /t3m6hDGtZa6uVkoIVH+BAMu6+vt+vIkU/TOdmiW/zqBL2JYq6cDE0uIb3bLAzN6 uvS1Rj42P3OwHUwFavlUBdr5Rdcj6P24S5ZhtVaGGWCBjMZI5/nO7IjzwyQnQuQ= =/6q9 -----END PGP SIGNATURE----- Merge tag 'wireless-drivers-next-for-davem-2019-04-18' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next Kalle Valo says: ==================== wireless-drivers-next patches for 5.2 Nothing really special standing out this time, iwlwifi being the most active driver. Major changes: iwlwifi * send NO_DATA events so they can be captured in radiotap * support for multiple BSSID * support for some new FW API versions * support new hardware * debugfs cleanups by Greg-KH qtnfmac * allow each MAC to specify its own regulatory rules ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
f9a904efca
@ -1835,7 +1835,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
|
||||
static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
|
||||
{
|
||||
struct b43_phy_lp *lpphy = dev->phy.lp;
|
||||
struct lpphy_tx_gains gains, oldgains;
|
||||
struct lpphy_tx_gains oldgains;
|
||||
int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
|
||||
|
||||
lpphy_read_tx_pctl_mode_from_hardware(dev);
|
||||
@ -1849,9 +1849,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
|
||||
lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
|
||||
|
||||
if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
|
||||
lpphy_papd_cal(dev, gains, 0, 1, 30);
|
||||
lpphy_papd_cal(dev, oldgains, 0, 1, 30);
|
||||
else
|
||||
lpphy_papd_cal(dev, gains, 0, 1, 65);
|
||||
lpphy_papd_cal(dev, oldgains, 0, 1, 65);
|
||||
|
||||
if (old_afe_ovr)
|
||||
lpphy_set_tx_gains(dev, oldgains);
|
||||
|
@ -490,11 +490,18 @@ fail:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
|
||||
void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr)
|
||||
{
|
||||
struct brcmf_bcdc *bcdc = drvr->proto->pd;
|
||||
|
||||
brcmf_fws_detach_pre_delif(bcdc->fws);
|
||||
}
|
||||
|
||||
void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr)
|
||||
{
|
||||
struct brcmf_bcdc *bcdc = drvr->proto->pd;
|
||||
|
||||
drvr->proto->pd = NULL;
|
||||
brcmf_fws_detach(bcdc->fws);
|
||||
brcmf_fws_detach_post_delif(bcdc->fws);
|
||||
kfree(bcdc);
|
||||
}
|
||||
|
@ -18,14 +18,16 @@
|
||||
|
||||
#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
|
||||
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
|
||||
void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
|
||||
void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr);
|
||||
void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr);
|
||||
void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
|
||||
void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
|
||||
bool success);
|
||||
struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
|
||||
#else
|
||||
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
|
||||
static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
|
||||
static void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {};
|
||||
static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {}
|
||||
#endif
|
||||
|
||||
#endif /* BRCMFMAC_BCDC_H */
|
||||
|
@ -628,15 +628,13 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
|
||||
|
||||
err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
|
||||
if (err)
|
||||
return err;
|
||||
goto out;
|
||||
|
||||
addr &= SBSDIO_SB_OFT_ADDR_MASK;
|
||||
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
|
||||
|
||||
if (!err)
|
||||
err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
|
||||
mypkt);
|
||||
|
||||
err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
|
||||
out:
|
||||
brcmu_pkt_buf_free_skb(mypkt);
|
||||
|
||||
return err;
|
||||
|
@ -91,6 +91,7 @@ struct brcmf_bus_ops {
|
||||
int (*get_fwname)(struct device *dev, const char *ext,
|
||||
unsigned char *fw_name);
|
||||
void (*debugfs_create)(struct device *dev);
|
||||
int (*reset)(struct device *dev);
|
||||
};
|
||||
|
||||
|
||||
@ -245,6 +246,15 @@ void brcmf_bus_debugfs_create(struct brcmf_bus *bus)
|
||||
return bus->ops->debugfs_create(bus->dev);
|
||||
}
|
||||
|
||||
static inline
|
||||
int brcmf_bus_reset(struct brcmf_bus *bus)
|
||||
{
|
||||
if (!bus->ops->reset)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return bus->ops->reset(bus->dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* interface functions from common layer
|
||||
*/
|
||||
@ -262,6 +272,8 @@ void brcmf_detach(struct device *dev);
|
||||
void brcmf_dev_reset(struct device *dev);
|
||||
/* Request from bus module to initiate a coredump */
|
||||
void brcmf_dev_coredump(struct device *dev);
|
||||
/* Indication that firmware has halted or crashed */
|
||||
void brcmf_fw_crashed(struct device *dev);
|
||||
|
||||
/* Configure the "global" bus state used by upper layers */
|
||||
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
|
||||
|
@ -5464,6 +5464,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
|
||||
conn_info->req_ie =
|
||||
kmemdup(cfg->extra_buf, conn_info->req_ie_len,
|
||||
GFP_KERNEL);
|
||||
if (!conn_info->req_ie)
|
||||
conn_info->req_ie_len = 0;
|
||||
} else {
|
||||
conn_info->req_ie_len = 0;
|
||||
conn_info->req_ie = NULL;
|
||||
@ -5480,6 +5482,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
|
||||
conn_info->resp_ie =
|
||||
kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
|
||||
GFP_KERNEL);
|
||||
if (!conn_info->resp_ie)
|
||||
conn_info->resp_ie_len = 0;
|
||||
} else {
|
||||
conn_info->resp_ie_len = 0;
|
||||
conn_info->resp_ie = NULL;
|
||||
|
@ -841,17 +841,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
|
||||
bool rtnl_locked)
|
||||
{
|
||||
struct brcmf_if *ifp;
|
||||
int ifidx;
|
||||
|
||||
ifp = drvr->iflist[bsscfgidx];
|
||||
drvr->iflist[bsscfgidx] = NULL;
|
||||
if (!ifp) {
|
||||
bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx);
|
||||
return;
|
||||
}
|
||||
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
|
||||
ifp->ifidx);
|
||||
if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
|
||||
drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
|
||||
ifidx = ifp->ifidx;
|
||||
|
||||
if (ifp->ndev) {
|
||||
if (bsscfgidx == 0) {
|
||||
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
|
||||
@ -879,6 +879,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
|
||||
brcmf_p2p_ifp_removed(ifp, rtnl_locked);
|
||||
kfree(ifp);
|
||||
}
|
||||
|
||||
drvr->iflist[bsscfgidx] = NULL;
|
||||
if (drvr->if2bss[ifidx] == bsscfgidx)
|
||||
drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
|
||||
}
|
||||
|
||||
void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
|
||||
@ -1084,6 +1088,14 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void brcmf_core_bus_reset(struct work_struct *work)
|
||||
{
|
||||
struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
|
||||
bus_reset);
|
||||
|
||||
brcmf_bus_reset(drvr->bus_if);
|
||||
}
|
||||
|
||||
static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
|
||||
{
|
||||
int ret = -1;
|
||||
@ -1155,6 +1167,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
|
||||
#endif
|
||||
#endif /* CONFIG_INET */
|
||||
|
||||
INIT_WORK(&drvr->bus_reset, brcmf_core_bus_reset);
|
||||
|
||||
/* populate debugfs */
|
||||
brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
|
||||
brcmf_feat_debugfs_create(drvr);
|
||||
@ -1273,6 +1287,18 @@ void brcmf_dev_coredump(struct device *dev)
|
||||
brcmf_dbg(TRACE, "failed to create coredump\n");
|
||||
}
|
||||
|
||||
void brcmf_fw_crashed(struct device *dev)
|
||||
{
|
||||
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
|
||||
struct brcmf_pub *drvr = bus_if->drvr;
|
||||
|
||||
bphy_err(drvr, "Firmware has halted or crashed\n");
|
||||
|
||||
brcmf_dev_coredump(dev);
|
||||
|
||||
schedule_work(&drvr->bus_reset);
|
||||
}
|
||||
|
||||
void brcmf_detach(struct device *dev)
|
||||
{
|
||||
s32 i;
|
||||
@ -1299,6 +1325,8 @@ void brcmf_detach(struct device *dev)
|
||||
|
||||
brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
|
||||
|
||||
brcmf_proto_detach_pre_delif(drvr);
|
||||
|
||||
/* make sure primary interface removed last */
|
||||
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
|
||||
brcmf_remove_interface(drvr->iflist[i], false);
|
||||
@ -1308,7 +1336,7 @@ void brcmf_detach(struct device *dev)
|
||||
|
||||
brcmf_bus_stop(drvr->bus_if);
|
||||
|
||||
brcmf_proto_detach(drvr);
|
||||
brcmf_proto_detach_post_delif(drvr);
|
||||
|
||||
bus_if->drvr = NULL;
|
||||
wiphy_free(drvr->wiphy);
|
||||
|
@ -143,6 +143,8 @@ struct brcmf_pub {
|
||||
struct notifier_block inet6addr_notifier;
|
||||
struct brcmf_mp_device *settings;
|
||||
|
||||
struct work_struct bus_reset;
|
||||
|
||||
u8 clmver[BRCMF_DCMD_SMLEN];
|
||||
};
|
||||
|
||||
|
@ -711,7 +711,6 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
|
||||
size_t mp_path_len;
|
||||
u32 i, j;
|
||||
char end = '\0';
|
||||
size_t reqsz;
|
||||
|
||||
for (i = 0; i < table_size; i++) {
|
||||
if (mapping_table[i].chipid == chip &&
|
||||
@ -726,8 +725,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
reqsz = sizeof(*fwreq) + n_fwnames * sizeof(struct brcmf_fw_item);
|
||||
fwreq = kzalloc(reqsz, GFP_KERNEL);
|
||||
fwreq = kzalloc(struct_size(fwreq, items, n_fwnames), GFP_KERNEL);
|
||||
if (!fwreq)
|
||||
return NULL;
|
||||
|
||||
@ -743,6 +741,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
|
||||
|
||||
for (j = 0; j < n_fwnames; j++) {
|
||||
fwreq->items[j].path = fwnames[j].path;
|
||||
fwnames[j].path[0] = '\0';
|
||||
/* check if firmware path is provided by module parameter */
|
||||
if (brcmf_mp_global.firmware_path[0] != '\0') {
|
||||
strlcpy(fwnames[j].path, mp_path,
|
||||
|
@ -580,24 +580,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
|
||||
return ifidx == *(int *)arg;
|
||||
}
|
||||
|
||||
static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
|
||||
int ifidx)
|
||||
{
|
||||
bool (*matchfn)(struct sk_buff *, void *) = NULL;
|
||||
struct sk_buff *skb;
|
||||
int prec;
|
||||
|
||||
if (ifidx != -1)
|
||||
matchfn = brcmf_fws_ifidx_match;
|
||||
for (prec = 0; prec < q->num_prec; prec++) {
|
||||
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
|
||||
while (skb) {
|
||||
brcmu_pkt_buf_free_skb(skb);
|
||||
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
|
||||
{
|
||||
int i;
|
||||
@ -669,6 +651,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
|
||||
int ifidx)
|
||||
{
|
||||
bool (*matchfn)(struct sk_buff *, void *) = NULL;
|
||||
struct sk_buff *skb;
|
||||
int prec;
|
||||
u32 hslot;
|
||||
|
||||
if (ifidx != -1)
|
||||
matchfn = brcmf_fws_ifidx_match;
|
||||
for (prec = 0; prec < q->num_prec; prec++) {
|
||||
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
|
||||
while (skb) {
|
||||
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
|
||||
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
|
||||
true);
|
||||
brcmu_pkt_buf_free_skb(skb);
|
||||
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
|
||||
u32 slot_id)
|
||||
{
|
||||
@ -2200,6 +2204,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
|
||||
brcmf_fws_lock(fws);
|
||||
ifp->fws_desc = NULL;
|
||||
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
|
||||
brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
|
||||
ifp->ifidx);
|
||||
brcmf_fws_macdesc_deinit(entry);
|
||||
brcmf_fws_cleanup(fws, ifp->ifidx);
|
||||
brcmf_fws_unlock(fws);
|
||||
@ -2437,17 +2443,25 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
|
||||
return fws;
|
||||
|
||||
fail:
|
||||
brcmf_fws_detach(fws);
|
||||
brcmf_fws_detach_pre_delif(fws);
|
||||
brcmf_fws_detach_post_delif(fws);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
void brcmf_fws_detach(struct brcmf_fws_info *fws)
|
||||
void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws)
|
||||
{
|
||||
if (!fws)
|
||||
return;
|
||||
|
||||
if (fws->fws_wq)
|
||||
if (fws->fws_wq) {
|
||||
destroy_workqueue(fws->fws_wq);
|
||||
fws->fws_wq = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws)
|
||||
{
|
||||
if (!fws)
|
||||
return;
|
||||
|
||||
/* cleanup */
|
||||
brcmf_fws_lock(fws);
|
||||
|
@ -19,7 +19,8 @@
|
||||
#define FWSIGNAL_H_
|
||||
|
||||
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
|
||||
void brcmf_fws_detach(struct brcmf_fws_info *fws);
|
||||
void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws);
|
||||
void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws);
|
||||
void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);
|
||||
bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
|
||||
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
|
||||
|
@ -345,6 +345,10 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
|
||||
BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
|
||||
};
|
||||
|
||||
static void brcmf_pcie_setup(struct device *dev, int ret,
|
||||
struct brcmf_fw_request *fwreq);
|
||||
static struct brcmf_fw_request *
|
||||
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
|
||||
|
||||
static u32
|
||||
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
|
||||
@ -730,7 +734,7 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
|
||||
}
|
||||
if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
|
||||
brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
|
||||
brcmf_dev_coredump(&devinfo->pdev->dev);
|
||||
brcmf_fw_crashed(&devinfo->pdev->dev);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1409,6 +1413,36 @@ int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcmf_pcie_reset(struct device *dev)
|
||||
{
|
||||
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
|
||||
struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
|
||||
struct brcmf_pciedev_info *devinfo = buspub->devinfo;
|
||||
struct brcmf_fw_request *fwreq;
|
||||
int err;
|
||||
|
||||
brcmf_detach(dev);
|
||||
|
||||
brcmf_pcie_release_irq(devinfo);
|
||||
brcmf_pcie_release_scratchbuffers(devinfo);
|
||||
brcmf_pcie_release_ringbuffers(devinfo);
|
||||
brcmf_pcie_reset_device(devinfo);
|
||||
|
||||
fwreq = brcmf_pcie_prepare_fw_request(devinfo);
|
||||
if (!fwreq) {
|
||||
dev_err(dev, "Failed to prepare FW request\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
|
||||
if (err) {
|
||||
dev_err(dev, "Failed to prepare FW request\n");
|
||||
kfree(fwreq);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
|
||||
.txdata = brcmf_pcie_tx,
|
||||
.stop = brcmf_pcie_down,
|
||||
@ -1418,6 +1452,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
|
||||
.get_ramsize = brcmf_pcie_get_ramsize,
|
||||
.get_memdump = brcmf_pcie_get_memdump,
|
||||
.get_fwname = brcmf_pcie_get_fwname,
|
||||
.reset = brcmf_pcie_reset,
|
||||
};
|
||||
|
||||
|
||||
|
@ -67,16 +67,22 @@ fail:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void brcmf_proto_detach(struct brcmf_pub *drvr)
|
||||
void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr)
|
||||
{
|
||||
brcmf_dbg(TRACE, "Enter\n");
|
||||
|
||||
if (drvr->proto) {
|
||||
if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
|
||||
brcmf_proto_bcdc_detach(drvr);
|
||||
brcmf_proto_bcdc_detach_post_delif(drvr);
|
||||
else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
|
||||
brcmf_proto_msgbuf_detach(drvr);
|
||||
kfree(drvr->proto);
|
||||
drvr->proto = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr)
|
||||
{
|
||||
if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
|
||||
brcmf_proto_bcdc_detach_pre_delif(drvr);
|
||||
}
|
||||
|
@ -54,7 +54,8 @@ struct brcmf_proto {
|
||||
|
||||
|
||||
int brcmf_proto_attach(struct brcmf_pub *drvr);
|
||||
void brcmf_proto_detach(struct brcmf_pub *drvr);
|
||||
void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr);
|
||||
void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr);
|
||||
|
||||
static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
|
||||
struct sk_buff *skb,
|
||||
|
@ -622,6 +622,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
|
||||
/* Note the names are not postfixed with a1 for backward compatibility */
|
||||
BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio");
|
||||
BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
|
||||
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
|
||||
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
|
||||
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
|
||||
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
|
||||
@ -642,7 +643,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
|
||||
BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
|
||||
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0),
|
||||
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1),
|
||||
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
|
||||
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
|
||||
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
|
||||
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
|
||||
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
|
||||
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
|
||||
@ -1090,8 +1092,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
|
||||
|
||||
/* dongle indicates the firmware has halted/crashed */
|
||||
if (hmb_data & HMB_DATA_FWHALT) {
|
||||
brcmf_err("mailbox indicates firmware halted\n");
|
||||
brcmf_dev_coredump(&sdiod->func1->dev);
|
||||
brcmf_dbg(SDIO, "mailbox indicates firmware halted\n");
|
||||
brcmf_fw_crashed(&sdiod->func1->dev);
|
||||
}
|
||||
|
||||
/* Dongle recomposed rx frames, accept them again */
|
||||
|
@ -160,7 +160,7 @@ struct brcmf_usbdev_info {
|
||||
|
||||
struct usb_device *usbdev;
|
||||
struct device *dev;
|
||||
struct mutex dev_init_lock;
|
||||
struct completion dev_init_done;
|
||||
|
||||
int ctl_in_pipe, ctl_out_pipe;
|
||||
struct urb *ctl_urb; /* URB for control endpoint */
|
||||
@ -445,22 +445,17 @@ fail:
|
||||
|
||||
}
|
||||
|
||||
static void brcmf_usb_free_q(struct list_head *q, bool pending)
|
||||
static void brcmf_usb_free_q(struct list_head *q)
|
||||
{
|
||||
struct brcmf_usbreq *req, *next;
|
||||
int i = 0;
|
||||
|
||||
list_for_each_entry_safe(req, next, q, list) {
|
||||
if (!req->urb) {
|
||||
brcmf_err("bad req\n");
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
if (pending) {
|
||||
usb_kill_urb(req->urb);
|
||||
} else {
|
||||
usb_free_urb(req->urb);
|
||||
list_del_init(&req->list);
|
||||
}
|
||||
usb_free_urb(req->urb);
|
||||
list_del_init(&req->list);
|
||||
}
|
||||
}
|
||||
|
||||
@ -682,12 +677,18 @@ static int brcmf_usb_up(struct device *dev)
|
||||
|
||||
static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (devinfo->ctl_urb)
|
||||
usb_kill_urb(devinfo->ctl_urb);
|
||||
if (devinfo->bulk_urb)
|
||||
usb_kill_urb(devinfo->bulk_urb);
|
||||
brcmf_usb_free_q(&devinfo->tx_postq, true);
|
||||
brcmf_usb_free_q(&devinfo->rx_postq, true);
|
||||
if (devinfo->tx_reqs)
|
||||
for (i = 0; i < devinfo->bus_pub.ntxq; i++)
|
||||
usb_kill_urb(devinfo->tx_reqs[i].urb);
|
||||
if (devinfo->rx_reqs)
|
||||
for (i = 0; i < devinfo->bus_pub.nrxq; i++)
|
||||
usb_kill_urb(devinfo->rx_reqs[i].urb);
|
||||
}
|
||||
|
||||
static void brcmf_usb_down(struct device *dev)
|
||||
@ -1023,8 +1024,8 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
|
||||
brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
|
||||
|
||||
/* free the URBS */
|
||||
brcmf_usb_free_q(&devinfo->rx_freeq, false);
|
||||
brcmf_usb_free_q(&devinfo->tx_freeq, false);
|
||||
brcmf_usb_free_q(&devinfo->rx_freeq);
|
||||
brcmf_usb_free_q(&devinfo->tx_freeq);
|
||||
|
||||
usb_free_urb(devinfo->ctl_urb);
|
||||
usb_free_urb(devinfo->bulk_urb);
|
||||
@ -1193,11 +1194,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
mutex_unlock(&devinfo->dev_init_lock);
|
||||
complete(&devinfo->dev_init_done);
|
||||
return;
|
||||
error:
|
||||
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
|
||||
mutex_unlock(&devinfo->dev_init_lock);
|
||||
complete(&devinfo->dev_init_done);
|
||||
device_release_driver(dev);
|
||||
}
|
||||
|
||||
@ -1265,7 +1266,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
|
||||
if (ret)
|
||||
goto fail;
|
||||
/* we are done */
|
||||
mutex_unlock(&devinfo->dev_init_lock);
|
||||
complete(&devinfo->dev_init_done);
|
||||
return 0;
|
||||
}
|
||||
bus->chip = bus_pub->devid;
|
||||
@ -1325,11 +1326,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
|
||||
|
||||
devinfo->usbdev = usb;
|
||||
devinfo->dev = &usb->dev;
|
||||
/* Take an init lock, to protect for disconnect while still loading.
|
||||
/* Init completion, to protect for disconnect while still loading.
|
||||
* Necessary because of the asynchronous firmware load construction
|
||||
*/
|
||||
mutex_init(&devinfo->dev_init_lock);
|
||||
mutex_lock(&devinfo->dev_init_lock);
|
||||
init_completion(&devinfo->dev_init_done);
|
||||
|
||||
usb_set_intfdata(intf, devinfo);
|
||||
|
||||
@ -1407,7 +1407,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
mutex_unlock(&devinfo->dev_init_lock);
|
||||
complete(&devinfo->dev_init_done);
|
||||
kfree(devinfo);
|
||||
usb_set_intfdata(intf, NULL);
|
||||
return ret;
|
||||
@ -1422,7 +1422,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
|
||||
devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
|
||||
|
||||
if (devinfo) {
|
||||
mutex_lock(&devinfo->dev_init_lock);
|
||||
wait_for_completion(&devinfo->dev_init_done);
|
||||
/* Make sure that devinfo still exists. Firmware probe routines
|
||||
* may have released the device and cleared the intfdata.
|
||||
*/
|
||||
|
@ -577,7 +577,6 @@ il4965_math_div_round(s32 num, s32 denom, s32 * res)
|
||||
sign = -sign;
|
||||
denom = -denom;
|
||||
}
|
||||
*res = 1;
|
||||
*res = ((num * 2 + denom) / (denom * 2)) * sign;
|
||||
|
||||
return 1;
|
||||
|
@ -89,6 +89,7 @@
|
||||
#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
|
||||
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
|
||||
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
|
||||
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
|
||||
|
||||
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
|
||||
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
|
||||
@ -180,7 +181,11 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
|
||||
.dbgc_supported = true, \
|
||||
.min_umac_error_event_table = 0x400000, \
|
||||
.d3_debug_data_base_addr = 0x401000, \
|
||||
.d3_debug_data_length = 60 * 1024
|
||||
.d3_debug_data_length = 60 * 1024, \
|
||||
.fw_mon_smem_write_ptr_addr = 0xa0c16c, \
|
||||
.fw_mon_smem_write_ptr_msk = 0xfffff, \
|
||||
.fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \
|
||||
.fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
|
||||
|
||||
#define IWL_DEVICE_AX200_COMMON \
|
||||
IWL_DEVICE_22000_COMMON, \
|
||||
@ -190,7 +195,8 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
|
||||
IWL_DEVICE_22000_COMMON, \
|
||||
.device_family = IWL_DEVICE_FAMILY_22000, \
|
||||
.base_params = &iwl_22000_base_params, \
|
||||
.csr = &iwl_csr_v1
|
||||
.csr = &iwl_csr_v1, \
|
||||
.gp2_reg_addr = 0xa02c68
|
||||
|
||||
#define IWL_DEVICE_22560 \
|
||||
IWL_DEVICE_22000_COMMON, \
|
||||
@ -203,7 +209,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
|
||||
.device_family = IWL_DEVICE_FAMILY_AX210, \
|
||||
.base_params = &iwl_22000_base_params, \
|
||||
.csr = &iwl_csr_v1, \
|
||||
.min_txq_size = 128
|
||||
.min_txq_size = 128, \
|
||||
.gp2_reg_addr = 0xd02c68, \
|
||||
.min_256_ba_txq_size = 512
|
||||
|
||||
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 22000",
|
||||
@ -440,12 +448,20 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
|
||||
const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = {
|
||||
.name = "Intel(R) Wi-Fi 7 AX211 160MHz",
|
||||
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
|
||||
.uhb_supported = true,
|
||||
IWL_DEVICE_AX210,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
|
||||
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
|
||||
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
|
||||
.uhb_supported = true,
|
||||
IWL_DEVICE_AX210,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = {
|
||||
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
|
||||
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
|
||||
IWL_DEVICE_AX210,
|
||||
};
|
||||
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2015-2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright (C) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -20,7 +20,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2015-2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright (C) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -148,7 +148,11 @@ static const struct iwl_tt_params iwl9000_tt_params = {
|
||||
.d3_debug_data_length = 92 * 1024, \
|
||||
.ht_params = &iwl9000_ht_params, \
|
||||
.nvm_ver = IWL9000_NVM_VERSION, \
|
||||
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
|
||||
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
|
||||
.fw_mon_smem_write_ptr_addr = 0xa0476c, \
|
||||
.fw_mon_smem_write_ptr_msk = 0xfffff, \
|
||||
.fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \
|
||||
.fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
|
||||
|
||||
|
||||
const struct iwl_cfg iwl9160_2ac_cfg = {
|
||||
|
@ -60,12 +60,13 @@
|
||||
|
||||
#include <linux/bitops.h>
|
||||
|
||||
/*
|
||||
/**
|
||||
* struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
|
||||
*
|
||||
* @tlv_version: version info
|
||||
* @apply_point: &enum iwl_fw_ini_apply_point
|
||||
* @data: TLV data followed
|
||||
**/
|
||||
*/
|
||||
struct iwl_fw_ini_header {
|
||||
__le32 tlv_version;
|
||||
__le32 apply_point;
|
||||
@ -73,7 +74,7 @@ struct iwl_fw_ini_header {
|
||||
} __packed; /* FW_DEBUG_TLV_HEADER_S */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION)
|
||||
* struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
|
||||
* buffer allocation TLV - for debug
|
||||
*
|
||||
* @iwl_fw_ini_header: header
|
||||
@ -84,7 +85,7 @@ struct iwl_fw_ini_header {
|
||||
* @max_fragments: the maximum allowed fragmentation in the desired memory
|
||||
* allocation above
|
||||
* @min_frag_size: the minimum allowed fragmentation size in bytes
|
||||
*/
|
||||
*/
|
||||
struct iwl_fw_ini_allocation_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 allocation_id;
|
||||
@ -95,33 +96,52 @@ struct iwl_fw_ini_allocation_tlv {
|
||||
} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD)
|
||||
* Generic Host command pass through TLV
|
||||
* enum iwl_fw_ini_dbg_domain - debug domains
|
||||
* allows to send host cmd or collect memory region if a given domain is enabled
|
||||
*
|
||||
* @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on
|
||||
* @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain
|
||||
*/
|
||||
enum iwl_fw_ini_dbg_domain {
|
||||
IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0,
|
||||
IWL_FW_INI_DBG_DOMAIN_REPORT_PS,
|
||||
}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_hcmd
|
||||
*
|
||||
* @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC
|
||||
* @group: the desired cmd group
|
||||
* @padding: all zeros for dword alignment
|
||||
* @data: all of the relevant command (0xf6/0xf5) to be sent
|
||||
*/
|
||||
* @reserved: to align to FW struct
|
||||
* @data: all of the relevant command data to be sent
|
||||
*/
|
||||
struct iwl_fw_ini_hcmd {
|
||||
u8 id;
|
||||
u8 group;
|
||||
__le16 padding;
|
||||
__le16 reserved;
|
||||
u8 data[0];
|
||||
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_S */
|
||||
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_hcmd_tlv
|
||||
* struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD)
|
||||
* Generic Host command pass through TLV
|
||||
*
|
||||
* @header: header
|
||||
* @domain: send command only if the specific domain is enabled
|
||||
* &enum iwl_fw_ini_dbg_domain
|
||||
* @period_msec: period in which the hcmd will be sent to FW. Measured in msec
|
||||
* (0 = one time command).
|
||||
* @hcmd: a variable length host-command to be sent to apply the configuration.
|
||||
*/
|
||||
struct iwl_fw_ini_hcmd_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 domain;
|
||||
__le32 period_msec;
|
||||
struct iwl_fw_ini_hcmd hcmd;
|
||||
} __packed; /* FW_DEBUG_TLV_HCMD_S_VER_1 */
|
||||
} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */
|
||||
|
||||
/*
|
||||
* struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW)
|
||||
/**
|
||||
* struct iwl_fw_ini_debug_flow_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_FLOW)
|
||||
*
|
||||
* @header: header
|
||||
* @debug_flow_cfg: &enum iwl_fw_ini_debug_flow
|
||||
@ -134,8 +154,20 @@ struct iwl_fw_ini_debug_flow_tlv {
|
||||
#define IWL_FW_INI_MAX_REGION_ID 64
|
||||
#define IWL_FW_INI_MAX_NAME 32
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump.
|
||||
*
|
||||
* @id_and_grp: id and group of dhc response.
|
||||
* @desc: dhc response descriptor.
|
||||
*/
|
||||
struct iwl_fw_ini_region_cfg_dhc {
|
||||
__le32 id_and_grp;
|
||||
__le32 desc;
|
||||
} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
|
||||
*
|
||||
* @num_of_range: the amount of ranges in the region
|
||||
* @range_data_size: size of the data to read per range, in bytes.
|
||||
*/
|
||||
@ -146,6 +178,7 @@ struct iwl_fw_ini_region_cfg_internal {
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
|
||||
*
|
||||
* @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
|
||||
* @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
|
||||
* It is unused for tx.
|
||||
@ -163,34 +196,43 @@ struct iwl_fw_ini_region_cfg_fifos {
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_region_cfg
|
||||
*
|
||||
* @region_id: ID of this dump configuration
|
||||
* @region_type: &enum iwl_fw_ini_region_type
|
||||
* @num_regions: amount of regions in the address array.
|
||||
* @domain: dump this region only if the specific domain is enabled
|
||||
* &enum iwl_fw_ini_dbg_domain
|
||||
* @name_len: name length
|
||||
* @name: file name to use for this region
|
||||
* @internal: used in case the region uses internal memory.
|
||||
* @allocation_id: For DRAM type field substitutes for allocation_id
|
||||
* @fifos: used in case of fifos region.
|
||||
* @dhc_desc: dhc response descriptor.
|
||||
* @notif_id_and_grp: dump this region only if the specific notification
|
||||
* occurred.
|
||||
* @offset: offset to use for each memory base address
|
||||
* @start_addr: array of addresses.
|
||||
*/
|
||||
struct iwl_fw_ini_region_cfg {
|
||||
__le32 region_id;
|
||||
__le32 region_type;
|
||||
__le32 domain;
|
||||
__le32 name_len;
|
||||
u8 name[IWL_FW_INI_MAX_NAME];
|
||||
union {
|
||||
struct iwl_fw_ini_region_cfg_internal internal;
|
||||
__le32 allocation_id;
|
||||
struct iwl_fw_ini_region_cfg_fifos fifos;
|
||||
};
|
||||
struct iwl_fw_ini_region_cfg_dhc dhc_desc;
|
||||
__le32 notif_id_and_grp;
|
||||
}; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */
|
||||
__le32 offset;
|
||||
__le32 start_addr[];
|
||||
} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_S */
|
||||
} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG)
|
||||
* DUMP sections define IDs and triggers that use those IDs TLV
|
||||
* struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS)
|
||||
* defines memory regions to dump
|
||||
*
|
||||
* @header: header
|
||||
* @num_regions: how many different region section and IDs are coming next
|
||||
* @region_config: list of dump configurations
|
||||
@ -199,13 +241,12 @@ struct iwl_fw_ini_region_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 num_regions;
|
||||
struct iwl_fw_ini_region_cfg region_config[];
|
||||
} __packed; /* FW_DEBUG_TLV_REGIONS_S_VER_1 */
|
||||
} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG)
|
||||
* Region sections define IDs and triggers that use those IDs TLV
|
||||
* struct iwl_fw_ini_trigger
|
||||
*
|
||||
* @trigger_id: enum &iwl_fw_ini_tigger_id
|
||||
* @trigger_id: &enum iwl_fw_ini_trigger_id
|
||||
* @override_trig: determines how apply trigger in case a trigger with the
|
||||
* same id is already in use. Using the first 2 bytes:
|
||||
* Byte 0: if 0, override trigger configuration, otherwise use the
|
||||
@ -214,6 +255,7 @@ struct iwl_fw_ini_region_tlv {
|
||||
* existing trigger.
|
||||
* @dump_delay: delay from trigger fire to dump, in usec
|
||||
* @occurrences: max amount of times to be fired
|
||||
* @reserved: to align to FW struct
|
||||
* @ignore_consec: ignore consecutive triggers, in usec
|
||||
* @force_restart: force FW restart
|
||||
* @multi_dut: initiate debug dump data on several DUTs
|
||||
@ -226,17 +268,18 @@ struct iwl_fw_ini_trigger {
|
||||
__le32 override_trig;
|
||||
__le32 dump_delay;
|
||||
__le32 occurrences;
|
||||
__le32 reserved;
|
||||
__le32 ignore_consec;
|
||||
__le32 force_restart;
|
||||
__le32 multi_dut;
|
||||
__le32 trigger_data;
|
||||
__le32 num_regions;
|
||||
__le32 data[];
|
||||
} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_S */
|
||||
} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG)
|
||||
* DUMP sections define IDs and triggers that use those IDs TLV
|
||||
* struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS)
|
||||
* Triggers that hold memory regions to dump in case a trigger fires
|
||||
*
|
||||
* @header: header
|
||||
* @num_triggers: how many different triggers section and IDs are coming next
|
||||
@ -246,16 +289,18 @@ struct iwl_fw_ini_trigger_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 num_triggers;
|
||||
struct iwl_fw_ini_trigger trigger_config[];
|
||||
} __packed; /* FW_TLV_DEBUG_TRIGGERS_S_VER_1 */
|
||||
} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_trigger_id
|
||||
*
|
||||
* @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
|
||||
* @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
|
||||
* @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
|
||||
* @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
|
||||
* @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION: FW generic notification
|
||||
* @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification
|
||||
* @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
|
||||
* @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically
|
||||
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
|
||||
* @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
|
||||
* threshold was crossed
|
||||
@ -299,47 +344,51 @@ enum iwl_fw_ini_trigger_id {
|
||||
|
||||
/* FW triggers */
|
||||
IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
|
||||
IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION = 5,
|
||||
IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5,
|
||||
|
||||
/* User trigger */
|
||||
IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
|
||||
|
||||
/* periodic uses the data field for the interval time */
|
||||
IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7,
|
||||
|
||||
/* Host triggers */
|
||||
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 7,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 8,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 9,
|
||||
IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 10,
|
||||
IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 11,
|
||||
IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 12,
|
||||
IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 13,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_START = 14,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 15,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 16,
|
||||
IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 17,
|
||||
IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 18,
|
||||
IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 19,
|
||||
IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 20,
|
||||
IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 21,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 22,
|
||||
IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 23,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 24,
|
||||
IWL_FW_TRIGGER_ID_HOST_D3_START = 25,
|
||||
IWL_FW_TRIGGER_ID_HOST_D3_END = 26,
|
||||
IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 27,
|
||||
IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 28,
|
||||
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 29,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 30,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 31,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 32,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 33,
|
||||
IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 34,
|
||||
IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 35,
|
||||
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10,
|
||||
IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11,
|
||||
IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12,
|
||||
IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13,
|
||||
IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17,
|
||||
IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18,
|
||||
IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19,
|
||||
IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20,
|
||||
IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21,
|
||||
IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23,
|
||||
IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25,
|
||||
IWL_FW_TRIGGER_ID_HOST_D3_START = 26,
|
||||
IWL_FW_TRIGGER_ID_HOST_D3_END = 27,
|
||||
IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28,
|
||||
IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29,
|
||||
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34,
|
||||
IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35,
|
||||
IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36,
|
||||
|
||||
IWL_FW_TRIGGER_ID_NUM,
|
||||
}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_apply_point
|
||||
*
|
||||
* @IWL_FW_INI_APPLY_INVALID: invalid
|
||||
* @IWL_FW_INI_APPLY_EARLY: pre loading FW
|
||||
* @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive
|
||||
@ -360,6 +409,7 @@ enum iwl_fw_ini_apply_point {
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_allocation_id
|
||||
*
|
||||
* @IWL_FW_INI_ALLOCATION_INVALID: invalid
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
|
||||
@ -380,18 +430,22 @@ enum iwl_fw_ini_allocation_id {
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_buffer_location
|
||||
*
|
||||
* @IWL_FW_INI_LOCATION_INVALID: invalid
|
||||
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
|
||||
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
|
||||
* @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
|
||||
*/
|
||||
enum iwl_fw_ini_buffer_location {
|
||||
IWL_FW_INI_LOCATION_INVALID,
|
||||
IWL_FW_INI_LOCATION_SRAM_PATH,
|
||||
IWL_FW_INI_LOCATION_DRAM_PATH,
|
||||
IWL_FW_INI_LOCATION_NPK_PATH,
|
||||
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_debug_flow
|
||||
*
|
||||
* @IWL_FW_INI_DEBUG_INVALID: invalid
|
||||
* @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
|
||||
* @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
|
||||
@ -404,6 +458,7 @@ enum iwl_fw_ini_debug_flow {
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_region_type
|
||||
*
|
||||
* @IWL_FW_INI_REGION_INVALID: invalid
|
||||
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
|
||||
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
|
||||
@ -416,6 +471,8 @@ enum iwl_fw_ini_debug_flow {
|
||||
* @IWL_FW_INI_REGION_RXF: RX fifo
|
||||
* @IWL_FW_INI_REGION_PAGING: paging memory
|
||||
* @IWL_FW_INI_REGION_CSR: CSR registers
|
||||
* @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
|
||||
* @IWL_FW_INI_REGION_DHC: dhc response to dump
|
||||
* @IWL_FW_INI_REGION_NUM: number of region types
|
||||
*/
|
||||
enum iwl_fw_ini_region_type {
|
||||
@ -431,6 +488,8 @@ enum iwl_fw_ini_region_type {
|
||||
IWL_FW_INI_REGION_RXF,
|
||||
IWL_FW_INI_REGION_PAGING,
|
||||
IWL_FW_INI_REGION_CSR,
|
||||
IWL_FW_INI_REGION_NOTIFICATION,
|
||||
IWL_FW_INI_REGION_DHC,
|
||||
IWL_FW_INI_REGION_NUM
|
||||
}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
|
||||
|
||||
|
@ -541,6 +541,66 @@ enum iwl_he_htc_flags {
|
||||
#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
|
||||
#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
|
||||
|
||||
/**
|
||||
* struct iwl_he_sta_context_cmd_v1 - configure FW to work with HE AP
|
||||
* @sta_id: STA id
|
||||
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
|
||||
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
|
||||
* @reserved1: reserved byte for future use
|
||||
* @reserved2: reserved byte for future use
|
||||
* @flags: see %iwl_11ax_sta_ctxt_flags
|
||||
* @ref_bssid_addr: reference BSSID used by the AP
|
||||
* @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
|
||||
* @htc_flags: which features are supported in HTC
|
||||
* @frag_flags: frag support in A-MSDU
|
||||
* @frag_level: frag support level
|
||||
* @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
|
||||
* @frag_min_size: min frag size (except last frag)
|
||||
* @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
|
||||
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
|
||||
* @htc_trig_based_pkt_ext: default PE in 4us units
|
||||
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
|
||||
* @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
|
||||
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
|
||||
* @reserved3: reserved byte for future use
|
||||
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
|
||||
*/
|
||||
struct iwl_he_sta_context_cmd_v1 {
|
||||
u8 sta_id;
|
||||
u8 tid_limit;
|
||||
u8 reserved1;
|
||||
u8 reserved2;
|
||||
__le32 flags;
|
||||
|
||||
/* The below fields are set via Multiple BSSID IE */
|
||||
u8 ref_bssid_addr[6];
|
||||
__le16 reserved0;
|
||||
|
||||
/* The below fields are set via HE-capabilities IE */
|
||||
__le32 htc_flags;
|
||||
|
||||
u8 frag_flags;
|
||||
u8 frag_level;
|
||||
u8 frag_max_num;
|
||||
u8 frag_min_size;
|
||||
|
||||
/* The below fields are set via PPE thresholds element */
|
||||
struct iwl_he_pkt_ext pkt_ext;
|
||||
|
||||
/* The below fields are set via HE-Operation IE */
|
||||
u8 bss_color;
|
||||
u8 htc_trig_based_pkt_ext;
|
||||
__le16 frame_time_rts_th;
|
||||
|
||||
/* Random access parameter set (i.e. RAPS) */
|
||||
u8 rand_alloc_ecwmin;
|
||||
u8 rand_alloc_ecwmax;
|
||||
__le16 reserved3;
|
||||
|
||||
/* The below fields are set via MU EDCA parameter set element */
|
||||
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
|
||||
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_he_sta_context_cmd - configure FW to work with HE AP
|
||||
* @sta_id: STA id
|
||||
@ -564,6 +624,14 @@ enum iwl_he_htc_flags {
|
||||
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
|
||||
* @reserved3: reserved byte for future use
|
||||
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
|
||||
* @max_bssid_indicator: indicator of the max bssid supported on the associated
|
||||
* bss
|
||||
* @bssid_index: index of the associated VAP
|
||||
* @ema_ap: AP supports enhanced Multi BSSID advertisement
|
||||
* @profile_periodicity: number of Beacon periods that are needed to receive the
|
||||
* complete VAPs info
|
||||
* @bssid_count: actual number of VAPs in the MultiBSS Set
|
||||
* @reserved4: alignment
|
||||
*/
|
||||
struct iwl_he_sta_context_cmd {
|
||||
u8 sta_id;
|
||||
@ -599,7 +667,14 @@ struct iwl_he_sta_context_cmd {
|
||||
|
||||
/* The below fields are set via MU EDCA parameter set element */
|
||||
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
|
||||
} __packed; /* STA_CONTEXT_DOT11AX_API_S */
|
||||
|
||||
u8 max_bssid_indicator;
|
||||
u8 bssid_index;
|
||||
u8 ema_ap;
|
||||
u8 profile_periodicity;
|
||||
u8 bssid_count;
|
||||
u8 reserved4[3];
|
||||
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_he_monitor_cmd - configure air sniffer for HE
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright(C) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright(C) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -233,7 +233,8 @@ struct iwl_nvm_get_info_phy {
|
||||
__le32 rx_chains;
|
||||
} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
|
||||
|
||||
#define IWL_NUM_CHANNELS (51)
|
||||
#define IWL_NUM_CHANNELS_V1 51
|
||||
#define IWL_NUM_CHANNELS 110
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_regulatory - regulatory information
|
||||
@ -241,12 +242,38 @@ struct iwl_nvm_get_info_phy {
|
||||
* @channel_profile: regulatory data of this channel
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct iwl_nvm_get_info_regulatory {
|
||||
struct iwl_nvm_get_info_regulatory_v1 {
|
||||
__le32 lar_enabled;
|
||||
__le16 channel_profile[IWL_NUM_CHANNELS];
|
||||
__le16 channel_profile[IWL_NUM_CHANNELS_V1];
|
||||
__le16 reserved;
|
||||
} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_regulatory - regulatory information
|
||||
* @lar_enabled: is LAR enabled
|
||||
* @n_channels: number of valid channels in the array
|
||||
* @channel_profile: regulatory data of this channel
|
||||
*/
|
||||
struct iwl_nvm_get_info_regulatory {
|
||||
__le32 lar_enabled;
|
||||
__le32 n_channels;
|
||||
__le32 channel_profile[IWL_NUM_CHANNELS];
|
||||
} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_rsp_v3 - response to get NVM data
|
||||
* @general: general NVM data
|
||||
* @mac_sku: data relating to MAC sku
|
||||
* @phy_sku: data relating to PHY sku
|
||||
* @regulatory: regulatory data
|
||||
*/
|
||||
struct iwl_nvm_get_info_rsp_v3 {
|
||||
struct iwl_nvm_get_info_general general;
|
||||
struct iwl_nvm_get_info_sku mac_sku;
|
||||
struct iwl_nvm_get_info_phy phy_sku;
|
||||
struct iwl_nvm_get_info_regulatory_v1 regulatory;
|
||||
} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_rsp - response to get NVM data
|
||||
* @general: general NVM data
|
||||
@ -259,7 +286,7 @@ struct iwl_nvm_get_info_rsp {
|
||||
struct iwl_nvm_get_info_sku mac_sku;
|
||||
struct iwl_nvm_get_info_phy phy_sku;
|
||||
struct iwl_nvm_get_info_regulatory regulatory;
|
||||
} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
|
||||
} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -688,13 +688,6 @@ struct iwl_rx_mpdu_desc {
|
||||
|
||||
#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
|
||||
|
||||
#define IWL_CD_STTS_OPTIMIZED_POS 0
|
||||
#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
|
||||
#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
|
||||
#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
|
||||
#define IWL_CD_STTS_WIFI_STATUS_POS 4
|
||||
#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
|
||||
|
||||
#define RX_NO_DATA_CHAIN_A_POS 0
|
||||
#define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS)
|
||||
#define RX_NO_DATA_CHAIN_B_POS 8
|
||||
@ -747,62 +740,6 @@ struct iwl_rx_no_data {
|
||||
__le32 rx_vec[2];
|
||||
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
|
||||
* @IWL_CD_STTS_UNUSED: unused
|
||||
* @IWL_CD_STTS_UNUSED_2: unused
|
||||
* @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
|
||||
* In sniffer mode, when split is used, set in last CD completion. (RX)
|
||||
* @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
|
||||
* all CD completion. (RX)
|
||||
* @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
|
||||
* @IWL_CD_STTS_ERROR: general error (RX)
|
||||
*/
|
||||
enum iwl_completion_desc_transfer_status {
|
||||
IWL_CD_STTS_UNUSED,
|
||||
IWL_CD_STTS_UNUSED_2,
|
||||
IWL_CD_STTS_END_TRANSFER,
|
||||
IWL_CD_STTS_OVERFLOW,
|
||||
IWL_CD_STTS_ABORTED,
|
||||
IWL_CD_STTS_ERROR,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
|
||||
* @IWL_CD_STTS_VALID: the packet is valid (RX)
|
||||
* @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
|
||||
* @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
|
||||
* @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
|
||||
* @IWL_CD_STTS_DUP: duplicate packet (RX)
|
||||
* @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
|
||||
* @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
|
||||
* @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
|
||||
* @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
|
||||
* @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
|
||||
* @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
|
||||
* @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
|
||||
* @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
|
||||
* @IWL_CD_STTS_NOT_USED: completed but not used (RX)
|
||||
* @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
|
||||
*/
|
||||
enum iwl_completion_desc_wifi_status {
|
||||
IWL_CD_STTS_VALID,
|
||||
IWL_CD_STTS_FCS_ERR,
|
||||
IWL_CD_STTS_SEC_KEY_ERR,
|
||||
IWL_CD_STTS_DECRYPTION_ERR,
|
||||
IWL_CD_STTS_DUP,
|
||||
IWL_CD_STTS_ICV_MIC_ERR,
|
||||
IWL_CD_STTS_INTERNAL_SNAP_ERR,
|
||||
IWL_CD_STTS_SEC_PORT_FAIL,
|
||||
IWL_CD_STTS_BA_OLD_SN,
|
||||
IWL_CD_STTS_QOS_NULL,
|
||||
IWL_CD_STTS_MAC_HDR_ERR,
|
||||
IWL_CD_STTS_MAX_RETRANS,
|
||||
IWL_CD_STTS_EX_LIFETIME,
|
||||
IWL_CD_STTS_NOT_USED,
|
||||
IWL_CD_STTS_REPLAY_ERR,
|
||||
};
|
||||
|
||||
struct iwl_frame_release {
|
||||
u8 baid;
|
||||
u8 reserved;
|
||||
|
@ -788,7 +788,53 @@ struct iwl_umac_scan_complete {
|
||||
__le32 reserved;
|
||||
} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
|
||||
|
||||
#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
|
||||
#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 5
|
||||
#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 7
|
||||
|
||||
/**
|
||||
* struct iwl_scan_offload_profile_match_v1 - match information
|
||||
* @bssid: matched bssid
|
||||
* @reserved: reserved
|
||||
* @channel: channel where the match occurred
|
||||
* @energy: energy
|
||||
* @matching_feature: feature matches
|
||||
* @matching_channels: bitmap of channels that matched, referencing
|
||||
* the channels passed in the scan offload request.
|
||||
*/
|
||||
struct iwl_scan_offload_profile_match_v1 {
|
||||
u8 bssid[ETH_ALEN];
|
||||
__le16 reserved;
|
||||
u8 channel;
|
||||
u8 energy;
|
||||
u8 matching_feature;
|
||||
u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1];
|
||||
} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
|
||||
|
||||
/**
* struct iwl_scan_offload_profiles_query_v1 - match results query response
* @matched_profiles: bitmap of matched profiles, referencing the
* matches passed in the scan offload request
* @last_scan_age: age of the last offloaded scan
* @n_scans_done: number of offloaded scans done
* @gp2_d0u: GP2 when D0U occurred
* @gp2_invoked: GP2 when scan offload was invoked
* @resume_while_scanning: not used
* @self_recovery: obsolete
* @reserved: reserved
* @matches: array of match information, one for each match
*/
struct iwl_scan_offload_profiles_query_v1 {
__le32 matched_profiles;
__le32 last_scan_age;
__le32 n_scans_done;
__le32 gp2_d0u;
__le32 gp2_invoked;
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
struct iwl_scan_offload_profile_match_v1 matches[IWL_SCAN_MAX_PROFILES];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
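A consumer-side illustration, not part of this patch: matched_profiles selects entries in matches[], and each entry's matching_channels refers back to the channels of the original scan offload request. handle_matches() is a hypothetical helper; the struct names and IWL_SCAN_MAX_PROFILES come from this header, the rest is standard kernel API.

/* Hypothetical sketch: walk the profiles the firmware reported as matched. */
static void handle_matches(const struct iwl_scan_offload_profiles_query_v1 *query)
{
	unsigned long matched = le32_to_cpu(query->matched_profiles);
	int i;

	for_each_set_bit(i, &matched, IWL_SCAN_MAX_PROFILES) {
		const struct iwl_scan_offload_profile_match_v1 *m = &query->matches[i];

		/* m->matching_channels is a bitmap over the channels of the request */
		pr_info("profile %d matched on channel %d\n", i, m->channel);
	}
}
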
|
||||
|
||||
/**
* struct iwl_scan_offload_profile_match - match information
* @bssid: matched bssid
@ -797,7 +843,7 @@ struct iwl_umac_scan_complete
* @energy: energy
* @matching_feature: feature matches
* @matching_channels: bitmap of channels that matched, referencing
* the channels passed in tue scan offload request
* the channels passed in the scan offload request.
*/
struct iwl_scan_offload_profile_match {
u8 bssid[ETH_ALEN];
@ -806,7 +852,7 @@ struct iwl_scan_offload_profile_match {
u8 energy;
u8 matching_feature;
u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */

/**
* struct iwl_scan_offload_profiles_query - match results query response
@ -831,7 +877,7 @@ struct iwl_scan_offload_profiles_query {
u8 self_recovery;
__le16 reserved;
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */

/**
* struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration

@ -804,8 +804,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
|
||||
}
|
||||
|
||||
static struct iwl_fw_error_dump_file *
|
||||
_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_dump_ptrs *fw_error_dump)
|
||||
iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_dump_ptrs *fw_error_dump)
|
||||
{
|
||||
struct iwl_fw_error_dump_file *dump_file;
|
||||
struct iwl_fw_error_dump_data *dump_data;
|
||||
@ -967,10 +967,11 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
||||
if (fifo_len) {
|
||||
iwl_fw_dump_rxf(fwrt, &dump_data);
|
||||
iwl_fw_dump_txf(fwrt, &dump_data);
|
||||
if (radio_len)
|
||||
iwl_read_radio_regs(fwrt, &dump_data);
|
||||
}
|
||||
|
||||
if (radio_len)
|
||||
iwl_read_radio_regs(fwrt, &dump_data);
|
||||
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
|
||||
fwrt->dump.desc) {
|
||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
|
||||
@ -1049,14 +1050,14 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
|
||||
{
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
__le32 *val = range->data;
|
||||
u32 addr, prph_val, offset = le32_to_cpu(reg->offset);
|
||||
u32 prph_val;
|
||||
u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
|
||||
int i;
|
||||
|
||||
range->start_addr = reg->start_addr[idx];
|
||||
range->start_addr = cpu_to_le64(addr);
|
||||
range->range_data_size = reg->internal.range_data_size;
|
||||
for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
|
||||
addr = le32_to_cpu(range->start_addr) + i;
|
||||
prph_val = iwl_read_prph(fwrt->trans, addr + offset);
|
||||
prph_val = iwl_read_prph(fwrt->trans, addr + i);
|
||||
if (prph_val == 0x5a5a5a5a)
|
||||
return -EBUSY;
|
||||
*val++ = cpu_to_le32(prph_val);
|
||||
@ -1071,16 +1072,13 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
|
||||
{
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
__le32 *val = range->data;
|
||||
u32 addr, offset = le32_to_cpu(reg->offset);
|
||||
u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
|
||||
int i;
|
||||
|
||||
range->start_addr = reg->start_addr[idx];
|
||||
range->start_addr = cpu_to_le64(addr);
|
||||
range->range_data_size = reg->internal.range_data_size;
|
||||
for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
|
||||
addr = le32_to_cpu(range->start_addr) + i;
|
||||
*val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans,
|
||||
addr + offset));
|
||||
}
|
||||
for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4)
|
||||
*val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
|
||||
|
||||
return sizeof(*range) + le32_to_cpu(range->range_data_size);
|
||||
}
|
||||
@ -1090,12 +1088,11 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
|
||||
void *range_ptr, int idx)
|
||||
{
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
u32 addr = le32_to_cpu(range->start_addr);
|
||||
u32 offset = le32_to_cpu(reg->offset);
|
||||
u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
|
||||
|
||||
range->start_addr = reg->start_addr[idx];
|
||||
range->start_addr = cpu_to_le64(addr);
|
||||
range->range_data_size = reg->internal.range_data_size;
|
||||
iwl_trans_read_mem_bytes(fwrt->trans, addr + offset, range->data,
|
||||
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
|
||||
le32_to_cpu(reg->internal.range_data_size));
|
||||
|
||||
return sizeof(*range) + le32_to_cpu(range->range_data_size);
|
||||
@ -1109,7 +1106,7 @@ iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_error_dump_range *range = range_ptr;
|
||||
u32 page_size = fwrt->trans->init_dram.paging[idx].size;
|
||||
|
||||
range->start_addr = cpu_to_le32(idx);
|
||||
range->start_addr = cpu_to_le64(idx);
|
||||
range->range_data_size = cpu_to_le32(page_size);
|
||||
memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
|
||||
page_size);
|
||||
@ -1129,7 +1126,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
|
||||
dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
|
||||
u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
|
||||
|
||||
range->start_addr = cpu_to_le32(idx);
|
||||
range->start_addr = cpu_to_le64(idx);
|
||||
range->range_data_size = cpu_to_le32(page_size);
|
||||
dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
@ -1152,7 +1149,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
|
||||
if (start_addr == 0x5a5a5a5a)
|
||||
return -EBUSY;
|
||||
|
||||
range->start_addr = cpu_to_le32(start_addr);
|
||||
range->start_addr = cpu_to_le64(start_addr);
|
||||
range->range_data_size = cpu_to_le32(fwrt->trans->fw_mon[idx].size);
|
||||
|
||||
memcpy(range->data, fwrt->trans->fw_mon[idx].block,
|
||||
@ -1228,10 +1225,11 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
|
||||
{
|
||||
struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
|
||||
struct iwl_ini_txf_iter_data *iter;
|
||||
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
|
||||
u32 offs = le32_to_cpu(reg->offset), addr;
|
||||
u32 registers_size =
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
|
||||
__le32 *val = range->data;
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
|
||||
__le32 *data;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
@ -1249,11 +1247,18 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
|
||||
|
||||
/* read txf registers */
|
||||
/*
|
||||
* read txf registers. for each register, write to the dump the
|
||||
* register address and its value
|
||||
*/
|
||||
for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
|
||||
addr = le32_to_cpu(reg->start_addr[i]) + offs;
|
||||
|
||||
*val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
|
||||
reg_dump->addr = cpu_to_le32(addr);
|
||||
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
|
||||
addr));
|
||||
|
||||
reg_dump++;
|
||||
}
|
||||
|
||||
if (reg->fifos.header_only) {
|
||||
@ -1270,8 +1275,9 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
/* Read FIFO */
|
||||
addr = TXF_READ_MODIFY_DATA + offs;
|
||||
for (i = 0; i < iter->fifo_size; i += sizeof(__le32))
|
||||
*val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
|
||||
data = (void *)reg_dump;
|
||||
for (i = 0; i < iter->fifo_size; i += sizeof(*data))
|
||||
*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
|
||||
|
||||
out:
|
||||
iwl_trans_release_nic_access(fwrt->trans, &flags);
|
||||
@ -1327,10 +1333,11 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
|
||||
{
|
||||
struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
|
||||
struct iwl_ini_rxf_data rxf_data;
|
||||
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
|
||||
u32 offs = le32_to_cpu(reg->offset), addr;
|
||||
u32 registers_size =
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
|
||||
__le32 *val = range->data;
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
|
||||
__le32 *data;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
@ -1341,17 +1348,22 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
|
||||
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
|
||||
return -EBUSY;
|
||||
|
||||
offs += rxf_data.offset;
|
||||
|
||||
range->fifo_num = cpu_to_le32(rxf_data.fifo_num);
|
||||
range->num_of_registers = reg->fifos.num_of_registers;
|
||||
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
|
||||
|
||||
/* read rxf registers */
|
||||
/*
|
||||
* read rxf registers. for each register, write to the dump the
|
||||
* register address and its value
|
||||
*/
|
||||
for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
|
||||
addr = le32_to_cpu(reg->start_addr[i]) + offs;
|
||||
|
||||
*val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
|
||||
reg_dump->addr = cpu_to_le32(addr);
|
||||
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
|
||||
addr));
|
||||
|
||||
reg_dump++;
|
||||
}
|
||||
|
||||
if (reg->fifos.header_only) {
|
||||
@ -1359,6 +1371,12 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* region registers have absolute values so apply rxf offset after
|
||||
* reading the registers
|
||||
*/
|
||||
offs += rxf_data.offset;
|
||||
|
||||
/* Lock fence */
|
||||
iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
|
||||
/* Set fence pointer to the same place like WR pointer */
|
||||
@ -1369,8 +1387,9 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
/* Read FIFO */
|
||||
addr = RXF_FIFO_RD_FENCE_INC + offs;
|
||||
for (i = 0; i < rxf_data.size; i += sizeof(__le32))
|
||||
*val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
|
||||
data = (void *)reg_dump;
|
||||
for (i = 0; i < rxf_data.size; i += sizeof(*data))
|
||||
*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
|
||||
|
||||
out:
|
||||
iwl_trans_release_nic_access(fwrt->trans, &flags);
|
||||
@ -1384,32 +1403,86 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
{
|
||||
struct iwl_fw_ini_error_dump *dump = data;
|
||||
|
||||
dump->header.version = cpu_to_le32(IWL_INI_DUMP_MEM_VER);
|
||||
|
||||
return dump->ranges;
|
||||
}
|
||||
|
||||
static void
|
||||
*iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_region_cfg *reg,
|
||||
struct iwl_fw_ini_monitor_dump *data,
|
||||
u32 write_ptr_addr, u32 write_ptr_msk,
|
||||
u32 cycle_cnt_addr, u32 cycle_cnt_msk)
|
||||
{
|
||||
u32 write_ptr, cycle_cnt;
|
||||
unsigned long flags;
|
||||
|
||||
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
|
||||
IWL_ERR(fwrt, "Failed to get monitor header\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr);
|
||||
cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr);
|
||||
|
||||
iwl_trans_release_nic_access(fwrt->trans, &flags);
|
||||
|
||||
data->header.version = cpu_to_le32(IWL_INI_DUMP_MONITOR_VER);
|
||||
data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk);
|
||||
data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk);
|
||||
|
||||
return data->ranges;
|
||||
}
|
||||
|
||||
static void
|
||||
*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_region_cfg *reg,
|
||||
void *data)
|
||||
{
|
||||
struct iwl_fw_ini_monitor_dram_dump *mon_dump = (void *)data;
|
||||
u32 write_ptr, cycle_cnt;
|
||||
unsigned long flags;
|
||||
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
|
||||
u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk;
|
||||
|
||||
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
|
||||
IWL_ERR(fwrt, "Failed to get DRAM monitor header\n");
|
||||
switch (fwrt->trans->cfg->device_family) {
|
||||
case IWL_DEVICE_FAMILY_9000:
|
||||
case IWL_DEVICE_FAMILY_22000:
|
||||
write_ptr_addr = MON_BUFF_WRPTR_VER2;
|
||||
write_ptr_msk = -1;
|
||||
cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2;
|
||||
cycle_cnt_msk = -1;
|
||||
break;
|
||||
default:
|
||||
IWL_ERR(fwrt, "Unsupported device family %d\n",
|
||||
fwrt->trans->cfg->device_family);
|
||||
return NULL;
|
||||
}
|
||||
write_ptr = iwl_read_umac_prph_no_grab(fwrt->trans,
|
||||
MON_BUFF_WRPTR_VER2);
|
||||
cycle_cnt = iwl_read_umac_prph_no_grab(fwrt->trans,
|
||||
MON_BUFF_CYCLE_CNT_VER2);
|
||||
iwl_trans_release_nic_access(fwrt->trans, &flags);
|
||||
|
||||
mon_dump->write_ptr = cpu_to_le32(write_ptr);
|
||||
mon_dump->cycle_cnt = cpu_to_le32(cycle_cnt);
|
||||
return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr,
|
||||
write_ptr_msk, cycle_cnt_addr,
|
||||
cycle_cnt_msk);
|
||||
}
|
||||
|
||||
static void
|
||||
*iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_region_cfg *reg,
|
||||
void *data)
|
||||
{
|
||||
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
|
||||
const struct iwl_cfg *cfg = fwrt->trans->cfg;
|
||||
|
||||
if (fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
|
||||
fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
|
||||
IWL_ERR(fwrt, "Unsupported device family %d\n",
|
||||
fwrt->trans->cfg->device_family);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump,
|
||||
cfg->fw_mon_smem_write_ptr_addr,
|
||||
cfg->fw_mon_smem_write_ptr_msk,
|
||||
cfg->fw_mon_smem_cycle_cnt_ptr_addr,
|
||||
cfg->fw_mon_smem_cycle_cnt_ptr_msk);
|
||||
|
||||
return mon_dump->ranges;
|
||||
}
|
||||
|
||||
static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
@ -1418,6 +1491,8 @@ static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
|
||||
{
|
||||
struct iwl_fw_ini_fifo_error_dump *dump = data;
|
||||
|
||||
dump->header.version = cpu_to_le32(IWL_INI_DUMP_FIFO_VER);
|
||||
|
||||
return dump->ranges;
|
||||
}
|
||||
|
||||
@ -1509,7 +1584,8 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
|
||||
static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_region_cfg *reg)
|
||||
{
|
||||
u32 size = sizeof(struct iwl_fw_ini_monitor_dram_dump);
|
||||
u32 size = sizeof(struct iwl_fw_ini_monitor_dump) +
|
||||
sizeof(struct iwl_fw_ini_error_dump_range);
|
||||
|
||||
if (fwrt->trans->num_blocks)
|
||||
size += fwrt->trans->fw_mon[0].size;
|
||||
@ -1517,6 +1593,15 @@ static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
|
||||
return size;
|
||||
}
|
||||
|
||||
static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_region_cfg *reg)
|
||||
{
|
||||
return sizeof(struct iwl_fw_ini_monitor_dump) +
|
||||
iwl_dump_ini_mem_ranges(fwrt, reg) *
|
||||
(sizeof(struct iwl_fw_ini_error_dump_range) +
|
||||
le32_to_cpu(reg->internal.range_data_size));
|
||||
}
|
||||
|
||||
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_region_cfg *reg)
|
||||
{
|
||||
@ -1524,7 +1609,7 @@ static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
|
||||
void *fifo_iter = fwrt->dump.fifo_iter;
|
||||
u32 size = 0;
|
||||
u32 fifo_hdr = sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
|
||||
|
||||
fwrt->dump.fifo_iter = &iter;
|
||||
while (iwl_ini_txf_iter(fwrt, reg)) {
|
||||
@ -1547,7 +1632,7 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_ini_rxf_data rx_data;
|
||||
u32 size = sizeof(struct iwl_fw_ini_fifo_error_dump) +
|
||||
sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
|
||||
le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
|
||||
|
||||
if (reg->fifos.header_only)
|
||||
return size;
|
||||
@ -1584,17 +1669,17 @@ struct iwl_dump_ini_mem_ops {
|
||||
* @fwrt: fw runtime struct.
|
||||
* @data: dump memory data.
|
||||
* @reg: region to copy to the dump.
|
||||
* @ops: memory dump operations.
|
||||
*/
|
||||
static void
|
||||
iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
|
||||
enum iwl_fw_ini_region_type type,
|
||||
struct iwl_fw_error_dump_data **data,
|
||||
struct iwl_fw_ini_region_cfg *reg,
|
||||
struct iwl_dump_ini_mem_ops *ops)
|
||||
{
|
||||
struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data;
|
||||
u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type);
|
||||
void *range;
|
||||
u32 num_of_ranges, i;
|
||||
|
||||
if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size ||
|
||||
!ops->fill_mem_hdr || !ops->fill_range))
|
||||
@ -1605,6 +1690,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
|
||||
(*data)->type = cpu_to_le32(type | INI_DUMP_BIT);
|
||||
(*data)->len = cpu_to_le32(ops->get_size(fwrt, reg));
|
||||
|
||||
header->region_id = reg->region_id;
|
||||
header->num_of_ranges = cpu_to_le32(num_of_ranges);
|
||||
header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME,
|
||||
le32_to_cpu(reg->name_len)));
|
||||
@ -1643,7 +1729,6 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
|
||||
for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) {
|
||||
u32 reg_id = le32_to_cpu(trigger->data[i]);
|
||||
struct iwl_fw_ini_region_cfg *reg;
|
||||
enum iwl_fw_ini_region_type type;
|
||||
|
||||
if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
|
||||
continue;
|
||||
@ -1652,13 +1737,11 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
|
||||
if (WARN(!reg, "Unassigned region %d\n", reg_id))
|
||||
continue;
|
||||
|
||||
type = le32_to_cpu(reg->region_type);
|
||||
switch (type) {
|
||||
switch (le32_to_cpu(reg->region_type)) {
|
||||
case IWL_FW_INI_REGION_DEVICE_MEMORY:
|
||||
case IWL_FW_INI_REGION_PERIPHERY_MAC:
|
||||
case IWL_FW_INI_REGION_PERIPHERY_PHY:
|
||||
case IWL_FW_INI_REGION_PERIPHERY_AUX:
|
||||
case IWL_FW_INI_REGION_INTERNAL_BUFFER:
|
||||
case IWL_FW_INI_REGION_CSR:
|
||||
size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg);
|
||||
break;
|
||||
@ -1668,7 +1751,7 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
|
||||
case IWL_FW_INI_REGION_RXF:
|
||||
size += hdr_len + iwl_dump_ini_rxf_get_size(fwrt, reg);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_PAGING: {
|
||||
case IWL_FW_INI_REGION_PAGING:
|
||||
size += hdr_len;
|
||||
if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
|
||||
size += iwl_dump_ini_paging_get_size(fwrt, reg);
|
||||
@ -1677,13 +1760,16 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
|
||||
reg);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case IWL_FW_INI_REGION_DRAM_BUFFER:
|
||||
if (!fwrt->trans->num_blocks)
|
||||
break;
|
||||
size += hdr_len +
|
||||
iwl_dump_ini_mon_dram_get_size(fwrt, reg);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_INTERNAL_BUFFER:
|
||||
size += hdr_len +
|
||||
iwl_dump_ini_mon_smem_get_size(fwrt, reg);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_DRAM_IMR:
|
||||
/* Undefined yet */
|
||||
default:
|
||||
@ -1701,7 +1787,6 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
u32 reg_id = le32_to_cpu(trigger->data[i]);
|
||||
enum iwl_fw_ini_region_type type;
|
||||
struct iwl_fw_ini_region_cfg *reg;
|
||||
struct iwl_dump_ini_mem_ops ops;
|
||||
|
||||
@ -1713,15 +1798,17 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
if (!reg)
|
||||
continue;
|
||||
|
||||
type = le32_to_cpu(reg->region_type);
|
||||
switch (type) {
|
||||
/* currently the driver supports always on domain only */
|
||||
if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
|
||||
continue;
|
||||
|
||||
switch (le32_to_cpu(reg->region_type)) {
|
||||
case IWL_FW_INI_REGION_DEVICE_MEMORY:
|
||||
case IWL_FW_INI_REGION_INTERNAL_BUFFER:
|
||||
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
|
||||
ops.get_size = iwl_dump_ini_mem_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
|
||||
ops.fill_range = iwl_dump_ini_dev_mem_iter;
|
||||
iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_PERIPHERY_MAC:
|
||||
case IWL_FW_INI_REGION_PERIPHERY_PHY:
|
||||
@ -1730,16 +1817,23 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
ops.get_size = iwl_dump_ini_mem_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
|
||||
ops.fill_range = iwl_dump_ini_prph_iter;
|
||||
iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_DRAM_BUFFER:
|
||||
ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges;
|
||||
ops.get_size = iwl_dump_ini_mon_dram_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header;
|
||||
ops.fill_range = iwl_dump_ini_mon_dram_iter;
|
||||
iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_PAGING: {
|
||||
case IWL_FW_INI_REGION_INTERNAL_BUFFER:
|
||||
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
|
||||
ops.get_size = iwl_dump_ini_mon_smem_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header;
|
||||
ops.fill_range = iwl_dump_ini_dev_mem_iter;
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_PAGING:
|
||||
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
|
||||
if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
|
||||
ops.get_num_of_ranges =
|
||||
@ -1754,9 +1848,8 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
ops.fill_range = iwl_dump_ini_paging_gen2_iter;
|
||||
}
|
||||
|
||||
iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
break;
|
||||
}
|
||||
case IWL_FW_INI_REGION_TXF: {
|
||||
struct iwl_ini_txf_iter_data iter = { .init = true };
|
||||
void *fifo_iter = fwrt->dump.fifo_iter;
|
||||
@ -1766,7 +1859,7 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
ops.get_size = iwl_dump_ini_txf_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
|
||||
ops.fill_range = iwl_dump_ini_txf_iter;
|
||||
iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
fwrt->dump.fifo_iter = fifo_iter;
|
||||
break;
|
||||
}
|
||||
@ -1775,14 +1868,14 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
ops.get_size = iwl_dump_ini_rxf_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
|
||||
ops.fill_range = iwl_dump_ini_rxf_iter;
|
||||
iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_CSR:
|
||||
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
|
||||
ops.get_size = iwl_dump_ini_mem_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
|
||||
ops.fill_range = iwl_dump_ini_csr_iter;
|
||||
iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
|
||||
iwl_dump_ini_mem(fwrt, data, reg, &ops);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_DRAM_IMR:
|
||||
/* This is undefined yet */
|
||||
@ -1793,16 +1886,13 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
}
|
||||
|
||||
static struct iwl_fw_error_dump_file *
|
||||
_iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_dump_ptrs *fw_error_dump)
|
||||
iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
|
||||
int size;
|
||||
struct iwl_fw_error_dump_data *dump_data;
|
||||
struct iwl_fw_error_dump_file *dump_file;
|
||||
struct iwl_fw_ini_trigger *trigger;
|
||||
|
||||
if (id == FW_DBG_TRIGGER_FW_ASSERT)
|
||||
id = IWL_FW_TRIGGER_ID_FW_ASSERT;
|
||||
enum iwl_fw_ini_trigger_id id = fwrt->dump.ini_trig_id;
|
||||
|
||||
if (!iwl_fw_ini_trigger_on(fwrt, id))
|
||||
return NULL;
|
||||
@ -1819,8 +1909,6 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
|
||||
if (!dump_file)
|
||||
return NULL;
|
||||
|
||||
fw_error_dump->fwrt_ptr = dump_file;
|
||||
|
||||
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
|
||||
dump_data = (void *)dump_file->data;
|
||||
dump_file->file_len = cpu_to_le32(size);
|
||||
@ -1830,47 +1918,27 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
|
||||
return dump_file;
|
||||
}
|
||||
|
||||
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
||||
static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
struct iwl_fw_dump_ptrs *fw_error_dump;
|
||||
struct iwl_fw_dump_ptrs fw_error_dump = {};
|
||||
struct iwl_fw_error_dump_file *dump_file;
|
||||
struct scatterlist *sg_dump_data;
|
||||
u32 file_len;
|
||||
u32 dump_mask = fwrt->fw->dbg.dump_mask;
|
||||
|
||||
IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
|
||||
|
||||
/* there's no point in fw dump if the bus is dead */
|
||||
if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
|
||||
IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
|
||||
dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump);
|
||||
if (!dump_file)
|
||||
goto out;
|
||||
}
|
||||
|
||||
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
|
||||
if (!fw_error_dump)
|
||||
goto out;
|
||||
|
||||
if (fwrt->trans->ini_valid)
|
||||
dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump);
|
||||
else
|
||||
dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
|
||||
|
||||
if (!dump_file) {
|
||||
kfree(fw_error_dump);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only)
|
||||
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
|
||||
|
||||
if (!fwrt->trans->ini_valid)
|
||||
fw_error_dump->trans_ptr =
|
||||
iwl_trans_dump_data(fwrt->trans, dump_mask);
|
||||
|
||||
fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
|
||||
file_len = le32_to_cpu(dump_file->file_len);
|
||||
fw_error_dump->fwrt_len = file_len;
|
||||
if (fw_error_dump->trans_ptr) {
|
||||
file_len += fw_error_dump->trans_ptr->len;
|
||||
fw_error_dump.fwrt_len = file_len;
|
||||
|
||||
if (fw_error_dump.trans_ptr) {
|
||||
file_len += fw_error_dump.trans_ptr->len;
|
||||
dump_file->file_len = cpu_to_le32(file_len);
|
||||
}
|
||||
|
||||
@ -1878,27 +1946,49 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
||||
if (sg_dump_data) {
|
||||
sg_pcopy_from_buffer(sg_dump_data,
|
||||
sg_nents(sg_dump_data),
|
||||
fw_error_dump->fwrt_ptr,
|
||||
fw_error_dump->fwrt_len, 0);
|
||||
if (fw_error_dump->trans_ptr)
|
||||
fw_error_dump.fwrt_ptr,
|
||||
fw_error_dump.fwrt_len, 0);
|
||||
if (fw_error_dump.trans_ptr)
|
||||
sg_pcopy_from_buffer(sg_dump_data,
|
||||
sg_nents(sg_dump_data),
|
||||
fw_error_dump->trans_ptr->data,
|
||||
fw_error_dump->trans_ptr->len,
|
||||
fw_error_dump->fwrt_len);
|
||||
fw_error_dump.trans_ptr->data,
|
||||
fw_error_dump.trans_ptr->len,
|
||||
fw_error_dump.fwrt_len);
|
||||
dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
|
||||
GFP_KERNEL);
|
||||
}
|
||||
vfree(fw_error_dump->fwrt_ptr);
|
||||
vfree(fw_error_dump->trans_ptr);
|
||||
kfree(fw_error_dump);
|
||||
vfree(fw_error_dump.fwrt_ptr);
|
||||
vfree(fw_error_dump.trans_ptr);
|
||||
|
||||
out:
|
||||
iwl_fw_free_dump_desc(fwrt);
|
||||
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
|
||||
IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
|
||||
|
||||
static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
struct iwl_fw_error_dump_file *dump_file;
|
||||
struct scatterlist *sg_dump_data;
|
||||
u32 file_len;
|
||||
|
||||
dump_file = iwl_fw_error_ini_dump_file(fwrt);
|
||||
if (!dump_file)
|
||||
goto out;
|
||||
|
||||
file_len = le32_to_cpu(dump_file->file_len);
|
||||
|
||||
sg_dump_data = alloc_sgtable(file_len);
|
||||
if (sg_dump_data) {
|
||||
sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data),
|
||||
dump_file, file_len, 0);
|
||||
dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
|
||||
GFP_KERNEL);
|
||||
}
|
||||
vfree(dump_file);
|
||||
out:
|
||||
fwrt->dump.ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
|
||||
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
|
||||
}
|
||||
|
||||
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
|
||||
.trig_desc = {
|
||||
@ -1912,6 +2002,17 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
|
||||
bool monitor_only,
|
||||
unsigned int delay)
|
||||
{
|
||||
u32 trig_type = le32_to_cpu(desc->trig_desc.type);
|
||||
int ret;
|
||||
|
||||
if (fwrt->trans->ini_valid) {
|
||||
ret = iwl_fw_dbg_ini_collect(fwrt, trig_type);
|
||||
if (!ret)
|
||||
iwl_fw_free_dump_desc(fwrt);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
|
||||
return -EBUSY;
|
||||
|
||||
@ -1953,10 +2054,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
|
||||
|
||||
int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
||||
enum iwl_fw_dbg_trigger trig,
|
||||
const char *str, size_t len,
|
||||
struct iwl_fw_dbg_trigger_tlv *trigger)
|
||||
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
||||
enum iwl_fw_dbg_trigger trig,
|
||||
const char *str, size_t len,
|
||||
struct iwl_fw_dbg_trigger_tlv *trigger)
|
||||
{
|
||||
struct iwl_fw_dump_desc *desc;
|
||||
unsigned int delay = 0;
|
||||
@ -1993,50 +2094,64 @@ int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect);
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
|
||||
|
||||
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
||||
u32 id, const char *str, size_t len)
|
||||
int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
|
||||
enum iwl_fw_ini_trigger_id id)
|
||||
{
|
||||
struct iwl_fw_dump_desc *desc;
|
||||
struct iwl_fw_ini_active_triggers *active;
|
||||
u32 occur, delay;
|
||||
|
||||
if (!fwrt->trans->ini_valid)
|
||||
return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL);
|
||||
|
||||
if (id == FW_DBG_TRIGGER_USER)
|
||||
id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
|
||||
|
||||
active = &fwrt->dump.active_trigs[id];
|
||||
|
||||
if (WARN_ON(!active->active))
|
||||
if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id)))
|
||||
return -EINVAL;
|
||||
|
||||
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
|
||||
return -EBUSY;
|
||||
|
||||
active = &fwrt->dump.active_trigs[id];
|
||||
delay = le32_to_cpu(active->trig->dump_delay);
|
||||
occur = le32_to_cpu(active->trig->occurrences);
|
||||
if (!occur)
|
||||
return 0;
|
||||
|
||||
active->trig->occurrences = cpu_to_le32(--occur);
|
||||
|
||||
if (le32_to_cpu(active->trig->force_restart)) {
|
||||
IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
|
||||
iwl_force_nmi(fwrt->trans);
|
||||
return 0;
|
||||
}
|
||||
|
||||
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
|
||||
if (!desc)
|
||||
return -ENOMEM;
|
||||
fwrt->dump.ini_trig_id = id;
|
||||
|
||||
active->trig->occurrences = cpu_to_le32(--occur);
|
||||
IWL_WARN(fwrt, "Collecting data: ini trigger %d fired.\n", id);
|
||||
|
||||
desc->len = len;
|
||||
desc->trig_desc.type = cpu_to_le32(id);
|
||||
memcpy(desc->trig_desc.data, str, len);
|
||||
schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));
|
||||
|
||||
return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay);
|
||||
return 0;
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
|
||||
IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect);
|
||||
|
||||
int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id)
|
||||
{
|
||||
int id;
|
||||
|
||||
switch (legacy_trigger_id) {
|
||||
case FW_DBG_TRIGGER_FW_ASSERT:
|
||||
case FW_DBG_TRIGGER_ALIVE_TIMEOUT:
|
||||
case FW_DBG_TRIGGER_DRIVER:
|
||||
id = IWL_FW_TRIGGER_ID_FW_ASSERT;
|
||||
break;
|
||||
case FW_DBG_TRIGGER_USER:
|
||||
id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
|
||||
break;
|
||||
default:
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return _iwl_fw_dbg_ini_collect(fwrt, id);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect);
|
||||
|
||||
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_dbg_trigger_tlv *trigger,
|
||||
@ -2064,8 +2179,8 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
|
||||
len = strlen(buf) + 1;
|
||||
}
|
||||
|
||||
ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
|
||||
trigger);
|
||||
ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
|
||||
trigger);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -2139,9 +2254,20 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
|
||||
return;
|
||||
}
|
||||
|
||||
/* there's no point in fw dump if the bus is dead */
|
||||
if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
|
||||
IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_fw_dbg_stop_recording(fwrt, ¶ms);
|
||||
|
||||
iwl_fw_error_dump(fwrt);
|
||||
IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
|
||||
if (fwrt->trans->ini_valid)
|
||||
iwl_fw_error_ini_dump(fwrt);
|
||||
else
|
||||
iwl_fw_error_dump(fwrt);
|
||||
IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
|
||||
|
||||
/* start recording again if the firmware is not crashed */
|
||||
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
|
||||
@ -2285,6 +2411,10 @@ static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
|
||||
.data = { data->data, },
|
||||
};
|
||||
|
||||
/* currently the driver supports always on domain only */
|
||||
if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
|
||||
return;
|
||||
|
||||
iwl_trans_send_cmd(fwrt->trans, &hcmd);
|
||||
}
|
||||
|
||||
|
@ -108,18 +108,17 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
|
||||
fwrt->dump.umac_err_id = 0;
|
||||
}
|
||||
|
||||
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
|
||||
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
|
||||
const struct iwl_fw_dump_desc *desc,
|
||||
bool monitor_only, unsigned int delay);
|
||||
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
|
||||
enum iwl_fw_dbg_trigger trig_type);
|
||||
int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
||||
enum iwl_fw_dbg_trigger trig,
|
||||
const char *str, size_t len,
|
||||
struct iwl_fw_dbg_trigger_tlv *trigger);
|
||||
int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
|
||||
enum iwl_fw_ini_trigger_id id);
|
||||
int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id);
|
||||
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
||||
u32 id, const char *str, size_t len);
|
||||
enum iwl_fw_dbg_trigger trig, const char *str,
|
||||
size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
|
||||
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_dbg_trigger_tlv *trigger,
|
||||
const char *fmt, ...) __printf(3, 4);
|
||||
@ -229,10 +228,8 @@ iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_trigger *trig;
|
||||
u32 usec;
|
||||
|
||||
|
||||
|
||||
if (!fwrt->trans->ini_valid || id >= IWL_FW_TRIGGER_ID_NUM ||
|
||||
!fwrt->dump.active_trigs[id].active)
|
||||
if (!fwrt->trans->ini_valid || id == IWL_FW_TRIGGER_ID_INVALID ||
|
||||
id >= IWL_FW_TRIGGER_ID_NUM || !fwrt->dump.active_trigs[id].active)
|
||||
return false;
|
||||
|
||||
trig = fwrt->dump.active_trigs[id].trig;
|
||||
@ -461,4 +458,14 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
|
||||
/* This bit is used to differentiate the legacy dump from the ini dump */
|
||||
#define INI_DUMP_BIT BIT(31)
|
||||
|
||||
static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
if (fwrt->trans->ini_valid && fwrt->trans->hw_error) {
|
||||
_iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
|
||||
fwrt->trans->hw_error = false;
|
||||
} else {
|
||||
iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* __iwl_fw_dbg_h__ */
|
||||
|
@ -211,6 +211,9 @@ struct iwl_fw_error_dump_info {
|
||||
* @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
|
||||
* @fw_mon_base_ptr: base pointer of the data
|
||||
* @fw_mon_cycle_cnt: number of wraparounds
|
||||
* @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit
|
||||
* so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr holds
|
||||
* MSB 32 bits
|
||||
* @reserved: for future use
|
||||
* @data: captured data
|
||||
*/
|
||||
@ -218,7 +221,8 @@ struct iwl_fw_error_dump_fw_mon {
|
||||
__le32 fw_mon_wr_ptr;
|
||||
__le32 fw_mon_base_ptr;
|
||||
__le32 fw_mon_cycle_cnt;
|
||||
__le32 reserved[3];
|
||||
__le32 fw_mon_base_high_ptr;
|
||||
__le32 reserved[2];
|
||||
u8 data[];
|
||||
} __packed;
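A hypothetical consumer-side sketch, not part of the patch, showing how a dump parser could rebuild the 64-bit monitor base from the two 32-bit words above; the struct comes from this hunk, fw_mon_base() is an illustrative name.

/* Illustrative only: low 32 bits live in fw_mon_base_ptr, high 32 bits in fw_mon_base_high_ptr. */
static u64 fw_mon_base(const struct iwl_fw_error_dump_fw_mon *mon)
{
	return ((u64)le32_to_cpu(mon->fw_mon_base_high_ptr) << 32) |
	       le32_to_cpu(mon->fw_mon_base_ptr);
}
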
|
||||
|
||||
@ -274,25 +278,33 @@ struct iwl_fw_error_dump_mem {
|
||||
u8 data[];
|
||||
};
|
||||
|
||||
#define IWL_INI_DUMP_MEM_VER 1
|
||||
#define IWL_INI_DUMP_MONITOR_VER 1
|
||||
#define IWL_INI_DUMP_FIFO_VER 1
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_error_dump_range - range of memory
|
||||
* @start_addr: the start address of this range
|
||||
* @range_data_size: the size of this range, in bytes
|
||||
* @start_addr: the start address of this range
|
||||
* @data: the actual memory
|
||||
*/
|
||||
struct iwl_fw_ini_error_dump_range {
|
||||
__le32 start_addr;
|
||||
__le32 range_data_size;
|
||||
__le64 start_addr;
|
||||
__le32 data[];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_error_dump_header - ini region dump header
|
||||
* @version: dump version
|
||||
* @region_id: id of the region
|
||||
* @num_of_ranges: number of ranges in this region
|
||||
* @name_len: number of bytes allocated to the name string of this region
|
||||
* @name: name of the region
|
||||
*/
|
||||
struct iwl_fw_ini_error_dump_header {
|
||||
__le32 version;
|
||||
__le32 region_id;
|
||||
__le32 num_of_ranges;
|
||||
__le32 name_len;
|
||||
u8 name[IWL_FW_INI_MAX_NAME];
|
||||
@ -311,13 +323,24 @@ struct iwl_fw_ini_error_dump {
|
||||
/* This bit is used to differentiate between lmac and umac rxf */
|
||||
#define IWL_RXF_UMAC_BIT BIT(31)
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_error_dump_register - ini register dump
|
||||
* @addr: address of the register
|
||||
* @data: data of the register
|
||||
*/
|
||||
struct iwl_fw_ini_error_dump_register {
|
||||
__le32 addr;
|
||||
__le32 data;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_fifo_error_dump_range - ini fifo range dump
|
||||
* @fifo_num: the fifo num. In case of rxf and umac rxf, set BIT(31) to
|
||||
* distinguish between lmac and umac
|
||||
* @num_of_registers: num of registers to dump, dword size each
|
||||
* @range_data_size: the size of the registers and fifo data
|
||||
* @data: fifo data
|
||||
* @range_data_size: the size of the data
|
||||
* @data: consist of
|
||||
* num_of_registers * (register address + register value) + fifo data
|
||||
*/
|
||||
struct iwl_fw_ini_fifo_error_dump_range {
|
||||
__le32 fifo_num;
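To make the new @data layout above concrete, here is a hypothetical reader fragment; it assumes the struct carries num_of_registers and a flexible data[] array exactly as the kernel-doc describes (the full field list is not visible in this hunk), and ini_fifo_words() is an illustrative name, not driver API.

/* Illustrative only: skip the (address, value) register pairs to reach the raw FIFO words. */
static const __le32 *
ini_fifo_words(const struct iwl_fw_ini_fifo_error_dump_range *range)
{
	const struct iwl_fw_ini_error_dump_register *regs = (const void *)range->data;
	u32 nregs = le32_to_cpu(range->num_of_registers);

	return (const __le32 *)&regs[nregs];
}
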
|
||||
@ -351,13 +374,13 @@ struct iwl_fw_error_dump_rb {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_monitor_dram_dump - ini dram monitor dump
|
||||
* struct iwl_fw_ini_monitor_dump - ini monitor dump
|
||||
* @header - header of the region
|
||||
* @write_ptr - write pointer position in the dram
|
||||
* @write_ptr - write pointer position in the buffer
|
||||
* @cycle_cnt - cycles count
|
||||
* @ranges - the memory ranges of this region
|
||||
*/
|
||||
struct iwl_fw_ini_monitor_dram_dump {
|
||||
struct iwl_fw_ini_monitor_dump {
|
||||
struct iwl_fw_ini_error_dump_header header;
|
||||
__le32 write_ptr;
|
||||
__le32 cycle_cnt;
|
||||
|
@ -272,8 +272,15 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
|
||||
* version of the beacon notification.
|
||||
* @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of
|
||||
* BEACON_FILTER_CONFIG_API_S_VER_4.
|
||||
* @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of
|
||||
* REGULATORY_NVM_GET_INFO_RSP_API_S.
|
||||
* @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of
|
||||
* LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S.
|
||||
* @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of
|
||||
* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of
|
||||
* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
|
||||
* @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
|
||||
* STA_CONTEXT_DOT11AX_API_S
|
||||
*
|
||||
* @NUM_IWL_UCODE_TLV_API: number of bits used
|
||||
*/
|
||||
@ -300,7 +307,10 @@ enum iwl_ucode_tlv_api {
|
||||
IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
|
||||
IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46,
|
||||
IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47,
|
||||
IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48,
|
||||
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
|
||||
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50,
|
||||
IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
|
||||
|
||||
NUM_IWL_UCODE_TLV_API
|
||||
#ifdef __CHECKER__
|
||||
@ -350,6 +360,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
|
||||
* IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command
|
||||
* @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
|
||||
* (6 GHz).
|
||||
* @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command
|
||||
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
|
||||
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
|
||||
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
|
||||
@ -420,6 +431,7 @@ enum iwl_ucode_tlv_capa {
|
||||
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46,
|
||||
IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48,
|
||||
IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47,
|
||||
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
|
||||
|
||||
/* set 2 */
|
||||
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
|
||||
|
@ -145,6 +145,7 @@ struct iwl_fw_runtime {
|
||||
u32 lmac_err_id[MAX_NUM_LMAC];
|
||||
u32 umac_err_id;
|
||||
void *fifo_iter;
|
||||
enum iwl_fw_ini_trigger_id ini_trig_id;
|
||||
} dump;
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
struct {
|
||||
|
@ -383,6 +383,9 @@ struct iwl_csr_params {
|
||||
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
|
||||
* @min_txq_size: minimum number of slots required in a TX queue
|
||||
* @umac_prph_offset: offset to add to UMAC periphery address
|
||||
* @uhb_supported: ultra high band channels supported
|
||||
* @min_256_ba_txq_size: minimum number of slots required in a TX queue which
|
||||
* supports 256 BA aggregation
|
||||
*
|
||||
* We enable the driver to be backward compatible wrt. hardware features.
|
||||
* API differences in uCode shouldn't be handled here but through TLVs
|
||||
@ -433,7 +436,8 @@ struct iwl_cfg {
|
||||
gen2:1,
|
||||
cdb:1,
|
||||
dbgc_supported:1,
|
||||
bisr_workaround:1;
|
||||
bisr_workaround:1,
|
||||
uhb_supported:1;
|
||||
u8 valid_tx_ant;
|
||||
u8 valid_rx_ant;
|
||||
u8 non_shared_ant;
|
||||
@ -450,6 +454,12 @@ struct iwl_cfg {
|
||||
u32 d3_debug_data_length;
|
||||
u32 min_txq_size;
|
||||
u32 umac_prph_offset;
|
||||
u32 fw_mon_smem_write_ptr_addr;
|
||||
u32 fw_mon_smem_write_ptr_msk;
|
||||
u32 fw_mon_smem_cycle_cnt_ptr_addr;
|
||||
u32 fw_mon_smem_cycle_cnt_ptr_msk;
|
||||
u32 gp2_reg_addr;
|
||||
u32 min_256_ba_txq_size;
|
||||
};
|
||||
|
||||
extern const struct iwl_csr_params iwl_csr_v1;
|
||||
@ -573,6 +583,7 @@ extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
|
||||
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
|
||||
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
|
||||
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
|
||||
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0;
|
||||
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
|
||||
|
||||
#endif /* __IWL_CONFIG_H__ */
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -30,7 +30,7 @@
|
||||
*
|
||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -337,6 +337,7 @@ enum {
|
||||
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
|
||||
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
|
||||
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
|
||||
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
|
||||
|
||||
/* HW_RF CHIP ID */
|
||||
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
|
||||
|
@ -5,7 +5,7 @@
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright (C) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -28,7 +28,7 @@
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright (C) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -73,6 +73,9 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
|
||||
int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);
|
||||
int offset_size = copy_size;
|
||||
|
||||
if (le32_to_cpu(header->tlv_version) != 1)
|
||||
return;
|
||||
|
||||
if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM,
|
||||
"Invalid apply point id %d\n", apply_point))
|
||||
return;
|
||||
@ -132,6 +135,9 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
|
||||
hdr = (void *)&tlv->data[0];
|
||||
apply = le32_to_cpu(hdr->apply_point);
|
||||
|
||||
if (le32_to_cpu(hdr->tlv_version) != 1)
|
||||
continue;
|
||||
|
||||
IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n",
|
||||
le32_to_cpu(tlv->type), apply);
|
||||
|
||||
|
@ -130,7 +130,7 @@ enum nvm_sku_bits {
|
||||
/*
|
||||
* These are the channel numbers in the order that they are stored in the NVM
|
||||
*/
|
||||
static const u8 iwl_nvm_channels[] = {
|
||||
static const u16 iwl_nvm_channels[] = {
|
||||
/* 2.4 GHz */
|
||||
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
/* 5 GHz */
|
||||
@ -139,7 +139,7 @@ static const u8 iwl_nvm_channels[] = {
|
||||
149, 153, 157, 161, 165
|
||||
};
|
||||
|
||||
static const u8 iwl_ext_nvm_channels[] = {
|
||||
static const u16 iwl_ext_nvm_channels[] = {
|
||||
/* 2.4 GHz */
|
||||
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
/* 5 GHz */
|
||||
@ -148,14 +148,27 @@ static const u8 iwl_ext_nvm_channels[] = {
|
||||
149, 153, 157, 161, 165, 169, 173, 177, 181
|
||||
};
|
||||
|
||||
static const u16 iwl_uhb_nvm_channels[] = {
|
||||
/* 2.4 GHz */
|
||||
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
/* 5 GHz */
|
||||
36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
|
||||
96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
|
||||
149, 153, 157, 161, 165, 169, 173, 177, 181,
|
||||
/* 6-7 GHz */
|
||||
189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241,
|
||||
245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297,
|
||||
301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353,
|
||||
357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409,
|
||||
413, 417, 421
|
||||
};
|
||||
|
||||
#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
|
||||
#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
|
||||
#define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels)
|
||||
#define NUM_2GHZ_CHANNELS 14
|
||||
#define NUM_2GHZ_CHANNELS_EXT 14
|
||||
#define FIRST_2GHZ_HT_MINUS 5
|
||||
#define LAST_2GHZ_HT_PLUS 9
|
||||
#define LAST_5GHZ_HT 165
|
||||
#define LAST_5GHZ_HT_FAMILY_8000 181
|
||||
#define N_HW_ADDR_MASK 0xF
|
||||
|
||||
/* rate data (static) */
|
||||
@ -213,7 +226,7 @@ enum iwl_nvm_channel_flags {
|
||||
};
|
||||
|
||||
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
|
||||
int chan, u16 flags)
|
||||
int chan, u32 flags)
|
||||
{
|
||||
#define CHECK_AND_PRINT_I(x) \
|
||||
((flags & NVM_CHANNEL_##x) ? " " #x : "")
|
||||
@ -244,20 +257,16 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
|
||||
}
|
||||
|
||||
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
|
||||
u16 nvm_flags, const struct iwl_cfg *cfg)
|
||||
u32 nvm_flags, const struct iwl_cfg *cfg)
|
||||
{
|
||||
u32 flags = IEEE80211_CHAN_NO_HT40;
|
||||
u32 last_5ghz_ht = LAST_5GHZ_HT;
|
||||
|
||||
if (cfg->nvm_type == IWL_NVM_EXT)
|
||||
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
|
||||
|
||||
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
if (ch_num <= LAST_2GHZ_HT_PLUS)
|
||||
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
|
||||
if (ch_num >= FIRST_2GHZ_HT_MINUS)
|
||||
flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
|
||||
} else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
} else if (nvm_flags & NVM_CHANNEL_40MHZ) {
|
||||
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
|
||||
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
|
||||
else
|
||||
@ -292,30 +301,36 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
|
||||
|
||||
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
||||
struct iwl_nvm_data *data,
|
||||
const __le16 * const nvm_ch_flags,
|
||||
u32 sbands_flags)
|
||||
const void * const nvm_ch_flags,
|
||||
u32 sbands_flags, bool v4)
|
||||
{
|
||||
int ch_idx;
|
||||
int n_channels = 0;
|
||||
struct ieee80211_channel *channel;
|
||||
u16 ch_flags;
|
||||
int num_of_ch, num_2ghz_channels;
|
||||
const u8 *nvm_chan;
|
||||
u32 ch_flags;
|
||||
int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS;
|
||||
const u16 *nvm_chan;
|
||||
|
||||
if (cfg->nvm_type != IWL_NVM_EXT) {
|
||||
num_of_ch = IWL_NVM_NUM_CHANNELS;
|
||||
nvm_chan = &iwl_nvm_channels[0];
|
||||
num_2ghz_channels = NUM_2GHZ_CHANNELS;
|
||||
} else {
|
||||
if (cfg->uhb_supported) {
|
||||
num_of_ch = IWL_NVM_NUM_CHANNELS_UHB;
|
||||
nvm_chan = iwl_uhb_nvm_channels;
|
||||
} else if (cfg->nvm_type == IWL_NVM_EXT) {
|
||||
num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
|
||||
nvm_chan = &iwl_ext_nvm_channels[0];
|
||||
num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
|
||||
nvm_chan = iwl_ext_nvm_channels;
|
||||
} else {
|
||||
num_of_ch = IWL_NVM_NUM_CHANNELS;
|
||||
nvm_chan = iwl_nvm_channels;
|
||||
}
|
||||
|
||||
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
|
||||
bool is_5ghz = (ch_idx >= num_2ghz_channels);
|
||||
|
||||
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
|
||||
if (v4)
|
||||
ch_flags =
|
||||
__le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
|
||||
else
|
||||
ch_flags =
|
||||
__le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
|
||||
|
||||
if (is_5ghz && !data->sku_cap_band_52ghz_enable)
|
||||
continue;
|
||||
@ -636,12 +651,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
|
||||
static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
|
||||
u8 tx_chains, u8 rx_chains)
|
||||
{
|
||||
if (sband->band == NL80211_BAND_2GHZ ||
|
||||
sband->band == NL80211_BAND_5GHZ)
|
||||
sband->iftype_data = iwl_he_capa;
|
||||
else
|
||||
return;
|
||||
|
||||
sband->iftype_data = iwl_he_capa;
|
||||
sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
|
||||
|
||||
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
|
||||
@ -661,15 +671,15 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
|
||||
|
||||
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
|
||||
struct iwl_nvm_data *data,
|
||||
const __le16 *nvm_ch_flags, u8 tx_chains,
|
||||
u8 rx_chains, u32 sbands_flags)
|
||||
const void *nvm_ch_flags, u8 tx_chains,
|
||||
u8 rx_chains, u32 sbands_flags, bool v4)
|
||||
{
|
||||
int n_channels;
|
||||
int n_used = 0;
|
||||
struct ieee80211_supported_band *sband;
|
||||
|
||||
n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
|
||||
sbands_flags);
|
||||
sbands_flags, v4);
|
||||
sband = &data->bands[NL80211_BAND_2GHZ];
|
||||
sband->band = NL80211_BAND_2GHZ;
|
||||
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
|
||||
@ -1006,22 +1016,18 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
|
||||
|
||||
iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
|
||||
sbands_flags);
|
||||
sbands_flags, false);
|
||||
data->calib_version = 255;
|
||||
|
||||
return data;
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
|
||||
|
||||
static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
|
||||
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
|
||||
int ch_idx, u16 nvm_flags,
|
||||
const struct iwl_cfg *cfg)
|
||||
{
|
||||
u32 flags = NL80211_RRF_NO_HT40;
|
||||
u32 last_5ghz_ht = LAST_5GHZ_HT;
|
||||
|
||||
if (cfg->nvm_type == IWL_NVM_EXT)
|
||||
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
|
||||
|
||||
if (ch_idx < NUM_2GHZ_CHANNELS &&
|
||||
(nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
@ -1029,8 +1035,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
|
||||
flags &= ~NL80211_RRF_NO_HT40PLUS;
|
||||
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
|
||||
flags &= ~NL80211_RRF_NO_HT40MINUS;
|
||||
} else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
|
||||
(nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
} else if (nvm_flags & NVM_CHANNEL_40MHZ) {
|
||||
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
|
||||
flags &= ~NL80211_RRF_NO_HT40PLUS;
|
||||
else
|
||||
@ -1074,18 +1079,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
|
||||
int ch_idx;
|
||||
u16 ch_flags;
|
||||
u32 reg_rule_flags, prev_reg_rule_flags = 0;
|
||||
const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
|
||||
iwl_ext_nvm_channels : iwl_nvm_channels;
|
||||
const u16 *nvm_chan;
|
||||
struct ieee80211_regdomain *regd, *copy_rd;
|
||||
int size_of_regd, regd_to_copy;
|
||||
struct ieee80211_reg_rule *rule;
|
||||
struct regdb_ptrs *regdb_ptrs;
|
||||
enum nl80211_band band;
|
||||
int center_freq, prev_center_freq = 0;
|
||||
int valid_rules = 0;
|
||||
bool new_rule;
|
||||
int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
|
||||
IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
|
||||
int max_num_ch;
|
||||
|
||||
if (cfg->uhb_supported) {
|
||||
max_num_ch = IWL_NVM_NUM_CHANNELS_UHB;
|
||||
nvm_chan = iwl_uhb_nvm_channels;
|
||||
} else if (cfg->nvm_type == IWL_NVM_EXT) {
|
||||
max_num_ch = IWL_NVM_NUM_CHANNELS_EXT;
|
||||
nvm_chan = iwl_ext_nvm_channels;
|
||||
} else {
|
||||
max_num_ch = IWL_NVM_NUM_CHANNELS;
|
||||
nvm_chan = iwl_nvm_channels;
|
||||
}
|
||||
|
||||
if (WARN_ON(num_of_ch > max_num_ch))
|
||||
num_of_ch = max_num_ch;
|
||||
@ -1097,11 +1110,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
|
||||
num_of_ch);
|
||||
|
||||
/* build a regdomain rule for every valid channel */
size_of_regd =
sizeof(struct ieee80211_regdomain) +
num_of_ch * sizeof(struct ieee80211_reg_rule);
|
regd = kzalloc(size_of_regd, GFP_KERNEL);
regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
|
@ -1177,14 +1186,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
* Narrow down regdom for unused regulatory rules to prevent hole
* between reg rules to wmm rules.
*/
regd_to_copy = sizeof(struct ieee80211_regdomain) +
valid_rules * sizeof(struct ieee80211_reg_rule);
|
copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
if (!copy_rd) {
copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules),
GFP_KERNEL);
if (!copy_rd)
copy_rd = ERR_PTR(-ENOMEM);
goto out;
}
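
The replacement of the open-coded size arithmetic with struct_size() above sizes a header plus its trailing flexible array in a single allocation. Below is a minimal user-space sketch of the same sizing idea, assuming illustrative regdomain/reg_rule stand-in types; the plain sizeof arithmetic here omits the overflow checking that struct_size() adds.

```c
/* Build & run: cc -Wall flexarray_demo.c -o flexarray_demo && ./flexarray_demo */
#include <stdio.h>
#include <stdlib.h>

struct reg_rule { int start, end, flags; };

struct regdomain {
	int n_rules;
	struct reg_rule rules[];   /* flexible array member */
};

static struct regdomain *regdomain_alloc(int n_rules)
{
	/* header plus n_rules trailing elements, allocated as one block */
	struct regdomain *rd = calloc(1, sizeof(*rd) + n_rules * sizeof(rd->rules[0]));

	if (rd)
		rd->n_rules = n_rules;
	return rd;
}

int main(void)
{
	struct regdomain *rd = regdomain_alloc(4);

	if (!rd)
		return 1;
	printf("allocated %d rules\n", rd->n_rules);
	free(rd);
	return 0;
}
```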
|
||||
|
||||
out:
|
||||
kfree(regdb_ptrs);
|
||||
@ -1393,7 +1398,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
|
||||
const struct iwl_fw *fw)
|
||||
{
|
||||
struct iwl_nvm_get_info cmd = {};
|
||||
struct iwl_nvm_get_info_rsp *rsp;
|
||||
struct iwl_nvm_data *nvm;
|
||||
struct iwl_host_cmd hcmd = {
|
||||
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
|
||||
@ -1408,12 +1412,24 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
|
||||
bool empty_otp;
|
||||
u32 mac_flags;
|
||||
u32 sbands_flags = 0;
|
||||
/*
|
||||
* All the values in iwl_nvm_get_info_rsp v4 are the same as
|
||||
* in v3, except for the channel profile part of the
|
||||
* regulatory. So we can just access the new struct, with the
|
||||
* exception of the latter.
|
||||
*/
|
||||
struct iwl_nvm_get_info_rsp *rsp;
|
||||
struct iwl_nvm_get_info_rsp_v3 *rsp_v3;
|
||||
bool v4 = fw_has_api(&fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_REGULATORY_NVM_INFO);
|
||||
size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
|
||||
void *channel_profile;
|
||||
|
||||
ret = iwl_trans_send_cmd(trans, &hcmd);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
|
||||
if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size,
|
||||
"Invalid payload len in NVM response from FW %d",
|
||||
iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
|
||||
ret = -EINVAL;
|
||||
@ -1475,11 +1491,15 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
|
||||
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
|
||||
}
|
||||
|
||||
rsp_v3 = (void *)rsp;
|
||||
channel_profile = v4 ? (void *)rsp->regulatory.channel_profile :
|
||||
(void *)rsp_v3->regulatory.channel_profile;
|
||||
|
||||
iwl_init_sbands(trans->dev, trans->cfg, nvm,
|
||||
rsp->regulatory.channel_profile,
|
||||
nvm->valid_tx_ant & fw->valid_tx_ant,
|
||||
nvm->valid_rx_ant & fw->valid_rx_ant,
|
||||
sbands_flags);
|
||||
sbands_flags, v4);
|
||||
|
||||
iwl_free_resp(&hcmd);
|
||||
return nvm;
|
||||
|
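The hunks above let iwl_get_nvm() accept either the v3 or the v4 NVM-info response, which share a common layout prefix and differ only in the channel-profile tail. A minimal standalone sketch of that pattern follows; the struct names and field sizes are illustrative, not the driver's real firmware layouts.

```c
/* Build & run: cc -Wall nvm_rsp_demo.c -o nvm_rsp_demo && ./nvm_rsp_demo */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified layouts: only the tail (channel profile) differs. */
struct rsp_v3 { uint32_t mac_flags; uint16_t channel_profile[8];  };
struct rsp_v4 { uint32_t mac_flags; uint32_t channel_profile[16]; };

int main(void)
{
	int v4 = 1;                        /* would come from a firmware API flag */
	struct rsp_v4 raw = { .mac_flags = 0x5 };
	size_t rsp_size = v4 ? sizeof(struct rsp_v4) : sizeof(struct rsp_v3);

	/* Validate the payload length against the expected version first. */
	if (sizeof(raw) != rsp_size) {
		fprintf(stderr, "unexpected payload length\n");
		return 1;
	}

	/* The shared prefix can be read through either view; only the
	 * channel-profile pointer depends on the version. */
	struct rsp_v3 *rsp_v3 = (struct rsp_v3 *)&raw;
	void *channel_profile = v4 ? (void *)raw.channel_profile
				   : (void *)rsp_v3->channel_profile;

	printf("mac_flags=%u profile=%p (v4=%d)\n",
	       (unsigned)raw.mac_flags, channel_profile, v4);
	return 0;
}
```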
@ -368,6 +368,12 @@
#define MON_BUFF_WRPTR_VER2 (0xa03c24)
#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28)
#define MON_BUFF_SHIFT_VER2 (0x8)
/* FW monitor family AX210 and on */
#define DBGC_CUR_DBGBUF_BASE_ADDR_LSB (0xd03c20)
#define DBGC_CUR_DBGBUF_BASE_ADDR_MSB (0xd03c24)
#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c)
#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c)
#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff)
|
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
|
||||
|
@ -274,7 +274,6 @@ struct iwl_rx_cmd_buffer {
|
||||
bool _page_stolen;
|
||||
u32 _rx_page_order;
|
||||
unsigned int truesize;
|
||||
u8 status;
|
||||
};
|
||||
|
||||
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
|
||||
@ -768,6 +767,7 @@ struct iwl_self_init_dram {
|
||||
* @umac_error_event_table: addr of umac error table
|
||||
* @error_event_table_tlv_status: bitmap that indicates what error table
|
||||
* pointers were received via TLV. use enum &iwl_error_event_table_status
|
||||
* @hw_error: equals true if hw error interrupt was received from the FW
|
||||
*/
|
||||
struct iwl_trans {
|
||||
const struct iwl_trans_ops *ops;
|
||||
@ -830,6 +830,7 @@ struct iwl_trans {
|
||||
u32 lmac_error_event_table[2];
|
||||
u32 umac_error_event_table;
|
||||
unsigned int error_event_table_tlv_status;
|
||||
bool hw_error;
|
||||
|
||||
/* pointer to trans specific struct */
|
||||
/*Ensure that this pointer will always be aligned to sizeof pointer */
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -1728,9 +1728,12 @@ void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
|
||||
iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, >kdata);
|
||||
}
|
||||
|
||||
#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
|
||||
IWL_SCAN_MAX_PROFILES)
|
||||
|
||||
struct iwl_mvm_nd_query_results {
|
||||
u32 matched_profiles;
|
||||
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
|
||||
u8 matches[ND_QUERY_BUF_LEN];
|
||||
};
|
||||
|
||||
static int
|
||||
@ -1743,6 +1746,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
|
||||
.flags = CMD_WANT_SKB,
|
||||
};
|
||||
int ret, len;
|
||||
size_t query_len, matches_len;
|
||||
|
||||
ret = iwl_mvm_send_cmd(mvm, &cmd);
|
||||
if (ret) {
|
||||
@ -1750,8 +1754,19 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
|
||||
query_len = sizeof(struct iwl_scan_offload_profiles_query);
|
||||
matches_len = sizeof(struct iwl_scan_offload_profile_match) *
|
||||
IWL_SCAN_MAX_PROFILES;
|
||||
} else {
|
||||
query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
|
||||
matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
|
||||
IWL_SCAN_MAX_PROFILES;
|
||||
}
|
||||
|
||||
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
|
||||
if (len < sizeof(*query)) {
|
||||
if (len < query_len) {
|
||||
IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
|
||||
ret = -EIO;
|
||||
goto out_free_resp;
|
||||
@ -1760,7 +1775,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
|
||||
query = (void *)cmd.resp_pkt->data;
|
||||
|
||||
results->matched_profiles = le32_to_cpu(query->matched_profiles);
|
||||
memcpy(results->matches, query->matches, sizeof(results->matches));
|
||||
memcpy(results->matches, query->matches, matches_len);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
|
||||
@ -1771,6 +1786,57 @@ out_free_resp:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_nd_query_results *query,
|
||||
int idx)
|
||||
{
|
||||
int n_chans = 0, i;
|
||||
|
||||
if (fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
|
||||
struct iwl_scan_offload_profile_match *matches =
|
||||
(struct iwl_scan_offload_profile_match *)query->matches;
|
||||
|
||||
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
|
||||
n_chans += hweight8(matches[idx].matching_channels[i]);
|
||||
} else {
|
||||
struct iwl_scan_offload_profile_match_v1 *matches =
|
||||
(struct iwl_scan_offload_profile_match_v1 *)query->matches;
|
||||
|
||||
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
|
||||
n_chans += hweight8(matches[idx].matching_channels[i]);
|
||||
}
|
||||
|
||||
return n_chans;
|
||||
}
|
||||
|
||||
static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
struct iwl_mvm_nd_query_results *query,
struct cfg80211_wowlan_nd_match *match,
int idx)
{
int i;
|
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
struct iwl_scan_offload_profile_match *matches =
(struct iwl_scan_offload_profile_match *)query->matches;
|
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
match->channels[match->n_channels++] =
mvm->nd_channels[i]->center_freq;
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
(struct iwl_scan_offload_profile_match_v1 *)query->matches;
|
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
match->channels[match->n_channels++] =
mvm->nd_channels[i]->center_freq;
}
}
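
iwl_mvm_query_num_match_chans() and iwl_mvm_query_set_freqs() above first count the set bits in the per-profile channel bitmap and then expand each set bit into a center frequency. A self-contained sketch of that two-pass pattern is shown here, assuming made-up bitmap sizes and frequencies rather than the driver's tables.

```c
/* Build & run: cc -Wall match_bitmap_demo.c -o match_bitmap_demo && ./match_bitmap_demo */
#include <stdio.h>
#include <stdint.h>

#define MATCHING_CHANNELS_LEN 8     /* illustrative bitmap size in bytes */

static int hweight8(uint8_t b)
{
	int n = 0;

	while (b) {
		n += b & 1;
		b >>= 1;
	}
	return n;
}

int main(void)
{
	/* each set bit marks one channel index that matched a scan profile */
	uint8_t matching_channels[MATCHING_CHANNELS_LEN] = { 0x05, 0x80 };
	int freqs[MATCHING_CHANNELS_LEN * 8];   /* illustrative frequency table */
	int n_chans = 0, n_found = 0, i;

	for (i = 0; i < MATCHING_CHANNELS_LEN * 8; i++)
		freqs[i] = 2412 + 5 * i;        /* fake center frequencies */

	/* first pass: count matches, as the driver does before allocating */
	for (i = 0; i < MATCHING_CHANNELS_LEN; i++)
		n_chans += hweight8(matching_channels[i]);

	/* second pass: expand bit positions into center frequencies */
	for (i = 0; i < MATCHING_CHANNELS_LEN * 8; i++)
		if (matching_channels[i / 8] & (1u << (i % 8)))
			printf("match %d: %d MHz\n", n_found++, freqs[i]);

	printf("total matches: %d (expected %d)\n", n_found, n_chans);
	return 0;
}
```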
|
||||
|
||||
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
@ -1783,7 +1849,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
|
||||
struct iwl_wowlan_status *fw_status;
|
||||
unsigned long matched_profiles;
|
||||
u32 reasons = 0;
|
||||
int i, j, n_matches, ret;
|
||||
int i, n_matches, ret;
|
||||
|
||||
fw_status = iwl_mvm_get_wakeup_status(mvm);
|
||||
if (!IS_ERR_OR_NULL(fw_status)) {
|
||||
@ -1817,14 +1883,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
|
||||
goto out_report_nd;
|
||||
|
||||
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
|
||||
struct iwl_scan_offload_profile_match *fw_match;
|
||||
struct cfg80211_wowlan_nd_match *match;
|
||||
int idx, n_channels = 0;
|
||||
|
||||
fw_match = &query.matches[i];
|
||||
|
||||
for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
|
||||
n_channels += hweight8(fw_match->matching_channels[j]);
|
||||
n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);
|
||||
|
||||
match = kzalloc(struct_size(match, channels, n_channels),
|
||||
GFP_KERNEL);
|
||||
@ -1844,10 +1906,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
|
||||
if (mvm->n_nd_channels < n_channels)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
|
||||
if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
|
||||
match->channels[match->n_channels++] =
|
||||
mvm->nd_channels[j]->center_freq;
|
||||
iwl_mvm_query_set_freqs(mvm, &query, match, i);
|
||||
}
|
||||
|
||||
out_report_nd:
|
||||
@ -2030,7 +2089,6 @@ out:
|
||||
* 2. We are using a unified image but had an error while exiting D3
|
||||
*/
|
||||
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
|
||||
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
|
||||
/*
|
||||
* When switching images we return 1, which causes mac80211
|
||||
* to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
|
||||
|
@ -743,9 +743,8 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
|
||||
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
|
||||
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
|
||||
#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
|
||||
if (!debugfs_create_file(#name, mode, parent, vif, \
|
||||
&iwl_dbgfs_##name##_ops)) \
|
||||
goto err; \
|
||||
debugfs_create_file(#name, mode, parent, vif, \
|
||||
&iwl_dbgfs_##name##_ops); \
|
||||
} while (0)
|
||||
|
||||
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
|
||||
@ -775,12 +774,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
|
||||
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
|
||||
|
||||
if (!mvmvif->dbgfs_dir) {
|
||||
IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
|
||||
dbgfs_dir);
|
||||
return;
|
||||
}
|
||||
|
||||
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
|
||||
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
|
||||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p)))
|
||||
@ -812,12 +805,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
|
||||
mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
|
||||
mvm->debugfs_dir, buf);
|
||||
if (!mvmvif->dbgfs_slink)
|
||||
IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n",
|
||||
dbgfs_dir);
|
||||
return;
|
||||
err:
|
||||
IWL_ERR(mvm, "Can't create debugfs entity\n");
|
||||
}
|
||||
|
||||
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
|
@ -1349,7 +1349,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
|
||||
return 0;
|
||||
|
||||
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
|
||||
(count - 1));
|
||||
(count - 1), NULL);
|
||||
|
||||
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
|
||||
|
||||
@ -1696,9 +1696,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
|
||||
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
|
||||
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
|
||||
#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
|
||||
if (!debugfs_create_file(alias, mode, parent, mvm, \
|
||||
&iwl_dbgfs_##name##_ops)) \
|
||||
goto err; \
|
||||
debugfs_create_file(alias, mode, parent, mvm, \
|
||||
&iwl_dbgfs_##name##_ops); \
|
||||
} while (0)
|
||||
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
|
||||
MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
|
||||
@ -1709,9 +1708,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
|
||||
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
|
||||
|
||||
#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \
|
||||
if (!debugfs_create_file(alias, mode, parent, sta, \
|
||||
&iwl_dbgfs_##name##_ops)) \
|
||||
goto err; \
|
||||
debugfs_create_file(alias, mode, parent, sta, \
|
||||
&iwl_dbgfs_##name##_ops); \
|
||||
} while (0)
|
||||
#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
|
||||
MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
|
||||
@ -2092,13 +2090,9 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
|
||||
|
||||
if (iwl_mvm_has_tlc_offload(mvm))
|
||||
MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
|
||||
|
||||
return;
|
||||
err:
|
||||
IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
|
||||
}
|
||||
|
||||
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
||||
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
||||
{
|
||||
struct dentry *bcast_dir __maybe_unused;
|
||||
char buf[100];
|
||||
@ -2142,14 +2136,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
||||
#endif
|
||||
MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600);
|
||||
|
||||
if (!debugfs_create_bool("enable_scan_iteration_notif",
|
||||
0600,
|
||||
mvm->debugfs_dir,
|
||||
&mvm->scan_iter_notif_enabled))
|
||||
goto err;
|
||||
if (!debugfs_create_bool("drop_bcn_ap_mode", 0600,
|
||||
mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
|
||||
goto err;
|
||||
debugfs_create_bool("enable_scan_iteration_notif", 0600,
|
||||
mvm->debugfs_dir, &mvm->scan_iter_notif_enabled);
|
||||
debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir,
|
||||
&mvm->drop_bcn_ap_mode);
|
||||
|
||||
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
|
||||
|
||||
@ -2157,13 +2147,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
|
||||
bcast_dir = debugfs_create_dir("bcast_filtering",
|
||||
mvm->debugfs_dir);
|
||||
if (!bcast_dir)
|
||||
goto err;
|
||||
|
||||
if (!debugfs_create_bool("override", 0600,
|
||||
bcast_dir,
|
||||
&mvm->dbgfs_bcast_filtering.override))
|
||||
goto err;
|
||||
debugfs_create_bool("override", 0600, bcast_dir,
|
||||
&mvm->dbgfs_bcast_filtering.override);
|
||||
|
||||
MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
|
||||
bcast_dir, 0600);
|
||||
@ -2175,35 +2161,26 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
|
||||
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
|
||||
if (!debugfs_create_bool("d3_wake_sysassert", 0600,
|
||||
mvm->debugfs_dir, &mvm->d3_wake_sysassert))
|
||||
goto err;
|
||||
if (!debugfs_create_u32("last_netdetect_scans", 0400,
|
||||
mvm->debugfs_dir, &mvm->last_netdetect_scans))
|
||||
goto err;
|
||||
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
|
||||
&mvm->d3_wake_sysassert);
|
||||
debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
|
||||
&mvm->last_netdetect_scans);
|
||||
#endif
|
||||
|
||||
if (!debugfs_create_u8("ps_disabled", 0400,
|
||||
mvm->debugfs_dir, &mvm->ps_disabled))
|
||||
goto err;
|
||||
if (!debugfs_create_blob("nvm_hw", 0400,
|
||||
mvm->debugfs_dir, &mvm->nvm_hw_blob))
|
||||
goto err;
|
||||
if (!debugfs_create_blob("nvm_sw", 0400,
|
||||
mvm->debugfs_dir, &mvm->nvm_sw_blob))
|
||||
goto err;
|
||||
if (!debugfs_create_blob("nvm_calib", 0400,
|
||||
mvm->debugfs_dir, &mvm->nvm_calib_blob))
|
||||
goto err;
|
||||
if (!debugfs_create_blob("nvm_prod", 0400,
|
||||
mvm->debugfs_dir, &mvm->nvm_prod_blob))
|
||||
goto err;
|
||||
if (!debugfs_create_blob("nvm_phy_sku", 0400,
|
||||
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
|
||||
goto err;
|
||||
if (!debugfs_create_blob("nvm_reg", S_IRUSR,
|
||||
mvm->debugfs_dir, &mvm->nvm_reg_blob))
|
||||
goto err;
|
||||
debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir,
|
||||
&mvm->ps_disabled);
|
||||
debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir,
|
||||
&mvm->nvm_hw_blob);
|
||||
debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir,
|
||||
&mvm->nvm_sw_blob);
|
||||
debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir,
|
||||
&mvm->nvm_calib_blob);
|
||||
debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir,
|
||||
&mvm->nvm_prod_blob);
|
||||
debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir,
|
||||
&mvm->nvm_phy_sku_blob);
|
||||
debugfs_create_blob("nvm_reg", S_IRUSR,
|
||||
mvm->debugfs_dir, &mvm->nvm_reg_blob);
|
||||
|
||||
debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
|
||||
|
||||
@ -2212,11 +2189,5 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
||||
* exists (before the opmode exists which removes the target.)
|
||||
*/
|
||||
snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
|
||||
if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
|
||||
goto err;
|
||||
|
||||
return 0;
|
||||
err:
|
||||
IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
|
||||
return -ENOMEM;
|
||||
debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -262,9 +262,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
.preferred_tsf = NUM_TSF_IDS,
|
||||
.found_vif = false,
|
||||
};
|
||||
u32 ac;
|
||||
int ret, i, queue_limit;
|
||||
unsigned long used_hw_queues;
|
||||
int ret, i;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
@ -341,37 +339,9 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
INIT_LIST_HEAD(&mvmvif->time_event_data.list);
|
||||
mvmvif->time_event_data.id = TE_MAX;
|
||||
|
||||
/* No need to allocate data queues to P2P Device MAC.*/
|
||||
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
|
||||
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
|
||||
vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
|
||||
|
||||
/* No need to allocate data queues to P2P Device MAC and NAN.*/
|
||||
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* queues in mac80211 almost entirely independent of
|
||||
* the ones here - no real limit
|
||||
*/
|
||||
queue_limit = IEEE80211_MAX_QUEUES;
|
||||
|
||||
/*
|
||||
* Find available queues, and allocate them to the ACs. When in
|
||||
* DQA-mode they aren't really used, and this is done only so the
|
||||
* mac80211 ieee80211_check_queues() function won't fail
|
||||
*/
|
||||
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
|
||||
u8 queue = find_first_zero_bit(&used_hw_queues, queue_limit);
|
||||
|
||||
if (queue >= queue_limit) {
|
||||
IWL_ERR(mvm, "Failed to allocate queue\n");
|
||||
ret = -EIO;
|
||||
goto exit_fail;
|
||||
}
|
||||
|
||||
__set_bit(queue, &used_hw_queues);
|
||||
vif->hw_queue[ac] = queue;
|
||||
}
|
||||
|
||||
/* Allocate the CAB queue for softAP and GO interfaces */
|
||||
if (vif->type == NL80211_IFTYPE_AP ||
|
||||
@ -1143,9 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
|
||||
ieee80211_tu_to_usec(data.beacon_int * rand /
|
||||
100);
|
||||
} else {
|
||||
mvmvif->ap_beacon_time =
|
||||
iwl_read_prph(mvm->trans,
|
||||
DEVICE_SYSTEM_TIME_REG);
|
||||
mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1573,6 +1541,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
|
||||
|
||||
rcu_read_lock();
|
||||
vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]);
|
||||
mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
|
||||
switch (vif->type) {
|
||||
case NL80211_IFTYPE_AP:
|
||||
@ -1581,7 +1550,6 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
|
||||
csa_vif != vif))
|
||||
goto out_unlock;
|
||||
|
||||
mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
|
||||
csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
|
||||
if (WARN(csa_id != id_n_color,
|
||||
"channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
|
||||
@ -1602,6 +1570,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
|
||||
return;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
iwl_mvm_csa_client_absent(mvm, vif);
|
||||
cancel_delayed_work_sync(&mvmvif->csa_work);
|
||||
ieee80211_chswitch_done(vif, true);
|
||||
break;
|
||||
default:
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -420,6 +420,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
|
||||
|
||||
const static u8 he_if_types_ext_capa_sta[] = {
|
||||
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
|
||||
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
|
||||
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
|
||||
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
|
||||
};
|
||||
@ -597,6 +598,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
BIT(NL80211_IFTYPE_ADHOC);
|
||||
|
||||
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
|
||||
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
|
||||
hw->wiphy->features |= NL80211_FEATURE_HT_IBSS;
|
||||
|
||||
hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
|
||||
if (iwl_mvm_is_lar_supported(mvm))
|
||||
hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
|
||||
@ -732,6 +736,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
|
||||
hw->wiphy->num_iftype_ext_capab =
|
||||
ARRAY_SIZE(he_iftypes_ext_capa);
|
||||
|
||||
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
|
||||
ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
|
||||
}
|
||||
|
||||
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
|
||||
@ -1191,15 +1198,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
|
||||
|
||||
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
|
||||
{
|
||||
/* clear the D3 reconfig, we only need it to avoid dumping a
|
||||
* firmware coredump on reconfiguration, we shouldn't do that
|
||||
* on D3->D0 transition
|
||||
*/
|
||||
if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
|
||||
mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
|
||||
iwl_fw_error_dump(&mvm->fwrt);
|
||||
}
|
||||
|
||||
/* cleanup all stale references (scan, roc), but keep the
|
||||
* ucode_down ref until reconfig is complete
|
||||
*/
|
||||
@ -1500,6 +1498,91 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
|
||||
}
|
||||
|
||||
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
if (mvmvif->csa_failed) {
|
||||
mvmvif->csa_failed = false;
|
||||
ret = -EIO;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_STATION) {
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
|
||||
mvmvif->csa_bcn_pending = false;
|
||||
mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
|
||||
mvmvif->ap_sta_id);
|
||||
|
||||
if (WARN_ON(!mvmsta)) {
|
||||
ret = -EIO;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
|
||||
|
||||
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
|
||||
|
||||
ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
iwl_mvm_stop_session_protection(mvm, vif);
|
||||
}
|
||||
|
||||
mvmvif->ps_disabled = false;
|
||||
|
||||
ret = iwl_mvm_power_update_ps(mvm);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_chan_switch_te_cmd cmd = {
|
||||
.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
|
||||
mvmvif->color)),
|
||||
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
|
||||
};
|
||||
|
||||
IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
|
||||
WIDE_ID(MAC_CONF_GROUP,
|
||||
CHANNEL_SWITCH_TIME_EVENT_CMD),
|
||||
0, sizeof(cmd), &cmd));
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
WARN_ON(iwl_mvm_post_channel_switch(hw, vif));
|
||||
}
|
||||
|
||||
static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
|
||||
{
|
||||
struct iwl_mvm *mvm;
|
||||
struct iwl_mvm_vif *mvmvif;
|
||||
struct ieee80211_vif *vif;
|
||||
|
||||
mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
|
||||
vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
|
||||
mvm = mvmvif->mvm;
|
||||
|
||||
iwl_mvm_abort_channel_switch(mvm->hw, vif);
|
||||
ieee80211_chswitch_done(vif, false);
|
||||
}
|
||||
|
||||
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
@ -1626,6 +1709,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
||||
}
|
||||
|
||||
iwl_mvm_tcm_add_vif(mvm, vif);
|
||||
INIT_DELAYED_WORK(&mvmvif->csa_work,
|
||||
iwl_mvm_channel_switch_disconnect_wk);
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_MONITOR)
|
||||
mvm->monitor_on = true;
|
||||
@ -2127,6 +2212,10 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
|
||||
.frame_time_rts_th =
|
||||
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
|
||||
};
|
||||
int size = fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_MBSSID_HE) ?
|
||||
sizeof(sta_ctxt_cmd) :
|
||||
sizeof(struct iwl_he_sta_context_cmd_v1);
|
||||
struct ieee80211_sta *sta;
|
||||
u32 flags;
|
||||
int i;
|
||||
@ -2254,16 +2343,18 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
|
||||
|
||||
/* Set the PPE thresholds accordingly */
|
||||
if (low_th >= 0 && high_th >= 0) {
|
||||
u8 ***pkt_ext_qam =
|
||||
(void *)sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th;
|
||||
struct iwl_he_pkt_ext *pkt_ext =
|
||||
(struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
|
||||
|
||||
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
|
||||
u8 bw;
|
||||
|
||||
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
|
||||
bw++) {
|
||||
pkt_ext_qam[i][bw][0] = low_th;
|
||||
pkt_ext_qam[i][bw][1] = high_th;
|
||||
pkt_ext->pkt_ext_qam_th[i][bw][0] =
|
||||
low_th;
|
||||
pkt_ext->pkt_ext_qam_th[i][bw][1] =
|
||||
high_th;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2308,13 +2399,23 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
|
||||
(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
|
||||
}
|
||||
|
||||
/* TODO: support Multi BSSID IE */
|
||||
if (vif->bss_conf.nontransmitted) {
|
||||
flags |= STA_CTXT_HE_REF_BSSID_VALID;
|
||||
ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr,
|
||||
vif->bss_conf.transmitter_bssid);
|
||||
sta_ctxt_cmd.max_bssid_indicator =
|
||||
vif->bss_conf.bssid_indicator;
|
||||
sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index;
|
||||
sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap;
|
||||
sta_ctxt_cmd.profile_periodicity =
|
||||
vif->bss_conf.profile_periodicity;
|
||||
}
|
||||
|
||||
sta_ctxt_cmd.flags = cpu_to_le32(flags);
|
||||
|
||||
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
|
||||
DATA_PATH_GROUP, 0),
|
||||
0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
|
||||
0, size, &sta_ctxt_cmd))
|
||||
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
|
||||
}
|
||||
|
||||
@ -3612,7 +3713,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
int duration)
|
||||
{
|
||||
int res, time_reg = DEVICE_SYSTEM_TIME_REG;
|
||||
int res;
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
|
||||
static const u16 time_event_response[] = { HOT_SPOT_CMD };
|
||||
@ -3638,7 +3739,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
|
||||
0);
|
||||
|
||||
/* Set the time and duration */
|
||||
tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg));
|
||||
tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm));
|
||||
|
||||
delay = AUX_ROC_MIN_DELAY;
|
||||
req_dur = MSEC_TO_TU(duration);
|
||||
@ -4442,16 +4543,22 @@ static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
|
||||
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
|
||||
.tsf = cpu_to_le32(chsw->timestamp),
|
||||
.cs_count = chsw->count,
|
||||
.cs_mode = chsw->block_tx,
|
||||
};
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (chsw->delay)
|
||||
cmd.cs_delayed_bcn_count =
|
||||
DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int);
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm,
|
||||
WIDE_ID(MAC_CONF_GROUP,
|
||||
CHANNEL_SWITCH_TIME_EVENT_CMD),
|
||||
0, sizeof(cmd), &cmd);
|
||||
}
|
||||
|
||||
#define IWL_MAX_CSA_BLOCK_TX 1500
|
||||
static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_channel_switch *chsw)
|
||||
@ -4516,8 +4623,18 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
|
||||
((vif->bss_conf.beacon_int * (chsw->count - 1) -
|
||||
IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
|
||||
|
||||
if (chsw->block_tx)
|
||||
if (chsw->block_tx) {
|
||||
iwl_mvm_csa_client_absent(mvm, vif);
|
||||
/*
|
||||
* In case of undetermined / long time with immediate
|
||||
* quiet monitor status to gracefully disconnect
|
||||
*/
|
||||
if (!chsw->count ||
|
||||
chsw->count * vif->bss_conf.beacon_int >
|
||||
IWL_MAX_CSA_BLOCK_TX)
|
||||
schedule_delayed_work(&mvmvif->csa_work,
|
||||
msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX));
|
||||
}
|
||||
|
||||
if (mvmvif->bf_data.bf_enabled) {
|
||||
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
|
||||
@ -4532,6 +4649,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
|
||||
iwl_mvm_schedule_csa_period(mvm, vif,
|
||||
vif->bss_conf.beacon_int,
|
||||
apply_time);
|
||||
|
||||
mvmvif->csa_count = chsw->count;
|
||||
mvmvif->csa_misbehave = false;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
@ -4552,52 +4672,42 @@ out_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_channel_switch *chsw)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
int ret;
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_chan_switch_te_cmd cmd = {
|
||||
.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
|
||||
mvmvif->color)),
|
||||
.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
|
||||
.tsf = cpu_to_le32(chsw->timestamp),
|
||||
.cs_count = chsw->count,
|
||||
.cs_mode = chsw->block_tx,
|
||||
};
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
|
||||
return;
|
||||
|
||||
if (mvmvif->csa_failed) {
|
||||
mvmvif->csa_failed = false;
|
||||
ret = -EIO;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_STATION) {
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
|
||||
mvmvif->csa_bcn_pending = false;
|
||||
mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
|
||||
mvmvif->ap_sta_id);
|
||||
|
||||
if (WARN_ON(!mvmsta)) {
|
||||
ret = -EIO;
|
||||
goto out_unlock;
|
||||
if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
|
||||
if (mvmvif->csa_misbehave) {
|
||||
/* Second time, give up on this AP */
|
||||
iwl_mvm_abort_channel_switch(hw, vif);
|
||||
ieee80211_chswitch_done(vif, false);
|
||||
mvmvif->csa_misbehave = false;
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
|
||||
|
||||
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
|
||||
|
||||
ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
iwl_mvm_stop_session_protection(mvm, vif);
|
||||
mvmvif->csa_misbehave = true;
|
||||
}
|
||||
mvmvif->csa_count = chsw->count;
|
||||
|
||||
mvmvif->ps_disabled = false;
|
||||
IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id);
|
||||
|
||||
ret = iwl_mvm_power_update_ps(mvm);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
return ret;
|
||||
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
|
||||
WIDE_ID(MAC_CONF_GROUP,
|
||||
CHANNEL_SWITCH_TIME_EVENT_CMD),
|
||||
CMD_ASYNC, sizeof(cmd), &cmd));
|
||||
}
|
||||
|
||||
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
|
||||
@ -5056,6 +5166,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
|
||||
.channel_switch = iwl_mvm_channel_switch,
|
||||
.pre_channel_switch = iwl_mvm_pre_channel_switch,
|
||||
.post_channel_switch = iwl_mvm_post_channel_switch,
|
||||
.abort_channel_switch = iwl_mvm_abort_channel_switch,
|
||||
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
|
||||
|
||||
.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
|
||||
.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -490,6 +490,9 @@ struct iwl_mvm_vif {
|
||||
bool csa_countdown;
|
||||
bool csa_failed;
|
||||
u16 csa_target_freq;
|
||||
u16 csa_count;
|
||||
u16 csa_misbehave;
|
||||
struct delayed_work csa_work;
|
||||
|
||||
/* Indicates that we are waiting for a beacon on a new channel */
|
||||
bool csa_bcn_pending;
|
||||
@ -1199,7 +1202,6 @@ struct iwl_mvm {
|
||||
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
|
||||
* @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
|
||||
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
|
||||
* @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
|
||||
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
|
||||
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
|
||||
*/
|
||||
@ -1211,7 +1213,6 @@ enum iwl_mvm_status {
|
||||
IWL_MVM_STATUS_IN_HW_RESTART,
|
||||
IWL_MVM_STATUS_IN_D0I3,
|
||||
IWL_MVM_STATUS_ROC_AUX_RUNNING,
|
||||
IWL_MVM_STATUS_D3_RECONFIG,
|
||||
IWL_MVM_STATUS_FIRMWARE_RUNNING,
|
||||
IWL_MVM_STATUS_NEED_FLUSH_P2P,
|
||||
};
|
||||
@ -1537,6 +1538,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
|
||||
u8 first_antenna(u8 mask);
|
||||
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
|
||||
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
|
||||
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
|
||||
|
||||
/* Tx / Host Commands */
|
||||
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
|
||||
@ -1649,8 +1651,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb);
|
||||
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue);
|
||||
void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue);
|
||||
void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue);
|
||||
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue);
|
||||
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
|
||||
@ -1784,14 +1786,13 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
|
||||
|
||||
/* MVM debugfs */
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
|
||||
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
|
||||
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
#else
|
||||
static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
|
||||
struct dentry *dbgfs_dir)
|
||||
static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
|
||||
struct dentry *dbgfs_dir)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void
|
||||
iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
@ -2023,17 +2024,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
|
||||
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
|
||||
{
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
/* If IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set then we received
|
||||
* an assert. Since we failed to bring the interface up, mac80211
|
||||
* will not attempt to reconfig the device,
|
||||
* which handles the dump collection in assert flow,
|
||||
* so trigger dump collection here.
|
||||
*/
|
||||
if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
|
||||
&mvm->status))
|
||||
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
|
||||
false, 0);
|
||||
|
||||
iwl_fw_cancel_timestamp(&mvm->fwrt);
|
||||
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
|
||||
iwl_fwrt_stop_device(&mvm->fwrt);
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -862,9 +862,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
min_backoff = iwl_mvm_min_backoff(mvm);
|
||||
iwl_mvm_thermal_initialize(mvm, min_backoff);
|
||||
|
||||
err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
|
||||
if (err)
|
||||
goto out_unregister;
|
||||
iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
|
||||
|
||||
if (!iwl_mvm_has_new_rx_stats_api(mvm))
|
||||
memset(&mvm->rx_stats_v3, 0,
|
||||
@ -881,14 +879,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
|
||||
return op_mode;
|
||||
|
||||
out_unregister:
|
||||
if (iwlmvm_mod_params.init_dbg)
|
||||
return op_mode;
|
||||
|
||||
ieee80211_unregister_hw(mvm->hw);
|
||||
mvm->hw_registered = false;
|
||||
iwl_mvm_leds_exit(mvm);
|
||||
iwl_mvm_thermal_exit(mvm);
|
||||
out_free:
|
||||
iwl_fw_flush_dump(&mvm->fwrt);
|
||||
iwl_fw_runtime_free(&mvm->fwrt);
|
||||
@ -1105,7 +1095,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
|
||||
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
|
||||
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
|
||||
else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
|
||||
iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0);
|
||||
iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
|
||||
else
|
||||
iwl_mvm_rx_common(mvm, rxb, pkt);
|
||||
}
|
||||
@ -1291,8 +1281,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
||||
* can't recover this since we're already half suspended.
|
||||
*/
|
||||
if (!mvm->fw_restart && fw_error) {
|
||||
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
|
||||
false, 0);
|
||||
iwl_fw_error_collect(&mvm->fwrt);
|
||||
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
|
||||
struct iwl_mvm_reprobe *reprobe;
|
||||
|
||||
@ -1340,6 +1329,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
||||
}
|
||||
}
|
||||
|
||||
iwl_fw_error_collect(&mvm->fwrt);
|
||||
|
||||
if (fw_error && mvm->fw_restart > 0)
|
||||
mvm->fw_restart--;
|
||||
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -27,7 +27,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -345,6 +345,37 @@ out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
|
if (vht_cap && vht_cap->vht_supported) {
switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
return IEEE80211_MAX_MPDU_LEN_VHT_11454;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
return IEEE80211_MAX_MPDU_LEN_VHT_7991;
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
}
|
} else if (ht_cap && ht_cap->ht_supported) {
if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
/*
* agg is offloaded so we need to assume that agg
* are enabled and max mpdu in ampdu is 4095
* (spec 802.11-2016 9.3.2.1)
*/
return IEEE80211_MAX_MPDU_LEN_HT_BA;
else
return IEEE80211_MAX_MPDU_LEN_HT_3839;
}
|
/* in legacy mode no amsdu is enabled so return zero */
return 0;
}
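
rs_fw_get_max_amsdu_len() above derives the peer's maximum A-MSDU length from its VHT or HT capabilities. The standalone sketch below mirrors that decision tree with illustrative capability structs and mask values, not the real mac80211 definitions.

```c
/* Build & run: cc -Wall amsdu_demo.c -o amsdu_demo && ./amsdu_demo */
#include <stdio.h>

/* Illustrative stand-ins for the mac80211 capability structs and constants. */
struct vht_cap { int supported; unsigned cap; };
struct ht_cap  { int supported; unsigned cap; };

#define VHT_MAX_MPDU_MASK   0x3
#define VHT_MAX_MPDU_11454  0x2
#define VHT_MAX_MPDU_7991   0x1
#define HT_CAP_MAX_AMSDU    0x800

static unsigned max_amsdu_len(const struct vht_cap *vht, const struct ht_cap *ht)
{
	if (vht && vht->supported) {
		switch (vht->cap & VHT_MAX_MPDU_MASK) {
		case VHT_MAX_MPDU_11454: return 11454;
		case VHT_MAX_MPDU_7991:  return 7991;
		default:                 return 3895;
		}
	} else if (ht && ht->supported) {
		/* aggregation is offloaded, so assume the 4095-byte BA limit */
		return (ht->cap & HT_CAP_MAX_AMSDU) ? 4095 : 3839;
	}

	return 0;   /* legacy peer: no A-MSDU */
}

int main(void)
{
	struct vht_cap vht = { .supported = 1, .cap = VHT_MAX_MPDU_11454 };
	struct ht_cap ht = { .supported = 1, .cap = 0 };

	printf("VHT peer: %u bytes\n", max_amsdu_len(&vht, &ht));
	printf("HT-only peer: %u bytes\n", max_amsdu_len(NULL, &ht));
	return 0;
}
```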
|
||||
|
||||
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
enum nl80211_band band, bool update)
|
||||
{
|
||||
@ -353,14 +384,15 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
|
||||
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
|
||||
struct ieee80211_supported_band *sband;
|
||||
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
|
||||
struct iwl_tlc_config_cmd cfg_cmd = {
|
||||
.sta_id = mvmsta->sta_id,
|
||||
.max_ch_width = update ?
|
||||
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
|
||||
.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
|
||||
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
|
||||
.max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
|
||||
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
|
||||
.max_mpdu_len = cpu_to_le16(max_amsdu_len),
|
||||
.amsdu = iwl_mvm_is_csum_supported(mvm),
|
||||
};
|
||||
int ret;
|
||||
@ -373,6 +405,12 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
sband = hw->wiphy->bands[band];
|
||||
rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
|
||||
|
||||
/*
|
||||
* since TLC offload works with one mode we can assume
|
||||
* that only vht/ht is used and also set it as station max amsdu
|
||||
*/
|
||||
sta->max_amsdu_len = max_amsdu_len;
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
|
||||
|
@ -4078,9 +4078,8 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
|
||||
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
|
||||
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
|
||||
#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \
|
||||
if (!debugfs_create_file(#name, mode, parent, lq_sta, \
|
||||
&iwl_dbgfs_##name##_ops)) \
|
||||
goto err; \
|
||||
debugfs_create_file(#name, mode, parent, lq_sta, \
|
||||
&iwl_dbgfs_##name##_ops); \
|
||||
} while (0)
|
||||
|
||||
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
|
||||
@ -4108,9 +4107,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
|
||||
&lq_sta->pers.dbg_fixed_txp_reduction);
|
||||
|
||||
MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
|
||||
return;
|
||||
err:
|
||||
IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
|
||||
}
|
||||
|
||||
void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -1679,8 +1679,8 @@ out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue)
|
||||
void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue)
|
||||
{
|
||||
struct ieee80211_rx_status *rx_status;
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
@ -1701,10 +1701,6 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
|
||||
return;
|
||||
|
||||
/* Currently only NDP type is supported */
|
||||
if (info_type != RX_NO_DATA_INFO_TYPE_NDP)
|
||||
return;
|
||||
|
||||
energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
|
||||
energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
|
||||
channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
|
||||
@ -1726,9 +1722,22 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
|
||||
/* 0-length PSDU */
|
||||
rx_status->flag |= RX_FLAG_NO_PSDU;
|
||||
/* currently this is the only type for which we get this notif */
|
||||
rx_status->zero_length_psdu_type =
|
||||
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
|
||||
|
||||
switch (info_type) {
|
||||
case RX_NO_DATA_INFO_TYPE_NDP:
|
||||
rx_status->zero_length_psdu_type =
|
||||
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
|
||||
break;
|
||||
case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
|
||||
case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED:
|
||||
rx_status->zero_length_psdu_type =
|
||||
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
|
||||
break;
|
||||
default:
|
||||
rx_status->zero_length_psdu_type =
|
||||
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
|
||||
break;
|
||||
}
|
||||
|
||||
/* This may be overridden by iwl_mvm_rx_he() to HE_RU */
|
||||
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
|
||||
|
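The hunk above maps the firmware's RX_NO_DATA info types onto radiotap zero-length-PSDU types instead of handling only the NDP case. A small sketch of that classification follows; the enum names and numeric values are illustrative placeholders, not the actual radiotap or firmware constants.

```c
/* Build & run: cc -Wall psdu_type_demo.c -o psdu_type_demo && ./psdu_type_demo */
#include <stdio.h>

/* Illustrative stand-ins for the firmware info types and radiotap values. */
enum no_data_info_type { INFO_NDP, INFO_MU_UNMATCHED, INFO_HE_TB_UNMATCHED, INFO_OTHER };
enum zero_len_psdu_type { PSDU_SOUNDING = 0, PSDU_NOT_CAPTURED = 1, PSDU_VENDOR = 0xff };

static enum zero_len_psdu_type classify(enum no_data_info_type t)
{
	switch (t) {
	case INFO_NDP:
		return PSDU_SOUNDING;           /* sounding NDP */
	case INFO_MU_UNMATCHED:
	case INFO_HE_TB_UNMATCHED:
		return PSDU_NOT_CAPTURED;       /* frame existed but was not captured */
	default:
		return PSDU_VENDOR;             /* anything else: vendor-specific */
	}
}

int main(void)
{
	printf("NDP -> %d\n", classify(INFO_NDP));
	printf("MU unmatched -> %d\n", classify(INFO_MU_UNMATCHED));
	printf("other -> %d\n", classify(INFO_OTHER));
	return 0;
}
```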
@ -1082,21 +1082,23 @@ static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
|
||||
dwell->extended = IWL_SCAN_DWELL_EXTENDED;
|
||||
}
|
||||
|
||||
static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
u32 max_channels)
{
struct ieee80211_supported_band *band;
int i, j = 0;
|
band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
for (i = 0; i < band->n_channels; i++, j++)
for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++, j++)
for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
}
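
iwl_mvm_fill_channels() above now takes a max_channels bound so the 2 GHz and 5 GHz channel lists can never overrun the firmware's channel array. A user-space sketch of that bounded two-band copy, with invented band data, is shown below.

```c
/* Build & run: cc -Wall fill_channels_demo.c -o fill_channels_demo && ./fill_channels_demo */
#include <stdio.h>
#include <stdint.h>

struct channel { uint8_t hw_value; };
struct band { int n_channels; const struct channel *channels; };

/* Copy channel numbers from both bands, never writing past max_channels. */
static int fill_channels(uint8_t *out, unsigned max_channels,
			 const struct band *b2ghz, const struct band *b5ghz)
{
	unsigned i, j = 0;

	for (i = 0; i < (unsigned)b2ghz->n_channels && j < max_channels; i++, j++)
		out[j] = b2ghz->channels[i].hw_value;
	for (i = 0; i < (unsigned)b5ghz->n_channels && j < max_channels; i++, j++)
		out[j] = b5ghz->channels[i].hw_value;
	return j;
}

int main(void)
{
	static const struct channel c2[] = { {1}, {6}, {11} };
	static const struct channel c5[] = { {36}, {40}, {44}, {48} };
	struct band b2 = { 3, c2 }, b5 = { 4, c5 };
	uint8_t buf[5];      /* deliberately smaller than the 3 + 4 channels */
	int n = fill_channels(buf, sizeof(buf), &b2, &b5);
	int i;

	for (i = 0; i < n; i++)
		printf("%u ", buf[i]);
	printf("(%d channels, capped at %zu)\n", n, sizeof(buf));
	return 0;
}
```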
|
||||
|
||||
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
|
||||
u32 flags, u8 channel_flags)
|
||||
u32 flags, u8 channel_flags,
|
||||
u32 max_channels)
|
||||
{
|
||||
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
|
||||
struct iwl_scan_config_v1 *cfg = config;
|
||||
@ -1115,11 +1117,12 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
|
||||
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
|
||||
cfg->channel_flags = channel_flags;
|
||||
|
||||
iwl_mvm_fill_channels(mvm, cfg->channel_array);
|
||||
iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
|
||||
}
|
||||
|
||||
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
|
||||
u32 flags, u8 channel_flags)
|
||||
u32 flags, u8 channel_flags,
|
||||
u32 max_channels)
|
||||
{
|
||||
struct iwl_scan_config *cfg = config;
|
||||
|
||||
@ -1162,7 +1165,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
|
||||
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
|
||||
cfg->channel_flags = channel_flags;
|
||||
|
||||
iwl_mvm_fill_channels(mvm, cfg->channel_array);
|
||||
iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
|
||||
}
|
||||
|
||||
int iwl_mvm_config_scan(struct iwl_mvm *mvm)
|
||||
@ -1181,7 +1184,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
|
||||
u8 channel_flags;
|
||||
|
||||
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
|
||||
return -ENOBUFS;
|
||||
num_channels = mvm->fw->ucode_capa.n_scan_channels;
|
||||
|
||||
if (iwl_mvm_is_cdb_supported(mvm)) {
|
||||
type = iwl_mvm_get_scan_type_band(mvm, NULL,
|
||||
@ -1234,9 +1237,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
|
||||
flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
|
||||
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
|
||||
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
|
||||
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
|
||||
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
|
||||
num_channels);
|
||||
} else {
|
||||
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
|
||||
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
|
||||
num_channels);
|
||||
}
|
||||
|
||||
cmd.data[0] = cfg;
|
||||
|
@ -2277,7 +2277,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
|
||||
const u8 *maddr = _maddr;
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
.fifo = IWL_MVM_TX_FIFO_MCAST,
|
||||
.fifo = vif->type == NL80211_IFTYPE_AP ?
|
||||
IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
|
||||
.sta_id = msta->sta_id,
|
||||
.tid = 0,
|
||||
.aggregate = false,
|
||||
|
@ -7,7 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(C) 2018 Intel Corporation
|
||||
* Copyright(C) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -29,7 +29,7 @@
|
||||
*
|
||||
* Copyright(c) 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(C) 2018 Intel Corporation
|
||||
* Copyright(C) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -252,8 +252,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
|
||||
|
||||
/* we only send requests to our switching peer - update sent time */
|
||||
if (state == IWL_MVM_TDLS_SW_REQ_SENT)
|
||||
mvm->tdls_cs.peer.sent_timestamp =
|
||||
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
|
||||
mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
|
||||
|
||||
if (state == IWL_MVM_TDLS_SW_IDLE)
|
||||
mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -234,6 +234,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
|
||||
break;
|
||||
}
|
||||
iwl_mvm_csa_client_absent(mvm, te_data->vif);
|
||||
cancel_delayed_work_sync(&mvmvif->csa_work);
|
||||
ieee80211_chswitch_done(te_data->vif, true);
|
||||
break;
|
||||
default:
|
||||
|
@ -1418,6 +1418,16 @@ void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
|
||||
}
|
||||
|
||||
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
|
||||
{
|
||||
u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
|
||||
|
||||
if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
|
||||
mvm->trans->cfg->gp2_reg_addr)
|
||||
reg_addr = mvm->trans->cfg->gp2_reg_addr;
|
||||
|
||||
return iwl_read_prph(mvm->trans, reg_addr);
|
||||
}
|
||||
|
||||
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
|
||||
{
|
||||
@ -1432,7 +1442,7 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
|
||||
iwl_mvm_power_update_device(mvm);
|
||||
}
|
||||
|
||||
*gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
|
||||
*gp2 = iwl_mvm_get_systime(mvm);
|
||||
*boottime = ktime_get_boot_ns();
|
||||
|
||||
if (!ps_disabled) {
|
||||
|
@ -963,9 +963,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
|
||||
{IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
|
||||
{IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
|
||||
|
||||
{IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
|
||||
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
|
||||
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
|
||||
@ -1047,9 +1044,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
}
|
||||
|
||||
/* register transport layer debugfs here */
|
||||
ret = iwl_trans_pcie_dbgfs_register(iwl_trans);
|
||||
if (ret)
|
||||
goto out_free_drv;
|
||||
iwl_trans_pcie_dbgfs_register(iwl_trans);
|
||||
|
||||
/* if RTPM is in use, enable it in our device */
|
||||
if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
|
||||
@ -1078,8 +1073,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_drv:
|
||||
iwl_drv_stop(iwl_trans->drv);
|
||||
out_free_trans:
|
||||
iwl_trans_pcie_free(iwl_trans);
|
||||
return ret;
|
||||
|
@ -106,7 +106,6 @@ struct iwl_host_cmd;
|
||||
* @page: driver's pointer to the rxb page
|
||||
* @invalid: rxb is in driver ownership - not owned by HW
|
||||
* @vid: index of this rxb in the global table
|
||||
* @size: size used from the buffer
|
||||
*/
|
||||
struct iwl_rx_mem_buffer {
|
||||
dma_addr_t page_dma;
|
||||
@ -114,7 +113,6 @@ struct iwl_rx_mem_buffer {
|
||||
u16 vid;
|
||||
bool invalid;
|
||||
struct list_head list;
|
||||
u32 size;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -135,46 +133,32 @@ struct isr_statistics {
|
||||
u32 unhandled;
|
||||
};
|
||||
|
||||
#define IWL_RX_TD_TYPE_MSK 0xff000000
|
||||
#define IWL_RX_TD_SIZE_MSK 0x00ffffff
|
||||
#define IWL_RX_TD_SIZE_2K BIT(11)
|
||||
#define IWL_RX_TD_TYPE 0
|
||||
|
||||
/**
|
||||
* struct iwl_rx_transfer_desc - transfer descriptor
|
||||
* @type_n_size: buffer type (bit 0: external buff valid,
|
||||
* bit 1: optional footer valid, bit 2-7: reserved)
|
||||
* and buffer size
|
||||
* @addr: ptr to free buffer start address
|
||||
* @rbid: unique tag of the buffer
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct iwl_rx_transfer_desc {
|
||||
__le32 type_n_size;
|
||||
__le64 addr;
|
||||
__le16 rbid;
|
||||
__le16 reserved;
|
||||
__le16 reserved[3];
|
||||
__le64 addr;
|
||||
} __packed;
|
||||
|
||||
#define IWL_RX_CD_SIZE 0xffffff00
|
||||
#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
|
||||
|
||||
/**
|
||||
* struct iwl_rx_completion_desc - completion descriptor
|
||||
* @type: buffer type (bit 0: external buff valid,
|
||||
* bit 1: optional footer valid, bit 2-7: reserved)
|
||||
* @status: status of the completion
|
||||
* @reserved1: reserved
|
||||
* @rbid: unique tag of the received buffer
|
||||
* @size: buffer size, masked by IWL_RX_CD_SIZE
|
||||
* @flags: flags (0: fragmented, all others: reserved)
|
||||
* @reserved2: reserved
|
||||
*/
|
||||
struct iwl_rx_completion_desc {
|
||||
u8 type;
|
||||
u8 status;
|
||||
__le16 reserved1;
|
||||
__le32 reserved1;
|
||||
__le16 rbid;
|
||||
__le32 size;
|
||||
u8 reserved2[22];
|
||||
u8 flags;
|
||||
u8 reserved2[25];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
@ -1046,12 +1030,9 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
|
||||
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
|
||||
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
|
||||
#else
|
||||
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
|
||||
#endif
|
||||
|
||||
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
|
||||
|
@ -282,9 +282,8 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
|
||||
struct iwl_rx_transfer_desc *bd = rxq->bd;
|
||||
|
||||
bd[rxq->write].type_n_size =
|
||||
cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
|
||||
((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
|
||||
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
|
||||
|
||||
bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
|
||||
bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
|
||||
} else {
|
||||
@ -1265,9 +1264,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
|
||||
.truesize = max_len,
|
||||
};
|
||||
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
|
||||
rxcb.status = rxq->cd[i].status;
|
||||
|
||||
pkt = rxb_addr(&rxcb);
|
||||
|
||||
if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
|
||||
@ -1394,6 +1390,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
u16 vid;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
|
||||
|
||||
if (!trans->cfg->mq_rx_supported) {
|
||||
rxb = rxq->queue[i];
|
||||
rxq->queue[i] = NULL;
|
||||
@ -1415,9 +1413,6 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
|
||||
|
||||
IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
|
||||
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
|
||||
rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
|
||||
|
||||
rxb->invalid = true;
|
||||
|
||||
return rxb;
|
||||
@ -2212,6 +2207,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
|
||||
"Hardware error detected. Restarting.\n");
|
||||
|
||||
isr_stats->hw++;
|
||||
trans->hw_error = true;
|
||||
iwl_pcie_irq_handle_error(trans);
|
||||
}
|
||||
|
||||
|
@ -2442,9 +2442,8 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
/* create and remove of files */
|
||||
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
|
||||
if (!debugfs_create_file(#name, mode, parent, trans, \
|
||||
&iwl_dbgfs_##name##_ops)) \
|
||||
goto err; \
|
||||
debugfs_create_file(#name, mode, parent, trans, \
|
||||
&iwl_dbgfs_##name##_ops); \
|
||||
} while (0)
|
||||
|
||||
/* file operation */
|
||||
@ -2847,7 +2846,7 @@ static const struct file_operations iwl_dbgfs_monitor_data_ops = {
|
||||
};
|
||||
|
||||
/* Create the debugfs files and directories */
|
||||
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
|
||||
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
|
||||
{
|
||||
struct dentry *dir = trans->dbgfs_dir;
|
||||
|
||||
@ -2858,11 +2857,6 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
|
||||
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
|
||||
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
|
||||
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
|
||||
@ -3012,10 +3006,14 @@ static void
|
||||
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
|
||||
struct iwl_fw_error_dump_fw_mon *fw_mon_data)
|
||||
{
|
||||
u32 base, write_ptr, wrap_cnt;
|
||||
u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
|
||||
|
||||
/* If there was a dest TLV - use the values from there */
|
||||
if (trans->ini_valid) {
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
|
||||
base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
|
||||
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
|
||||
write_ptr = DBGC_CUR_DBGBUF_STATUS;
|
||||
wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
|
||||
} else if (trans->ini_valid) {
|
||||
base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2);
|
||||
write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2);
|
||||
wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2);
|
||||
@ -3028,12 +3026,18 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
|
||||
write_ptr = MON_BUFF_WRPTR;
|
||||
wrap_cnt = MON_BUFF_CYCLE_CNT;
|
||||
}
|
||||
fw_mon_data->fw_mon_wr_ptr =
|
||||
cpu_to_le32(iwl_read_prph(trans, write_ptr));
|
||||
|
||||
write_ptr_val = iwl_read_prph(trans, write_ptr);
|
||||
fw_mon_data->fw_mon_cycle_cnt =
|
||||
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
|
||||
fw_mon_data->fw_mon_base_ptr =
|
||||
cpu_to_le32(iwl_read_prph(trans, base));
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
|
||||
fw_mon_data->fw_mon_base_high_ptr =
|
||||
cpu_to_le32(iwl_read_prph(trans, base_high));
|
||||
write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
|
||||
}
|
||||
fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
|
||||
}
|
||||
|
||||
static u32
|
||||
@ -3044,9 +3048,10 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
|
||||
u32 len = 0;
|
||||
|
||||
if ((trans->num_blocks &&
|
||||
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
|
||||
(trans->dbg_dest_tlv && !trans->ini_valid) ||
|
||||
(trans->ini_valid && trans->num_blocks)) {
|
||||
(trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
|
||||
trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 ||
|
||||
trans->ini_valid)) ||
|
||||
(trans->dbg_dest_tlv && !trans->ini_valid)) {
|
||||
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
|
||||
|
||||
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
|
||||
@ -3165,8 +3170,10 @@ static struct iwl_trans_dump_data
|
||||
len = sizeof(*dump_data);
|
||||
|
||||
/* host commands */
|
||||
len += sizeof(*data) +
|
||||
cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD))
|
||||
len += sizeof(*data) +
|
||||
cmdq->n_window * (sizeof(*txcmd) +
|
||||
TFD_MAX_PAYLOAD_SIZE);
|
||||
|
||||
/* FW monitor */
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
|
||||
@ -3540,6 +3547,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
||||
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
|
||||
trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
|
||||
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
|
||||
trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
|
||||
}
|
||||
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
|
||||
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
|
@ -999,7 +999,8 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
|
||||
slots_num = max_t(u32, TFD_CMD_SLOTS,
|
||||
trans->cfg->min_txq_size);
|
||||
else
|
||||
slots_num = TFD_TX_CMD_SLOTS;
|
||||
slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
|
||||
trans->cfg->min_256_ba_txq_size);
|
||||
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
|
||||
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
|
||||
slots_num, cmd_queue);
|
||||
@ -1052,7 +1053,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
|
||||
slots_num = max_t(u32, TFD_CMD_SLOTS,
|
||||
trans->cfg->min_txq_size);
|
||||
else
|
||||
slots_num = TFD_TX_CMD_SLOTS;
|
||||
slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
|
||||
trans->cfg->min_256_ba_txq_size);
|
||||
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
|
||||
slots_num, cmd_queue);
|
||||
if (ret) {
|
||||
|
@ -4082,16 +4082,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
|
||||
|
||||
if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
|
||||
dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
|
||||
kfree(hostcmd);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* process hostcmd response*/
|
||||
skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
|
||||
if (!skb)
|
||||
if (!skb) {
|
||||
kfree(hostcmd);
|
||||
return -ENOMEM;
|
||||
}
|
||||
err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
|
||||
hostcmd->len, hostcmd->cmd);
|
||||
if (err) {
|
||||
kfree(hostcmd);
|
||||
kfree_skb(skb);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
@ -341,6 +341,12 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
|
||||
sleep_cfm_tmp =
|
||||
dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
|
||||
+ MWIFIEX_TYPE_LEN);
|
||||
if (!sleep_cfm_tmp) {
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"SLEEP_CFM: dev_alloc_skb failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
|
||||
+ MWIFIEX_TYPE_LEN);
|
||||
put_unaligned_le32(MWIFIEX_USB_TYPE_CMD, sleep_cfm_tmp->data);
|
||||
|
@ -250,7 +250,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
|
||||
local_rx_pd->nf);
|
||||
}
|
||||
} else {
|
||||
if (rx_pkt_type != PKT_TYPE_BAR)
|
||||
if (rx_pkt_type != PKT_TYPE_BAR &&
|
||||
local_rx_pd->priority < MAX_NUM_TID)
|
||||
priv->rx_seq[local_rx_pd->priority] = seq_num;
|
||||
memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
|
||||
ETH_ALEN);
|
||||
|
@ -13,12 +13,11 @@
|
||||
#define QTNF_MAX_MAC 3
|
||||
|
||||
enum qtnf_fw_state {
|
||||
QTNF_FW_STATE_RESET,
|
||||
QTNF_FW_STATE_FW_DNLD_DONE,
|
||||
QTNF_FW_STATE_DETACHED,
|
||||
QTNF_FW_STATE_BOOT_DONE,
|
||||
QTNF_FW_STATE_ACTIVE,
|
||||
QTNF_FW_STATE_DETACHED,
|
||||
QTNF_FW_STATE_EP_DEAD,
|
||||
QTNF_FW_STATE_RUNNING,
|
||||
QTNF_FW_STATE_DEAD,
|
||||
};
|
||||
|
||||
struct qtnf_bus;
|
||||
@ -50,6 +49,7 @@ struct qtnf_bus {
|
||||
struct napi_struct mux_napi;
|
||||
struct net_device mux_dev;
|
||||
struct workqueue_struct *workqueue;
|
||||
struct workqueue_struct *hprio_workqueue;
|
||||
struct work_struct fw_work;
|
||||
struct work_struct event_work;
|
||||
struct mutex bus_lock; /* lock during command/event processing */
|
||||
@ -58,6 +58,23 @@ struct qtnf_bus {
|
||||
char bus_priv[0] __aligned(sizeof(void *));
|
||||
};
|
||||
|
||||
static inline bool qtnf_fw_is_up(struct qtnf_bus *bus)
|
||||
{
|
||||
enum qtnf_fw_state state = bus->fw_state;
|
||||
|
||||
return ((state == QTNF_FW_STATE_ACTIVE) ||
|
||||
(state == QTNF_FW_STATE_RUNNING));
|
||||
}
|
||||
|
||||
static inline bool qtnf_fw_is_attached(struct qtnf_bus *bus)
|
||||
{
|
||||
enum qtnf_fw_state state = bus->fw_state;
|
||||
|
||||
return ((state == QTNF_FW_STATE_ACTIVE) ||
|
||||
(state == QTNF_FW_STATE_RUNNING) ||
|
||||
(state == QTNF_FW_STATE_DEAD));
|
||||
}
|
||||
|
||||
static inline void *get_bus_priv(struct qtnf_bus *bus)
|
||||
{
|
||||
if (WARN(!bus, "qtnfmac: invalid bus pointer"))
|
||||
|
@ -144,6 +144,7 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
|
||||
{
|
||||
struct net_device *netdev = wdev->netdev;
|
||||
struct qtnf_vif *vif;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (WARN_ON(!netdev))
|
||||
return -EFAULT;
|
||||
@ -157,6 +158,11 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
|
||||
if (netif_carrier_ok(netdev))
|
||||
netif_carrier_off(netdev);
|
||||
|
||||
while ((skb = skb_dequeue(&vif->high_pri_tx_queue)))
|
||||
dev_kfree_skb_any(skb);
|
||||
|
||||
cancel_work_sync(&vif->high_pri_tx_work);
|
||||
|
||||
if (netdev->reg_state == NETREG_REGISTERED)
|
||||
unregister_netdevice(netdev);
|
||||
|
||||
@ -424,13 +430,13 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
|
||||
*cookie = short_cookie;
|
||||
|
||||
if (params->offchan)
|
||||
flags |= QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN;
|
||||
flags |= QLINK_FRAME_TX_FLAG_OFFCHAN;
|
||||
|
||||
if (params->no_cck)
|
||||
flags |= QLINK_MGMT_FRAME_TX_FLAG_NO_CCK;
|
||||
flags |= QLINK_FRAME_TX_FLAG_NO_CCK;
|
||||
|
||||
if (params->dont_wait_for_ack)
|
||||
flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
|
||||
flags |= QLINK_FRAME_TX_FLAG_ACK_NOWAIT;
|
||||
|
||||
/* If channel is not specified, pass "freq = 0" to tell device
|
||||
* firmware to use current channel.
|
||||
@ -445,9 +451,8 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
|
||||
le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
|
||||
params->len, short_cookie, flags);
|
||||
|
||||
return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
|
||||
freq,
|
||||
params->buf, params->len);
|
||||
return qtnf_cmd_send_frame(vif, short_cookie, flags,
|
||||
freq, params->buf, params->len);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -993,53 +998,31 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
|
||||
#endif
|
||||
};
|
||||
|
||||
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
|
||||
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
|
||||
struct regulatory_request *req)
|
||||
{
|
||||
struct qtnf_wmac *mac = wiphy_priv(wiphy_in);
|
||||
struct qtnf_bus *bus = mac->bus;
|
||||
struct wiphy *wiphy;
|
||||
unsigned int mac_idx;
|
||||
struct qtnf_wmac *mac = wiphy_priv(wiphy);
|
||||
enum nl80211_band band;
|
||||
int ret;
|
||||
|
||||
pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
|
||||
req->alpha2[0], req->alpha2[1]);
|
||||
|
||||
ret = qtnf_cmd_reg_notify(bus, req);
|
||||
ret = qtnf_cmd_reg_notify(mac, req);
|
||||
if (ret) {
|
||||
if (ret == -EOPNOTSUPP) {
|
||||
pr_warn("reg update not supported\n");
|
||||
} else if (ret == -EALREADY) {
|
||||
pr_info("regulatory domain is already set to %c%c",
|
||||
req->alpha2[0], req->alpha2[1]);
|
||||
} else {
|
||||
pr_err("failed to update reg domain to %c%c\n",
|
||||
req->alpha2[0], req->alpha2[1]);
|
||||
}
|
||||
|
||||
pr_err("MAC%u: failed to update region to %c%c: %d\n",
|
||||
mac->macid, req->alpha2[0], req->alpha2[1], ret);
|
||||
return;
|
||||
}
|
||||
|
||||
for (mac_idx = 0; mac_idx < QTNF_MAX_MAC; ++mac_idx) {
|
||||
if (!(bus->hw_info.mac_bitmap & (1 << mac_idx)))
|
||||
for (band = 0; band < NUM_NL80211_BANDS; ++band) {
|
||||
if (!wiphy->bands[band])
|
||||
continue;
|
||||
|
||||
mac = bus->mac[mac_idx];
|
||||
if (!mac)
|
||||
continue;
|
||||
|
||||
wiphy = priv_to_wiphy(mac);
|
||||
|
||||
for (band = 0; band < NUM_NL80211_BANDS; ++band) {
|
||||
if (!wiphy->bands[band])
|
||||
continue;
|
||||
|
||||
ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
|
||||
if (ret)
|
||||
pr_err("failed to get chan info for mac %u band %u\n",
|
||||
mac_idx, band);
|
||||
}
|
||||
ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
|
||||
if (ret)
|
||||
pr_err("MAC%u: failed to update band %u\n",
|
||||
mac->macid, band);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1095,6 +1078,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
|
||||
struct wiphy *wiphy = priv_to_wiphy(mac);
|
||||
struct qtnf_mac_info *macinfo = &mac->macinfo;
|
||||
int ret;
|
||||
bool regdomain_is_known;
|
||||
|
||||
if (!wiphy) {
|
||||
pr_err("invalid wiphy pointer\n");
|
||||
@ -1127,7 +1111,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
|
||||
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
|
||||
WIPHY_FLAG_AP_UAPSD |
|
||||
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
|
||||
WIPHY_FLAG_4ADDR_STATION;
|
||||
WIPHY_FLAG_4ADDR_STATION |
|
||||
WIPHY_FLAG_NETNS_OK;
|
||||
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
|
||||
|
||||
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
|
||||
@ -1166,11 +1151,19 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
|
||||
wiphy->wowlan = macinfo->wowlan;
|
||||
#endif
|
||||
|
||||
regdomain_is_known = isalpha(mac->rd->alpha2[0]) &&
|
||||
isalpha(mac->rd->alpha2[1]);
|
||||
|
||||
if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
|
||||
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
|
||||
REGULATORY_CUSTOM_REG;
|
||||
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
|
||||
wiphy_apply_custom_regulatory(wiphy, hw_info->rd);
|
||||
|
||||
if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') {
|
||||
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
|
||||
REGULATORY_STRICT_REG;
|
||||
wiphy_apply_custom_regulatory(wiphy, mac->rd);
|
||||
} else if (regdomain_is_known) {
|
||||
wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
|
||||
}
|
||||
} else {
|
||||
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
|
||||
}
|
||||
@ -1193,10 +1186,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
|
||||
goto out;
|
||||
|
||||
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
|
||||
ret = regulatory_set_wiphy_regd(wiphy, hw_info->rd);
|
||||
else if (isalpha(hw_info->rd->alpha2[0]) &&
|
||||
isalpha(hw_info->rd->alpha2[1]))
|
||||
ret = regulatory_hint(wiphy, hw_info->rd->alpha2);
|
||||
ret = regulatory_set_wiphy_regd(wiphy, mac->rd);
|
||||
else if (regdomain_is_known)
|
||||
ret = regulatory_hint(wiphy, mac->rd->alpha2);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
|
@ -11,6 +11,13 @@
|
||||
#include "bus.h"
|
||||
#include "commands.h"
|
||||
|
||||
#define QTNF_SCAN_TIME_AUTO 0
|
||||
|
||||
/* Let device itself to select best values for current conditions */
|
||||
#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT QTNF_SCAN_TIME_AUTO
|
||||
#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT QTNF_SCAN_TIME_AUTO
|
||||
#define QTNF_SCAN_SAMPLE_DURATION_DEFAULT QTNF_SCAN_TIME_AUTO
|
||||
|
||||
static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp,
|
||||
u16 cmd_id, u8 mac_id, u8 vif_id,
|
||||
size_t resp_size)
|
||||
@ -89,8 +96,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
|
||||
|
||||
pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id);
|
||||
|
||||
if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
|
||||
cmd_id != QLINK_CMD_FW_INIT) {
|
||||
if (!qtnf_fw_is_up(bus) && cmd_id != QLINK_CMD_FW_INIT) {
|
||||
pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
|
||||
mac_id, vif_id, cmd_id, bus->fw_state);
|
||||
dev_kfree_skb(cmd_skb);
|
||||
@ -177,14 +183,6 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
|
||||
memcpy(tlv->ie_data, buf, len);
|
||||
}
|
||||
|
||||
static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl)
|
||||
{
|
||||
size_t size = sizeof(struct qlink_acl_data) +
|
||||
acl->n_acl_entries * sizeof(struct qlink_mac_address);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
|
||||
const struct cfg80211_ap_settings *s)
|
||||
{
|
||||
@ -203,7 +201,7 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
|
||||
|
||||
if (s->acl)
|
||||
len += sizeof(struct qlink_tlv_hdr) +
|
||||
qtnf_cmd_acl_data_size(s->acl);
|
||||
struct_size(s->acl, mac_addrs, s->acl->n_acl_entries);
|
||||
|
||||
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
|
||||
pr_err("VIF%u.%u: can not fit AP settings: %u\n",
|
||||
@ -310,7 +308,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
|
||||
}
|
||||
|
||||
if (s->acl) {
|
||||
size_t acl_size = qtnf_cmd_acl_data_size(s->acl);
|
||||
size_t acl_size = struct_size(s->acl, mac_addrs,
|
||||
s->acl->n_acl_entries);
|
||||
struct qlink_tlv_hdr *tlv =
|
||||
skb_put(cmd_skb, sizeof(*tlv) + acl_size);
|
||||
|
||||
@ -382,11 +381,11 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
|
||||
u16 freq, const u8 *buf, size_t len)
|
||||
int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
|
||||
u16 freq, const u8 *buf, size_t len)
|
||||
{
|
||||
struct sk_buff *cmd_skb;
|
||||
struct qlink_cmd_mgmt_frame_tx *cmd;
|
||||
struct qlink_cmd_frame_tx *cmd;
|
||||
int ret;
|
||||
|
||||
if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
|
||||
@ -396,14 +395,14 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
|
||||
}
|
||||
|
||||
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
|
||||
QLINK_CMD_SEND_MGMT_FRAME,
|
||||
QLINK_CMD_SEND_FRAME,
|
||||
sizeof(*cmd));
|
||||
if (!cmd_skb)
|
||||
return -ENOMEM;
|
||||
|
||||
qtnf_bus_lock(vif->mac->bus);
|
||||
|
||||
cmd = (struct qlink_cmd_mgmt_frame_tx *)cmd_skb->data;
|
||||
cmd = (struct qlink_cmd_frame_tx *)cmd_skb->data;
|
||||
cmd->cookie = cpu_to_le32(cookie);
|
||||
cmd->freq = cpu_to_le16(freq);
|
||||
cmd->flags = cpu_to_le16(flags);
|
||||
@ -786,8 +785,25 @@ int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
|
||||
int use4addr,
|
||||
u8 *mac_addr)
|
||||
{
|
||||
return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
|
||||
QLINK_CMD_CHANGE_INTF);
|
||||
int ret;
|
||||
|
||||
ret = qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
|
||||
QLINK_CMD_CHANGE_INTF);
|
||||
|
||||
/* Regulatory settings may be different for different interface types */
|
||||
if (ret == 0 && vif->wdev.iftype != iftype) {
|
||||
enum nl80211_band band;
|
||||
struct wiphy *wiphy = priv_to_wiphy(vif->mac);
|
||||
|
||||
for (band = 0; band < NUM_NL80211_BANDS; ++band) {
|
||||
if (!wiphy->bands[band])
|
||||
continue;
|
||||
|
||||
qtnf_cmd_band_info_get(vif->mac, wiphy->bands[band]);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
|
||||
@ -831,55 +847,6 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 qtnf_cmd_resp_reg_rule_flags_parse(u32 qflags)
|
||||
{
|
||||
u32 flags = 0;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_OFDM)
|
||||
flags |= NL80211_RRF_NO_OFDM;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_CCK)
|
||||
flags |= NL80211_RRF_NO_CCK;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_INDOOR)
|
||||
flags |= NL80211_RRF_NO_INDOOR;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_OUTDOOR)
|
||||
flags |= NL80211_RRF_NO_OUTDOOR;
|
||||
|
||||
if (qflags & QLINK_RRF_DFS)
|
||||
flags |= NL80211_RRF_DFS;
|
||||
|
||||
if (qflags & QLINK_RRF_PTP_ONLY)
|
||||
flags |= NL80211_RRF_PTP_ONLY;
|
||||
|
||||
if (qflags & QLINK_RRF_PTMP_ONLY)
|
||||
flags |= NL80211_RRF_PTMP_ONLY;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_IR)
|
||||
flags |= NL80211_RRF_NO_IR;
|
||||
|
||||
if (qflags & QLINK_RRF_AUTO_BW)
|
||||
flags |= NL80211_RRF_AUTO_BW;
|
||||
|
||||
if (qflags & QLINK_RRF_IR_CONCURRENT)
|
||||
flags |= NL80211_RRF_IR_CONCURRENT;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_HT40MINUS)
|
||||
flags |= NL80211_RRF_NO_HT40MINUS;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_HT40PLUS)
|
||||
flags |= NL80211_RRF_NO_HT40PLUS;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_80MHZ)
|
||||
flags |= NL80211_RRF_NO_80MHZ;
|
||||
|
||||
if (qflags & QLINK_RRF_NO_160MHZ)
|
||||
flags |= NL80211_RRF_NO_160MHZ;
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
static int
|
||||
qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
|
||||
const struct qlink_resp_get_hw_info *resp,
|
||||
@ -887,7 +854,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
|
||||
{
|
||||
struct qtnf_hw_info *hwinfo = &bus->hw_info;
|
||||
const struct qlink_tlv_hdr *tlv;
|
||||
const struct qlink_tlv_reg_rule *tlv_rule;
|
||||
const char *bld_name = NULL;
|
||||
const char *bld_rev = NULL;
|
||||
const char *bld_type = NULL;
|
||||
@ -898,19 +864,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
|
||||
const char *calibration_ver = NULL;
|
||||
const char *uboot_ver = NULL;
|
||||
u32 hw_ver = 0;
|
||||
struct ieee80211_reg_rule *rule;
|
||||
u16 tlv_type;
|
||||
u16 tlv_value_len;
|
||||
unsigned int rule_idx = 0;
|
||||
|
||||
if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
|
||||
return -E2BIG;
|
||||
|
||||
hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules,
|
||||
resp->n_reg_rules), GFP_KERNEL);
|
||||
|
||||
if (!hwinfo->rd)
|
||||
return -ENOMEM;
|
||||
|
||||
hwinfo->num_mac = resp->num_mac;
|
||||
hwinfo->mac_bitmap = resp->mac_bitmap;
|
||||
@ -919,30 +874,11 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
|
||||
hwinfo->total_tx_chain = resp->total_tx_chain;
|
||||
hwinfo->total_rx_chain = resp->total_rx_chain;
|
||||
hwinfo->hw_capab = le32_to_cpu(resp->hw_capab);
|
||||
hwinfo->rd->n_reg_rules = resp->n_reg_rules;
|
||||
hwinfo->rd->alpha2[0] = resp->alpha2[0];
|
||||
hwinfo->rd->alpha2[1] = resp->alpha2[1];
|
||||
|
||||
bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
|
||||
plat_id = le32_to_cpu(resp->plat_id);
|
||||
hw_ver = le32_to_cpu(resp->hw_ver);
|
||||
|
||||
switch (resp->dfs_region) {
|
||||
case QLINK_DFS_FCC:
|
||||
hwinfo->rd->dfs_region = NL80211_DFS_FCC;
|
||||
break;
|
||||
case QLINK_DFS_ETSI:
|
||||
hwinfo->rd->dfs_region = NL80211_DFS_ETSI;
|
||||
break;
|
||||
case QLINK_DFS_JP:
|
||||
hwinfo->rd->dfs_region = NL80211_DFS_JP;
|
||||
break;
|
||||
case QLINK_DFS_UNSET:
|
||||
default:
|
||||
hwinfo->rd->dfs_region = NL80211_DFS_UNSET;
|
||||
break;
|
||||
}
|
||||
|
||||
tlv = (const struct qlink_tlv_hdr *)resp->info;
|
||||
|
||||
while (info_len >= sizeof(*tlv)) {
|
||||
@ -956,37 +892,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
|
||||
}
|
||||
|
||||
switch (tlv_type) {
|
||||
case QTN_TLV_ID_REG_RULE:
|
||||
if (rule_idx >= resp->n_reg_rules) {
|
||||
pr_warn("unexpected number of rules: %u\n",
|
||||
resp->n_reg_rules);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) {
|
||||
pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
|
||||
tlv_type, tlv_value_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tlv_rule = (const struct qlink_tlv_reg_rule *)tlv;
|
||||
rule = &hwinfo->rd->reg_rules[rule_idx++];
|
||||
|
||||
rule->freq_range.start_freq_khz =
|
||||
le32_to_cpu(tlv_rule->start_freq_khz);
|
||||
rule->freq_range.end_freq_khz =
|
||||
le32_to_cpu(tlv_rule->end_freq_khz);
|
||||
rule->freq_range.max_bandwidth_khz =
|
||||
le32_to_cpu(tlv_rule->max_bandwidth_khz);
|
||||
rule->power_rule.max_antenna_gain =
|
||||
le32_to_cpu(tlv_rule->max_antenna_gain);
|
||||
rule->power_rule.max_eirp =
|
||||
le32_to_cpu(tlv_rule->max_eirp);
|
||||
rule->dfs_cac_ms =
|
||||
le32_to_cpu(tlv_rule->dfs_cac_ms);
|
||||
rule->flags = qtnf_cmd_resp_reg_rule_flags_parse(
|
||||
le32_to_cpu(tlv_rule->flags));
|
||||
break;
|
||||
case QTN_TLV_ID_BUILD_NAME:
|
||||
bld_name = (const void *)tlv->val;
|
||||
break;
|
||||
@ -1019,17 +924,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
|
||||
tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
|
||||
}
|
||||
|
||||
if (rule_idx != resp->n_reg_rules) {
|
||||
pr_warn("unexpected number of rules: expected %u got %u\n",
|
||||
resp->n_reg_rules, rule_idx);
|
||||
kfree(hwinfo->rd);
|
||||
hwinfo->rd = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u, capab=0x%x\n",
|
||||
pr_info("fw_version=%d, MACs map %#x, chains Tx=%u Rx=%u, capab=0x%x\n",
|
||||
hwinfo->fw_ver, hwinfo->mac_bitmap,
|
||||
hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1],
|
||||
hwinfo->total_tx_chain, hwinfo->total_rx_chain,
|
||||
hwinfo->hw_capab);
|
||||
|
||||
@ -1042,7 +938,7 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
|
||||
"\nHardware ID: %s" \
|
||||
"\nCalibration version: %s" \
|
||||
"\nU-Boot version: %s" \
|
||||
"\nHardware version: 0x%08x",
|
||||
"\nHardware version: 0x%08x\n",
|
||||
bld_name, bld_rev, bld_type, bld_label,
|
||||
(unsigned long)bld_tmstamp,
|
||||
(unsigned long)plat_id,
|
||||
@ -1085,9 +981,12 @@ qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
|
||||
}
|
||||
}
|
||||
|
||||
static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
|
||||
const u8 *tlv_buf, size_t tlv_buf_size)
|
||||
static int
|
||||
qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
|
||||
const struct qlink_resp_get_mac_info *resp,
|
||||
size_t tlv_buf_size)
|
||||
{
|
||||
const u8 *tlv_buf = resp->var_info;
|
||||
struct ieee80211_iface_combination *comb = NULL;
|
||||
size_t n_comb = 0;
|
||||
struct ieee80211_iface_limit *limits;
|
||||
@ -1105,6 +1004,38 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
|
||||
u8 ext_capa_len = 0;
|
||||
u8 ext_capa_mask_len = 0;
|
||||
int i = 0;
|
||||
struct ieee80211_reg_rule *rule;
|
||||
unsigned int rule_idx = 0;
|
||||
const struct qlink_tlv_reg_rule *tlv_rule;
|
||||
|
||||
if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
|
||||
return -E2BIG;
|
||||
|
||||
mac->rd = kzalloc(sizeof(*mac->rd) +
|
||||
sizeof(struct ieee80211_reg_rule) *
|
||||
resp->n_reg_rules, GFP_KERNEL);
|
||||
if (!mac->rd)
|
||||
return -ENOMEM;
|
||||
|
||||
mac->rd->n_reg_rules = resp->n_reg_rules;
|
||||
mac->rd->alpha2[0] = resp->alpha2[0];
|
||||
mac->rd->alpha2[1] = resp->alpha2[1];
|
||||
|
||||
switch (resp->dfs_region) {
|
||||
case QLINK_DFS_FCC:
|
||||
mac->rd->dfs_region = NL80211_DFS_FCC;
|
||||
break;
|
||||
case QLINK_DFS_ETSI:
|
||||
mac->rd->dfs_region = NL80211_DFS_ETSI;
|
||||
break;
|
||||
case QLINK_DFS_JP:
|
||||
mac->rd->dfs_region = NL80211_DFS_JP;
|
||||
break;
|
||||
case QLINK_DFS_UNSET:
|
||||
default:
|
||||
mac->rd->dfs_region = NL80211_DFS_UNSET;
|
||||
break;
|
||||
}
|
||||
|
||||
tlv = (const struct qlink_tlv_hdr *)tlv_buf;
|
||||
while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
|
||||
@ -1225,6 +1156,23 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
|
||||
mac->macinfo.wowlan = NULL;
|
||||
qtnf_parse_wowlan_info(mac, wowlan);
|
||||
break;
|
||||
case QTN_TLV_ID_REG_RULE:
|
||||
if (rule_idx >= resp->n_reg_rules) {
|
||||
pr_warn("unexpected number of rules: %u\n",
|
||||
resp->n_reg_rules);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) {
|
||||
pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
|
||||
tlv_type, tlv_value_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tlv_rule = (const struct qlink_tlv_reg_rule *)tlv;
|
||||
rule = &mac->rd->reg_rules[rule_idx++];
|
||||
qlink_utils_regrule_q2nl(rule, tlv_rule);
|
||||
break;
|
||||
default:
|
||||
pr_warn("MAC%u: unknown TLV type %u\n",
|
||||
mac->macid, tlv_type);
|
||||
@ -1253,6 +1201,12 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rule_idx != resp->n_reg_rules) {
|
||||
pr_warn("unexpected number of rules: expected %u got %u\n",
|
||||
resp->n_reg_rules, rule_idx);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ext_capa_len > 0) {
|
||||
ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL);
|
||||
if (!ext_capa)
|
||||
@ -1663,7 +1617,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
|
||||
|
||||
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
|
||||
qtnf_cmd_resp_proc_mac_info(mac, resp);
|
||||
ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len);
|
||||
ret = qtnf_parse_variable_mac_info(mac, resp, var_data_len);
|
||||
|
||||
out:
|
||||
qtnf_bus_unlock(mac->bus);
|
||||
@ -1709,21 +1663,7 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
|
||||
struct qlink_resp_band_info_get *resp;
|
||||
size_t info_len = 0;
|
||||
int ret = 0;
|
||||
u8 qband;
|
||||
|
||||
switch (band->band) {
|
||||
case NL80211_BAND_2GHZ:
|
||||
qband = QLINK_BAND_2GHZ;
|
||||
break;
|
||||
case NL80211_BAND_5GHZ:
|
||||
qband = QLINK_BAND_5GHZ;
|
||||
break;
|
||||
case NL80211_BAND_60GHZ:
|
||||
qband = QLINK_BAND_60GHZ;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
u8 qband = qlink_utils_band_cfg2q(band->band);
|
||||
|
||||
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
|
||||
QLINK_CMD_BAND_INFO_GET,
|
||||
@ -2107,22 +2047,23 @@ out:
|
||||
static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
|
||||
const struct ieee80211_channel *sc)
|
||||
{
|
||||
struct qlink_tlv_channel *qchan;
|
||||
u32 flags = 0;
|
||||
struct qlink_tlv_channel *tlv;
|
||||
struct qlink_channel *qch;
|
||||
|
||||
qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
|
||||
qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
|
||||
qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
|
||||
qchan->chan.center_freq = cpu_to_le16(sc->center_freq);
|
||||
qchan->chan.hw_value = cpu_to_le16(sc->hw_value);
|
||||
tlv = skb_put_zero(cmd_skb, sizeof(*tlv));
|
||||
qch = &tlv->chan;
|
||||
tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
|
||||
tlv->hdr.len = cpu_to_le16(sizeof(*qch));
|
||||
|
||||
if (sc->flags & IEEE80211_CHAN_NO_IR)
|
||||
flags |= QLINK_CHAN_NO_IR;
|
||||
|
||||
if (sc->flags & IEEE80211_CHAN_RADAR)
|
||||
flags |= QLINK_CHAN_RADAR;
|
||||
|
||||
qchan->chan.flags = cpu_to_le32(flags);
|
||||
qch->center_freq = cpu_to_le16(sc->center_freq);
|
||||
qch->hw_value = cpu_to_le16(sc->hw_value);
|
||||
qch->band = qlink_utils_band_cfg2q(sc->band);
|
||||
qch->max_power = sc->max_power;
|
||||
qch->max_reg_power = sc->max_reg_power;
|
||||
qch->max_antenna_gain = sc->max_antenna_gain;
|
||||
qch->beacon_found = sc->beacon_found;
|
||||
qch->dfs_state = qlink_utils_dfs_state_cfg2q(sc->dfs_state);
|
||||
qch->flags = cpu_to_le32(qlink_utils_chflags_cfg2q(sc->flags));
|
||||
}
|
||||
|
||||
static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
|
||||
@ -2141,6 +2082,35 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
|
||||
memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
|
||||
}
|
||||
|
||||
static void qtnf_cmd_scan_set_dwell(struct qtnf_wmac *mac,
|
||||
struct sk_buff *cmd_skb)
|
||||
{
|
||||
struct cfg80211_scan_request *scan_req = mac->scan_req;
|
||||
u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
|
||||
u16 dwell_passive = QTNF_SCAN_DWELL_PASSIVE_DEFAULT;
|
||||
u16 duration = QTNF_SCAN_SAMPLE_DURATION_DEFAULT;
|
||||
|
||||
if (scan_req->duration) {
|
||||
dwell_active = scan_req->duration;
|
||||
dwell_passive = scan_req->duration;
|
||||
}
|
||||
|
||||
pr_debug("MAC%u: %s scan dwell active=%u, passive=%u, duration=%u\n",
|
||||
mac->macid,
|
||||
scan_req->duration_mandatory ? "mandatory" : "max",
|
||||
dwell_active, dwell_passive, duration);
|
||||
|
||||
qtnf_cmd_skb_put_tlv_u16(cmd_skb,
|
||||
QTN_TLV_ID_SCAN_DWELL_ACTIVE,
|
||||
dwell_active);
|
||||
qtnf_cmd_skb_put_tlv_u16(cmd_skb,
|
||||
QTN_TLV_ID_SCAN_DWELL_PASSIVE,
|
||||
dwell_passive);
|
||||
qtnf_cmd_skb_put_tlv_u16(cmd_skb,
|
||||
QTN_TLV_ID_SCAN_SAMPLE_DURATION,
|
||||
duration);
|
||||
}
|
||||
|
||||
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
|
||||
{
|
||||
struct sk_buff *cmd_skb;
|
||||
@ -2192,6 +2162,8 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
|
||||
}
|
||||
}
|
||||
|
||||
qtnf_cmd_scan_set_dwell(mac, cmd_skb);
|
||||
|
||||
if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
|
||||
pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
|
||||
mac->macid,
|
||||
@ -2207,15 +2179,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
|
||||
qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
|
||||
}
|
||||
|
||||
if (scan_req->duration) {
|
||||
pr_debug("MAC%u: %s scan duration %u\n", mac->macid,
|
||||
scan_req->duration_mandatory ? "mandatory" : "max",
|
||||
scan_req->duration);
|
||||
|
||||
qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL,
|
||||
scan_req->duration);
|
||||
}
|
||||
|
||||
ret = qtnf_cmd_send(mac->bus, cmd_skb);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -2404,13 +2367,17 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
|
||||
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req)
|
||||
{
|
||||
struct wiphy *wiphy = priv_to_wiphy(mac);
|
||||
struct qtnf_bus *bus = mac->bus;
|
||||
struct sk_buff *cmd_skb;
|
||||
int ret;
|
||||
struct qlink_cmd_reg_notify *cmd;
|
||||
enum nl80211_band band;
|
||||
const struct ieee80211_supported_band *cfg_band;
|
||||
|
||||
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
|
||||
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
|
||||
QLINK_CMD_REG_NOTIFY,
|
||||
sizeof(*cmd));
|
||||
if (!cmd_skb)
|
||||
@ -2447,12 +2414,40 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
|
||||
break;
|
||||
}
|
||||
|
||||
switch (req->dfs_region) {
|
||||
case NL80211_DFS_FCC:
|
||||
cmd->dfs_region = QLINK_DFS_FCC;
|
||||
break;
|
||||
case NL80211_DFS_ETSI:
|
||||
cmd->dfs_region = QLINK_DFS_ETSI;
|
||||
break;
|
||||
case NL80211_DFS_JP:
|
||||
cmd->dfs_region = QLINK_DFS_JP;
|
||||
break;
|
||||
default:
|
||||
cmd->dfs_region = QLINK_DFS_UNSET;
|
||||
break;
|
||||
}
|
||||
|
||||
cmd->num_channels = 0;
|
||||
|
||||
for (band = 0; band < NUM_NL80211_BANDS; band++) {
|
||||
unsigned int i;
|
||||
|
||||
cfg_band = wiphy->bands[band];
|
||||
if (!cfg_band)
|
||||
continue;
|
||||
|
||||
cmd->num_channels += cfg_band->n_channels;
|
||||
|
||||
for (i = 0; i < cfg_band->n_channels; ++i) {
|
||||
qtnf_cmd_channel_tlv_add(cmd_skb,
|
||||
&cfg_band->channels[i]);
|
||||
}
|
||||
}
|
||||
|
||||
qtnf_bus_lock(bus);
|
||||
ret = qtnf_cmd_send(bus, cmd_skb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
out:
|
||||
qtnf_bus_unlock(bus);
|
||||
|
||||
return ret;
|
||||
@ -2592,7 +2587,7 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
|
||||
struct qtnf_bus *bus = vif->mac->bus;
|
||||
struct sk_buff *cmd_skb;
|
||||
struct qlink_tlv_hdr *tlv;
|
||||
size_t acl_size = qtnf_cmd_acl_data_size(params);
|
||||
size_t acl_size = struct_size(params, mac_addrs, params->n_acl_entries);
|
||||
int ret;
|
||||
|
||||
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
|
||||
|
@ -27,8 +27,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
|
||||
const struct cfg80211_ap_settings *s);
|
||||
int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif);
|
||||
int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg);
|
||||
int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
|
||||
u16 freq, const u8 *buf, size_t len);
|
||||
int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
|
||||
u16 freq, const u8 *buf, size_t len);
|
||||
int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
|
||||
const u8 *buf, size_t len);
|
||||
int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
|
||||
@ -57,7 +57,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
|
||||
u16 reason_code);
|
||||
int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
|
||||
bool up);
|
||||
int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req);
|
||||
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req);
|
||||
int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
|
||||
struct qtnf_chan_stats *stats);
|
||||
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
|
||||
|
@ -368,6 +368,23 @@ static void qtnf_mac_scan_timeout(struct work_struct *work)
|
||||
qtnf_mac_scan_finish(mac, true);
|
||||
}
|
||||
|
||||
static void qtnf_vif_send_data_high_pri(struct work_struct *work)
|
||||
{
|
||||
struct qtnf_vif *vif =
|
||||
container_of(work, struct qtnf_vif, high_pri_tx_work);
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!vif->netdev ||
|
||||
vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)
|
||||
return;
|
||||
|
||||
while ((skb = skb_dequeue(&vif->high_pri_tx_queue))) {
|
||||
qtnf_cmd_send_frame(vif, 0, QLINK_FRAME_TX_FLAG_8023,
|
||||
0, skb->data, skb->len);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
}
|
||||
|
||||
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
|
||||
unsigned int macid)
|
||||
{
|
||||
@ -395,7 +412,8 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
|
||||
vif->mac = mac;
|
||||
vif->vifid = i;
|
||||
qtnf_sta_list_init(&vif->sta_list);
|
||||
|
||||
INIT_WORK(&vif->high_pri_tx_work, qtnf_vif_send_data_high_pri);
|
||||
skb_queue_head_init(&vif->high_pri_tx_queue);
|
||||
vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
||||
if (!vif->stats64)
|
||||
pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
|
||||
@ -499,6 +517,8 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
|
||||
qtnf_mac_iface_comb_free(mac);
|
||||
qtnf_mac_ext_caps_free(mac);
|
||||
kfree(mac->macinfo.wowlan);
|
||||
kfree(mac->rd);
|
||||
mac->rd = NULL;
|
||||
wiphy_free(wiphy);
|
||||
bus->mac[macid] = NULL;
|
||||
}
|
||||
@ -587,8 +607,6 @@ int qtnf_core_attach(struct qtnf_bus *bus)
|
||||
int ret;
|
||||
|
||||
qtnf_trans_init(bus);
|
||||
|
||||
bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
|
||||
qtnf_bus_data_rx_start(bus);
|
||||
|
||||
bus->workqueue = alloc_ordered_workqueue("QTNF_BUS", 0);
|
||||
@ -598,6 +616,13 @@ int qtnf_core_attach(struct qtnf_bus *bus)
|
||||
goto error;
|
||||
}
|
||||
|
||||
bus->hprio_workqueue = alloc_workqueue("QTNF_HPRI", WQ_HIGHPRI, 0);
|
||||
if (!bus->hprio_workqueue) {
|
||||
pr_err("failed to alloc high prio workqueue\n");
|
||||
ret = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
INIT_WORK(&bus->event_work, qtnf_event_work_handler);
|
||||
|
||||
ret = qtnf_cmd_send_init_fw(bus);
|
||||
@ -607,7 +632,6 @@ int qtnf_core_attach(struct qtnf_bus *bus)
|
||||
}
|
||||
|
||||
bus->fw_state = QTNF_FW_STATE_ACTIVE;
|
||||
|
||||
ret = qtnf_cmd_get_hw_info(bus);
|
||||
if (ret) {
|
||||
pr_err("failed to get HW info: %d\n", ret);
|
||||
@ -637,11 +661,11 @@ int qtnf_core_attach(struct qtnf_bus *bus)
|
||||
}
|
||||
}
|
||||
|
||||
bus->fw_state = QTNF_FW_STATE_RUNNING;
|
||||
return 0;
|
||||
|
||||
error:
|
||||
qtnf_core_detach(bus);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qtnf_core_attach);
|
||||
@ -655,7 +679,7 @@ void qtnf_core_detach(struct qtnf_bus *bus)
|
||||
for (macid = 0; macid < QTNF_MAX_MAC; macid++)
|
||||
qtnf_core_mac_detach(bus, macid);
|
||||
|
||||
if (bus->fw_state == QTNF_FW_STATE_ACTIVE)
|
||||
if (qtnf_fw_is_up(bus))
|
||||
qtnf_cmd_send_deinit_fw(bus);
|
||||
|
||||
bus->fw_state = QTNF_FW_STATE_DETACHED;
|
||||
@ -663,10 +687,14 @@ void qtnf_core_detach(struct qtnf_bus *bus)
|
||||
if (bus->workqueue) {
|
||||
flush_workqueue(bus->workqueue);
|
||||
destroy_workqueue(bus->workqueue);
|
||||
bus->workqueue = NULL;
|
||||
}
|
||||
|
||||
kfree(bus->hw_info.rd);
|
||||
bus->hw_info.rd = NULL;
|
||||
if (bus->hprio_workqueue) {
|
||||
flush_workqueue(bus->hprio_workqueue);
|
||||
destroy_workqueue(bus->hprio_workqueue);
|
||||
bus->hprio_workqueue = NULL;
|
||||
}
|
||||
|
||||
qtnf_trans_free(bus);
|
||||
}
|
||||
@ -684,6 +712,9 @@ struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
|
||||
struct qtnf_wmac *mac;
|
||||
struct qtnf_vif *vif;
|
||||
|
||||
if (unlikely(bus->fw_state != QTNF_FW_STATE_RUNNING))
|
||||
return NULL;
|
||||
|
||||
meta = (struct qtnf_frame_meta_info *)
|
||||
(skb_tail_pointer(skb) - sizeof(*meta));
|
||||
|
||||
@ -799,6 +830,15 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
|
||||
|
||||
void qtnf_packet_send_hi_pri(struct sk_buff *skb)
|
||||
{
|
||||
struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
|
||||
|
||||
skb_queue_tail(&vif->high_pri_tx_queue, skb);
|
||||
queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
|
||||
|
||||
MODULE_AUTHOR("Quantenna Communications");
|
||||
MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -63,6 +63,8 @@ struct qtnf_vif {
|
||||
struct qtnf_wmac *mac;
|
||||
|
||||
struct work_struct reset_work;
|
||||
struct work_struct high_pri_tx_work;
|
||||
struct sk_buff_head high_pri_tx_queue;
|
||||
struct qtnf_sta_list sta_list;
|
||||
unsigned long cons_tx_timeout_cnt;
|
||||
int generation;
|
||||
@ -112,6 +114,7 @@ struct qtnf_wmac {
|
||||
struct cfg80211_scan_request *scan_req;
|
||||
struct mutex mac_lock; /* lock during wmac speicific ops */
|
||||
struct delayed_work scan_timeout;
|
||||
struct ieee80211_regdomain *rd;
|
||||
};
|
||||
|
||||
struct qtnf_hw_info {
|
||||
@ -120,7 +123,6 @@ struct qtnf_hw_info {
|
||||
u8 mac_bitmap;
|
||||
u32 fw_ver;
|
||||
u32 hw_capab;
|
||||
struct ieee80211_regdomain *rd;
|
||||
u8 total_tx_chain;
|
||||
u8 total_rx_chain;
|
||||
char fw_version[ETHTOOL_FWVERS_LEN];
|
||||
@ -149,6 +151,7 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
|
||||
|
||||
void qtnf_netdev_updown(struct net_device *ndev, bool up);
|
||||
void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
|
||||
void qtnf_packet_send_hi_pri(struct sk_buff *skb);
|
||||
|
||||
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
|
||||
{
|
||||
|
@ -56,7 +56,7 @@ int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
|
||||
|
||||
if (ret == -ETIMEDOUT) {
|
||||
pr_err("EP firmware is dead\n");
|
||||
bus->fw_state = QTNF_FW_STATE_EP_DEAD;
|
||||
bus->fw_state = QTNF_FW_STATE_DEAD;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -128,32 +128,22 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success)
|
||||
int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
|
||||
{
|
||||
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
|
||||
struct pci_dev *pdev = priv->pdev;
|
||||
int ret;
|
||||
|
||||
if (boot_success) {
|
||||
bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
|
||||
|
||||
ret = qtnf_core_attach(bus);
|
||||
if (ret) {
|
||||
pr_err("failed to attach core\n");
|
||||
boot_success = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (boot_success) {
|
||||
bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
|
||||
ret = qtnf_core_attach(bus);
|
||||
if (ret) {
|
||||
pr_err("failed to attach core\n");
|
||||
} else {
|
||||
qtnf_debugfs_init(bus, DRV_NAME);
|
||||
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
|
||||
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
|
||||
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
|
||||
} else {
|
||||
bus->fw_state = QTNF_FW_STATE_DETACHED;
|
||||
}
|
||||
|
||||
put_device(&pdev->dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void qtnf_tune_pcie_mps(struct pci_dev *pdev)
|
||||
@ -344,7 +334,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pcie_priv = get_bus_priv(bus);
|
||||
pci_set_drvdata(pdev, bus);
|
||||
bus->dev = &pdev->dev;
|
||||
bus->fw_state = QTNF_FW_STATE_RESET;
|
||||
bus->fw_state = QTNF_FW_STATE_DETACHED;
|
||||
pcie_priv->pdev = pdev;
|
||||
pcie_priv->tx_stopped = 0;
|
||||
pcie_priv->rx_bd_num = rx_bd_size_param;
|
||||
@ -364,6 +354,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pcie_priv->pcie_irq_count = 0;
|
||||
pcie_priv->tx_reclaim_done = 0;
|
||||
pcie_priv->tx_reclaim_req = 0;
|
||||
pcie_priv->tx_eapol = 0;
|
||||
|
||||
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
|
||||
if (!pcie_priv->workqueue) {
|
||||
@ -419,8 +410,7 @@ static void qtnf_pcie_remove(struct pci_dev *dev)
|
||||
|
||||
cancel_work_sync(&bus->fw_work);
|
||||
|
||||
if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
|
||||
bus->fw_state == QTNF_FW_STATE_EP_DEAD)
|
||||
if (qtnf_fw_is_attached(bus))
|
||||
qtnf_core_detach(bus);
|
||||
|
||||
netif_napi_del(&bus->mux_napi);
|
||||
|
@ -62,6 +62,7 @@ struct qtnf_pcie_bus_priv {
|
||||
u32 tx_done_count;
|
||||
u32 tx_reclaim_done;
|
||||
u32 tx_reclaim_req;
|
||||
u32 tx_eapol;
|
||||
|
||||
u8 msi_enabled;
|
||||
u8 tx_stopped;
|
||||
@ -70,7 +71,7 @@ struct qtnf_pcie_bus_priv {
|
||||
|
||||
int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
|
||||
int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv);
|
||||
void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success);
|
||||
int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus);
|
||||
void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
|
||||
struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
|
||||
struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
|
||||
|
@ -980,12 +980,11 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
|
||||
struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
|
||||
u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
|
||||
const char *fwname = QTN_PCI_PEARL_FW_NAME;
|
||||
struct pci_dev *pdev = ps->base.pdev;
|
||||
const struct firmware *fw;
|
||||
int ret;
|
||||
u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
|
||||
const char *fwname = QTN_PCI_PEARL_FW_NAME;
|
||||
bool fw_boot_success = false;
|
||||
|
||||
if (ps->base.flashboot) {
|
||||
state |= QTN_RC_FW_FLASHBOOT;
|
||||
@ -1031,23 +1030,23 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
|
||||
goto fw_load_exit;
|
||||
}
|
||||
|
||||
pr_info("firmware is up and running\n");
|
||||
|
||||
if (qtnf_poll_state(&ps->bda->bda_ep_state,
|
||||
QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
|
||||
pr_err("firmware runtime failure\n");
|
||||
goto fw_load_exit;
|
||||
}
|
||||
|
||||
fw_boot_success = true;
|
||||
pr_info("firmware is up and running\n");
|
||||
|
||||
ret = qtnf_pcie_fw_boot_done(bus);
|
||||
if (ret)
|
||||
goto fw_load_exit;
|
||||
|
||||
qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
|
||||
qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
|
||||
|
||||
fw_load_exit:
|
||||
qtnf_pcie_fw_boot_done(bus, fw_boot_success);
|
||||
|
||||
if (fw_boot_success) {
|
||||
qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
|
||||
qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
|
||||
}
|
||||
put_device(&pdev->dev);
|
||||
}
|
||||
|
||||
static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
|
||||
|
@ -498,6 +498,13 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
|
||||
int len;
|
||||
int i;
|
||||
|
||||
if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
|
||||
qtnf_packet_send_hi_pri(skb);
|
||||
qtnf_update_tx_stats(skb->dev, skb);
|
||||
priv->tx_eapol++;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->tx_lock, flags);
|
||||
|
||||
if (!qtnf_tx_queue_ready(ts)) {
|
||||
@ -761,6 +768,7 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
|
||||
seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
|
||||
seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
|
||||
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
|
||||
seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
|
||||
|
||||
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
|
||||
seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
|
||||
@@ -1023,8 +1031,9 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	int ret;
	int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
	struct pci_dev *pdev = ts->base.pdev;
	int ret;

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);

@@ -1073,19 +1082,23 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work)
		}
	}

	ret = qtnf_post_init_ep(ts);
	if (ret) {
		pr_err("FW runtime failure\n");
		goto fw_load_exit;
	}

	pr_info("firmware is up and running\n");

	ret = qtnf_post_init_ep(ts);
	ret = qtnf_pcie_fw_boot_done(bus);
	if (ret)
		pr_err("FW runtime failure\n");
		goto fw_load_exit;

	qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
	qtnf_pcie_fw_boot_done(bus, ret ? false : true);

	if (ret == 0) {
		qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
		qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
	}

	put_device(&pdev->dev);
}

static void qtnf_reclaim_tasklet_fn(unsigned long data)
@@ -6,7 +6,7 @@

#include <linux/ieee80211.h>

#define QLINK_PROTO_VER 13
#define QLINK_PROTO_VER 15

#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -206,6 +206,8 @@ struct qlink_sta_info_state {
 *	execution status (one of &enum qlink_cmd_result). Reply message
 *	may also contain data payload specific to the command type.
 *
 * @QLINK_CMD_SEND_FRAME: send specified frame over the air; firmware will
 *	encapsulate 802.3 packet into 802.11 frame automatically.
 * @QLINK_CMD_BAND_INFO_GET: for the specified MAC and specified band, get
 *	the band's description including number of operational channels and
 *	info on each channel, HT/VHT capabilities, supported rates etc.
@@ -220,7 +222,7 @@ enum qlink_cmd_type {
	QLINK_CMD_FW_INIT = 0x0001,
	QLINK_CMD_FW_DEINIT = 0x0002,
	QLINK_CMD_REGISTER_MGMT = 0x0003,
	QLINK_CMD_SEND_MGMT_FRAME = 0x0004,
	QLINK_CMD_SEND_FRAME = 0x0004,
	QLINK_CMD_MGMT_SET_APPIE = 0x0005,
	QLINK_CMD_PHY_PARAMS_GET = 0x0011,
	QLINK_CMD_PHY_PARAMS_SET = 0x0012,
@@ -321,22 +323,26 @@ struct qlink_cmd_mgmt_frame_register {
	u8 do_register;
} __packed;

enum qlink_mgmt_frame_tx_flags {
	QLINK_MGMT_FRAME_TX_FLAG_NONE = 0,
	QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN = BIT(0),
	QLINK_MGMT_FRAME_TX_FLAG_NO_CCK = BIT(1),
	QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2),
/**
 * @QLINK_FRAME_TX_FLAG_8023: frame has a 802.3 header; if not set, frame
 *	is a 802.11 encapsulated.
 */
enum qlink_frame_tx_flags {
	QLINK_FRAME_TX_FLAG_OFFCHAN = BIT(0),
	QLINK_FRAME_TX_FLAG_NO_CCK = BIT(1),
	QLINK_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2),
	QLINK_FRAME_TX_FLAG_8023 = BIT(3),
};

/**
 * struct qlink_cmd_mgmt_frame_tx - data for QLINK_CMD_SEND_MGMT_FRAME command
 * struct qlink_cmd_frame_tx - data for QLINK_CMD_SEND_FRAME command
 *
 * @cookie: opaque request identifier.
 * @freq: Frequency to use for frame transmission.
 * @flags: Transmission flags, one of &enum qlink_mgmt_frame_tx_flags.
 * @flags: Transmission flags, one of &enum qlink_frame_tx_flags.
 * @frame_data: frame to transmit.
 */
struct qlink_cmd_mgmt_frame_tx {
struct qlink_cmd_frame_tx {
	struct qlink_cmd chdr;
	__le32 cookie;
	__le16 freq;
@@ -580,12 +586,20 @@ enum qlink_user_reg_hint_type {
 * @initiator: which entity sent the request, one of &enum qlink_reg_initiator.
 * @user_reg_hint_type: type of hint for QLINK_REGDOM_SET_BY_USER request, one
 *	of &enum qlink_user_reg_hint_type.
 * @num_channels: number of &struct qlink_tlv_channel in a variable portion of a
 *	payload.
 * @dfs_region: one of &enum qlink_dfs_regions.
 * @info: variable portion of regulatory notifier callback.
 */
struct qlink_cmd_reg_notify {
	struct qlink_cmd chdr;
	u8 alpha2[2];
	u8 initiator;
	u8 user_reg_hint_type;
	u8 num_channels;
	u8 dfs_region;
	u8 rsvd[2];
	u8 info[0];
} __packed;

/**
@@ -764,6 +778,18 @@ struct qlink_resp {
	u8 vifid;
} __packed;

/**
 * enum qlink_dfs_regions - regulatory DFS regions
 *
 * Corresponds to &enum nl80211_dfs_regions.
 */
enum qlink_dfs_regions {
	QLINK_DFS_UNSET = 0,
	QLINK_DFS_FCC = 1,
	QLINK_DFS_ETSI = 2,
	QLINK_DFS_JP = 3,
};

/**
 * struct qlink_resp_get_mac_info - response for QLINK_CMD_MAC_INFO command
 *
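The qlink_dfs_regions values above deliberately mirror nl80211_dfs_regions one for one (UNSET/FCC/ETSI/JP = 0..3), so the region reported by firmware can be handed to cfg80211 with a trivial translation. Below is a hedged, self-contained sketch of such a mapping; the helper name is hypothetical and the nl80211 constants are mirrored locally only to keep the sketch compilable outside the kernel.

/*
 * Hypothetical translation helper: map the firmware DFS region code
 * (values as defined in the hunk above) to the matching nl80211 value.
 * In-kernel code would use enum nl80211_dfs_regions directly.
 */
#include <stdio.h>

enum qlink_dfs_regions_sketch {
	QLINK_DFS_UNSET = 0,
	QLINK_DFS_FCC = 1,
	QLINK_DFS_ETSI = 2,
	QLINK_DFS_JP = 3,
};

/* same numeric values as nl80211_dfs_regions, mirrored for the sketch */
enum nl80211_dfs_regions_sketch {
	NL_DFS_UNSET = 0,
	NL_DFS_FCC = 1,
	NL_DFS_ETSI = 2,
	NL_DFS_JP = 3,
};

static int qlink_dfs_region_to_nl(int qregion)
{
	switch (qregion) {
	case QLINK_DFS_FCC:
		return NL_DFS_FCC;
	case QLINK_DFS_ETSI:
		return NL_DFS_ETSI;
	case QLINK_DFS_JP:
		return NL_DFS_JP;
	case QLINK_DFS_UNSET:
	default:
		return NL_DFS_UNSET;
	}
}

int main(void)
{
	printf("%d\n", qlink_dfs_region_to_nl(QLINK_DFS_ETSI));
	return 0;
}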
@@ -779,6 +805,10 @@ struct qlink_resp {
 * @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
 * @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
 * @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar.
 * @alpha2: country code ID firmware is configured to.
 * @n_reg_rules: number of regulatory rules TLVs in variable portion of the
 *	message.
 * @dfs_region: regulatory DFS region, one of &enum qlink_dfs_regions.
 * @var_info: variable-length WMAC info data.
 */
struct qlink_resp_get_mac_info {
@@ -792,22 +822,13 @@ struct qlink_resp_get_mac_info {
	__le16 radar_detect_widths;
	__le32 max_acl_mac_addrs;
	u8 bands_cap;
	u8 alpha2[2];
	u8 n_reg_rules;
	u8 dfs_region;
	u8 rsvd[1];
	u8 var_info[0];
} __packed;

/**
 * enum qlink_dfs_regions - regulatory DFS regions
 *
 * Corresponds to &enum nl80211_dfs_regions.
 */
enum qlink_dfs_regions {
	QLINK_DFS_UNSET = 0,
	QLINK_DFS_FCC = 1,
	QLINK_DFS_ETSI = 2,
	QLINK_DFS_JP = 3,
};

/**
 * struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command
 *
@@ -820,11 +841,7 @@ enum qlink_dfs_regions {
 * @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware.
 * @total_tx_chains: total number of transmit chains used by device.
 * @total_rx_chains: total number of receive chains.
 * @alpha2: country code ID firmware is configured to.
 * @n_reg_rules: number of regulatory rules TLVs in variable portion of the
 *	message.
 * @dfs_region: regulatory DFS region, one of @enum qlink_dfs_region.
 * @info: variable-length HW info, can contain QTN_TLV_ID_REG_RULE.
 * @info: variable-length HW info.
 */
struct qlink_resp_get_hw_info {
	struct qlink_resp rhdr;
@@ -838,9 +855,6 @@ struct qlink_resp_get_hw_info {
	u8 mac_bitmap;
	u8 total_tx_chain;
	u8 total_rx_chain;
	u8 alpha2[2];
	u8 n_reg_rules;
	u8 dfs_region;
	u8 info[0];
} __packed;

@@ -1148,6 +1162,13 @@ struct qlink_event_external_auth {
 *	carried by QTN_TLV_ID_STA_STATS_MAP.
 * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
 *	for in any given scan.
 * @QTN_TLV_ID_SCAN_DWELL_ACTIVE: time spent on a single channel for an active
 *	scan.
 * @QTN_TLV_ID_SCAN_DWELL_PASSIVE: time spent on a single channel for a passive
 *	scan.
 * @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
 *	during a scan including off-channel dwell time and operating channel
 *	time.
 */
enum qlink_tlv_id {
	QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1180,7 +1201,9 @@ enum qlink_tlv_id {
	QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
	QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
	QTN_TLV_ID_SCAN_FLUSH = 0x0412,
	QTN_TLV_ID_SCAN_DWELL = 0x0413,
	QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
	QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
	QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
};

struct qlink_tlv_hdr {
@@ -182,3 +182,120 @@ void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
	memcpy(qacl->mac_addrs, acl->mac_addrs,
	       acl->n_acl_entries * sizeof(*qacl->mac_addrs));
}

enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band)
{
	switch (band) {
	case NL80211_BAND_2GHZ:
		return QLINK_BAND_2GHZ;
	case NL80211_BAND_5GHZ:
		return QLINK_BAND_5GHZ;
	case NL80211_BAND_60GHZ:
		return QLINK_BAND_60GHZ;
	default:
		return -EINVAL;
	}
}

enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state)
{
	switch (state) {
	case NL80211_DFS_USABLE:
		return QLINK_DFS_USABLE;
	case NL80211_DFS_AVAILABLE:
		return QLINK_DFS_AVAILABLE;
	case NL80211_DFS_UNAVAILABLE:
	default:
		return QLINK_DFS_UNAVAILABLE;
	}
}

u32 qlink_utils_chflags_cfg2q(u32 cfgflags)
{
	u32 flags = 0;

	if (cfgflags & IEEE80211_CHAN_DISABLED)
		flags |= QLINK_CHAN_DISABLED;

	if (cfgflags & IEEE80211_CHAN_NO_IR)
		flags |= QLINK_CHAN_NO_IR;

	if (cfgflags & IEEE80211_CHAN_RADAR)
		flags |= QLINK_CHAN_RADAR;

	if (cfgflags & IEEE80211_CHAN_NO_HT40PLUS)
		flags |= QLINK_CHAN_NO_HT40PLUS;

	if (cfgflags & IEEE80211_CHAN_NO_HT40MINUS)
		flags |= QLINK_CHAN_NO_HT40MINUS;

	if (cfgflags & IEEE80211_CHAN_NO_80MHZ)
		flags |= QLINK_CHAN_NO_80MHZ;

	if (cfgflags & IEEE80211_CHAN_NO_160MHZ)
		flags |= QLINK_CHAN_NO_160MHZ;

	return flags;
}

static u32 qtnf_reg_rule_flags_parse(u32 qflags)
{
	u32 flags = 0;

	if (qflags & QLINK_RRF_NO_OFDM)
		flags |= NL80211_RRF_NO_OFDM;

	if (qflags & QLINK_RRF_NO_CCK)
		flags |= NL80211_RRF_NO_CCK;

	if (qflags & QLINK_RRF_NO_INDOOR)
		flags |= NL80211_RRF_NO_INDOOR;

	if (qflags & QLINK_RRF_NO_OUTDOOR)
		flags |= NL80211_RRF_NO_OUTDOOR;

	if (qflags & QLINK_RRF_DFS)
		flags |= NL80211_RRF_DFS;

	if (qflags & QLINK_RRF_PTP_ONLY)
		flags |= NL80211_RRF_PTP_ONLY;

	if (qflags & QLINK_RRF_PTMP_ONLY)
		flags |= NL80211_RRF_PTMP_ONLY;

	if (qflags & QLINK_RRF_NO_IR)
		flags |= NL80211_RRF_NO_IR;

	if (qflags & QLINK_RRF_AUTO_BW)
		flags |= NL80211_RRF_AUTO_BW;

	if (qflags & QLINK_RRF_IR_CONCURRENT)
		flags |= NL80211_RRF_IR_CONCURRENT;

	if (qflags & QLINK_RRF_NO_HT40MINUS)
		flags |= NL80211_RRF_NO_HT40MINUS;

	if (qflags & QLINK_RRF_NO_HT40PLUS)
		flags |= NL80211_RRF_NO_HT40PLUS;

	if (qflags & QLINK_RRF_NO_80MHZ)
		flags |= NL80211_RRF_NO_80MHZ;

	if (qflags & QLINK_RRF_NO_160MHZ)
		flags |= NL80211_RRF_NO_160MHZ;

	return flags;
}

void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
			      const struct qlink_tlv_reg_rule *tlv)
{
	rule->freq_range.start_freq_khz = le32_to_cpu(tlv->start_freq_khz);
	rule->freq_range.end_freq_khz = le32_to_cpu(tlv->end_freq_khz);
	rule->freq_range.max_bandwidth_khz =
		le32_to_cpu(tlv->max_bandwidth_khz);
	rule->power_rule.max_antenna_gain = le32_to_cpu(tlv->max_antenna_gain);
	rule->power_rule.max_eirp = le32_to_cpu(tlv->max_eirp);
	rule->dfs_cac_ms = le32_to_cpu(tlv->dfs_cac_ms);
	rule->flags = qtnf_reg_rule_flags_parse(le32_to_cpu(tlv->flags));
}
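The new helpers above all use the same explicit bit-by-bit translation pattern rather than a lookup table, which keeps the firmware ABI values decoupled from the kernel's own flag values. Below is a minimal stand-alone sketch of that pattern; the SRC_/DST_ names and values are invented for illustration and are not the QLINK_RRF_ or NL80211_RRF_ constants.

/*
 * Stand-alone sketch of the flag-translation pattern used by
 * qtnf_reg_rule_flags_parse() above: test each known source bit and set
 * the corresponding destination bit.
 */
#include <stdint.h>
#include <stdio.h>

#define SRC_NO_OFDM (1u << 0)
#define SRC_NO_IR   (1u << 1)
#define SRC_DFS     (1u << 2)

#define DST_NO_OFDM (1u << 4)
#define DST_NO_IR   (1u << 7)
#define DST_DFS     (1u << 9)

static uint32_t translate_flags(uint32_t src)
{
	uint32_t dst = 0;

	if (src & SRC_NO_OFDM)
		dst |= DST_NO_OFDM;
	if (src & SRC_NO_IR)
		dst |= DST_NO_IR;
	if (src & SRC_DFS)
		dst |= DST_DFS;

	return dst;
}

int main(void)
{
	printf("0x%x\n", translate_flags(SRC_NO_OFDM | SRC_DFS));
	return 0;
}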
@@ -79,5 +79,10 @@ bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
			   unsigned int arr_max_len);
void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
			  struct qlink_acl_data *qacl);
enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band);
enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state);
u32 qlink_utils_chflags_cfg2q(u32 cfgflags);
void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
			      const struct qlink_tlv_reg_rule *tlv_rule);

#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
@@ -448,6 +448,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
	/* <2> work queue */
	rtlpriv->works.hw = hw;
	rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
	if (unlikely(!rtlpriv->works.rtl_wq)) {
		pr_err("Failed to allocate work queue\n");
		return;
	}

	INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
			  (void *)rtl_watchdog_wq_callback);
	INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
@@ -499,16 +499,16 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)

		memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));

		spin_lock_bh(&rtlpriv->locks.waitq_lock);
		spin_lock(&rtlpriv->locks.waitq_lock);
		if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
		    (ring->entries - skb_queue_len(&ring->queue) >
		     rtlhal->max_earlymode_num)) {
			skb = skb_dequeue(&mac->skb_waitq[tid]);
		} else {
			spin_unlock_bh(&rtlpriv->locks.waitq_lock);
			spin_unlock(&rtlpriv->locks.waitq_lock);
			break;
		}
		spin_unlock_bh(&rtlpriv->locks.waitq_lock);
		spin_unlock(&rtlpriv->locks.waitq_lock);

		/* Some macaddr can't do early mode. like
		 * multicast/broadcast/no_qos data
@@ -600,6 +600,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
		      u1rsvdpageloc, 3);

	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
	skb_put_data(skb, &reserved_page_packet, totalpacketlen);

	rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -372,8 +372,9 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rx_fwinfo_88e *p_drvinfo;
	struct ieee80211_hdr *hdr;

	u8 wake_match;
	u32 phystatus = GET_RX_DESC_PHYST(pdesc);

	status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
	if (status->packet_report_type == TX_REPORT2)
		status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc);
@@ -399,18 +400,18 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
	status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate);

	status->macid = GET_RX_DESC_MACID(pdesc);
	if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
		status->wake_match = BIT(2);
	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
		wake_match = BIT(2);
	else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
		status->wake_match = BIT(1);
		wake_match = BIT(1);
	else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
		status->wake_match = BIT(0);
		wake_match = BIT(0);
	else
		status->wake_match = 0;
	if (status->wake_match)
		wake_match = 0;
	if (wake_match)
		RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
			 "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
			 status->wake_match);
			 wake_match);
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->band = hw->conf.chandef.chan->band;

@@ -623,6 +623,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
		      u1rsvdpageloc, 3);

	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
	skb_put_data(skb, &reserved_page_packet, totalpacketlen);

	if (cmd_send_packet)
@@ -744,6 +744,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
		      u1rsvdpageloc, 3);

	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
	skb_put_data(skb, &reserved_page_packet, totalpacketlen);

	rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -331,6 +331,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
	struct rx_fwinfo *p_drvinfo;
	struct ieee80211_hdr *hdr;
	u32 phystatus = GET_RX_DESC_PHYST(pdesc);
	u8 wake_match;

	if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
		status->packet_report_type = NORMAL_RX;
@@ -350,18 +351,18 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
	status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);

	status->macid = GET_RX_DESC_MACID(pdesc);
	if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
		status->wake_match = BIT(2);
	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
		wake_match = BIT(2);
	else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
		status->wake_match = BIT(1);
		wake_match = BIT(1);
	else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
		status->wake_match = BIT(0);
		wake_match = BIT(0);
	else
		status->wake_match = 0;
	if (status->wake_match)
		wake_match = 0;
	if (wake_match)
		RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
			 "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
			 status->wake_match);
			 wake_match);
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->band = hw->conf.chandef.chan->band;

@@ -663,7 +663,7 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)

}

void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -448,6 +448,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
		      u1rsvdpageloc, 3);

	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
	skb_put_data(skb, &reserved_page_packet, totalpacketlen);

	rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -562,6 +562,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
		      u1rsvdpageloc, sizeof(u1rsvdpageloc));

	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
	skb_put_data(skb, &reserved_page_packet, totalpacketlen);

	rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -300,7 +300,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rx_fwinfo_8723be *p_drvinfo;
	struct ieee80211_hdr *hdr;

	u8 wake_match;
	u32 phystatus = GET_RX_DESC_PHYST(pdesc);

	status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
@@ -329,18 +329,18 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
		status->packet_report_type = NORMAL_RX;


	if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
		status->wake_match = BIT(2);
	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
		wake_match = BIT(2);
	else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
		status->wake_match = BIT(1);
		wake_match = BIT(1);
	else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
		status->wake_match = BIT(0);
		wake_match = BIT(0);
	else
		status->wake_match = 0;
	if (status->wake_match)
		wake_match = 0;
	if (wake_match)
		RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
			 "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
			 status->wake_match);
			 wake_match);
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->band = hw->conf.chandef.chan->band;

@@ -1623,6 +1623,8 @@ out:
		      &reserved_page_packet_8812[0], totalpacketlen);

	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
	skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);

	rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -1759,6 +1761,8 @@ out:
		      &reserved_page_packet_8821[0], totalpacketlen);

	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
	skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);

	rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -436,7 +436,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rx_fwinfo_8821ae *p_drvinfo;
	struct ieee80211_hdr *hdr;

	u8 wake_match;
	u32 phystatus = GET_RX_DESC_PHYST(pdesc);

	status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
@@ -473,18 +473,18 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
		status->packet_report_type = NORMAL_RX;

	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
		status->wake_match = BIT(2);
		wake_match = BIT(2);
	else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
		status->wake_match = BIT(1);
		wake_match = BIT(1);
	else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
		status->wake_match = BIT(0);
		wake_match = BIT(0);
	else
		status->wake_match = 0;
		wake_match = 0;

	if (status->wake_match)
	if (wake_match)
		RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
			 "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
			 status->wake_match);
			 wake_match);
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->band = hw->conf.chandef.chan->band;

@@ -2138,7 +2138,6 @@ struct rtl_stats {
	u8 packet_report_type;

	u32 macid;
	u8 wake_match;
	u32 bt_rx_rssi_percentage;
	u32 macid_valid_entry[2];
};