Merge tag 'wireless-drivers-next-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.9

Second set of patches for v5.9. mt76 has most of the patches this time.
Otherwise it's just smaller fixes and cleanups to other drivers.

There was a major conflict in the mt76 driver between wireless-drivers and
wireless-drivers-next. I resolved that by merging the former into the
latter.

Major changes:

rtw88

* add support for ieee80211_ops::change_interface

* add support for enabling and disabling beacon

* add debugfs file for testing h2c

mt76

* ARP filter offload for 7663

* runtime power management for 7663

* testmode support for mfg calibration

* support for more channels
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2020-08-04 12:57:02 -07:00
commit cabf06e5a2
124 changed files with 5599 additions and 1281 deletions


@ -78,7 +78,7 @@ such, if you are interested in deploying or shipping a driver as part of
solution intended to be used for purposes other than development, please
obtain a tested driver from Intel Customer Support at:
http://www.intel.com/support/wireless/sb/CS-006408.htm
https://www.intel.com/support/wireless/sb/CS-006408.htm
1. Introduction
===============


@ -122,6 +122,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
struct gpio_irq_chip *girq = &chip->irq;
int hwirq, err;
if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
@ -136,15 +137,13 @@ static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
bcma_chipco_gpio_intmask(cc, ~0, 0);
bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
err = gpiochip_irqchip_add(chip,
&bcma_gpio_irq_chip,
0,
handle_simple_irq,
IRQ_TYPE_NONE);
if (err) {
free_irq(hwirq, cc);
return err;
}
girq->chip = &bcma_gpio_irq_chip;
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
return 0;
}
@ -212,13 +211,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
else
chip->base = -1;
err = gpiochip_add_data(chip, cc);
err = bcma_gpio_irq_init(cc);
if (err)
return err;
err = bcma_gpio_irq_init(cc);
err = gpiochip_add_data(chip, cc);
if (err) {
gpiochip_remove(chip);
bcma_gpio_irq_exit(cc);
return err;
}
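
The two bcma hunks above follow the newer gpiolib convention: rather than
calling gpiochip_irqchip_add() after the chip is registered, the driver fills
in the gpio_irq_chip template embedded in struct gpio_chip and lets
gpiochip_add_data() set up the irqchip. A minimal sketch of that ordering,
using hypothetical names (my_gpio_irq_chip, my_gpio_register) that are not
part of this patch:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

/* Hypothetical irqchip; a real driver would also fill in mask/unmask ops. */
static struct irq_chip my_gpio_irq_chip = {
        .name = "my-gpio",
};

static int my_gpio_register(struct device *dev, struct gpio_chip *chip,
                            void *priv)
{
        struct gpio_irq_chip *girq = &chip->irq;

        /* Describe the irqchip up front... */
        girq->chip = &my_gpio_irq_chip;
        girq->parent_handler = NULL;    /* no cascaded parent interrupt */
        girq->num_parents = 0;
        girq->parents = NULL;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_simple_irq;

        /* ...then add the chip; gpiolib registers the irqchip internally. */
        return devm_gpiochip_add_data(dev, chip, priv);
}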


@ -219,7 +219,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
u32 type, u8 port)
{
u32 addrl, addrh, sizeh = 0;
u32 addrl;
u32 size;
u32 ent = bcma_erom_get_ent(bus, eromptr);
@ -233,14 +233,12 @@ static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
addrl = ent & SCAN_ADDR_ADDR;
if (ent & SCAN_ADDR_AG32)
addrh = bcma_erom_get_ent(bus, eromptr);
else
addrh = 0;
bcma_erom_get_ent(bus, eromptr);
if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
size = bcma_erom_get_ent(bus, eromptr);
if (size & SCAN_SIZE_SG32)
sizeh = bcma_erom_get_ent(bus, eromptr);
bcma_erom_get_ent(bus, eromptr);
}
return addrl;


@ -734,7 +734,7 @@ static void b43_short_slot_timing_disable(struct b43_wldev *dev)
}
/* DummyTransmission function, as documented on
* http://bcm-v4.sipsolutions.net/802.11/DummyTransmission
* https://bcm-v4.sipsolutions.net/802.11/DummyTransmission
*/
void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
{
@ -1198,7 +1198,7 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */
void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev)
{
struct bcma_drv_cc *bcma_cc __maybe_unused;
@ -2290,7 +2290,7 @@ err_format:
return -EPROTO;
}
/* http://bcm-v4.sipsolutions.net/802.11/Init/Firmware */
/* https://bcm-v4.sipsolutions.net/802.11/Init/Firmware */
static int b43_try_request_fw(struct b43_request_fw_context *ctx)
{
struct b43_wldev *dev = ctx->dev;
@ -2843,7 +2843,7 @@ static int b43_upload_initvals_band(struct b43_wldev *dev)
}
/* Initialize the GPIOs
* http://bcm-specs.sipsolutions.net/GPIO
* https://bcm-specs.sipsolutions.net/GPIO
*/
#ifdef CONFIG_B43_SSB
@ -2971,7 +2971,7 @@ void b43_mac_enable(struct b43_wldev *dev)
}
}
/* http://bcm-specs.sipsolutions.net/SuspendMAC */
/* https://bcm-specs.sipsolutions.net/SuspendMAC */
void b43_mac_suspend(struct b43_wldev *dev)
{
int i;
@ -3004,7 +3004,7 @@ out:
dev->mac_suspended++;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
{
u32 tmp;
@ -3231,7 +3231,7 @@ static void b43_chip_exit(struct b43_wldev *dev)
}
/* Initialize the chip
* http://bcm-specs.sipsolutions.net/ChipInit
* https://bcm-specs.sipsolutions.net/ChipInit
*/
static int b43_chip_init(struct b43_wldev *dev)
{


@ -559,7 +559,7 @@ bool b43_is_40mhz(struct b43_wldev *dev)
return dev->phy.chandef->width == NL80211_CHAN_WIDTH_40;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
void b43_phy_force_clock(struct b43_wldev *dev, bool force)
{
u32 tmp;


@ -357,14 +357,14 @@ static void b43_set_original_gains(struct b43_wldev *dev)
b43_dummy_transmission(dev, false, true);
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
static void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val)
{
b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset);
b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val);
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset)
{
u16 val;
@ -375,7 +375,7 @@ static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset)
return (s16) val;
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
{
u16 i;
@ -389,7 +389,7 @@ static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
}
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
static void b43_nrssi_mem_update(struct b43_wldev *dev)
{
struct b43_phy_g *gphy = dev->phy.g;
@ -1575,7 +1575,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
static void b43_phy_initb6(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -2746,7 +2746,7 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
return 0;
}
/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
/* https://bcm-specs.sipsolutions.net/EstimatePowerOut
* This function converts a TSSI value to dBm in Q5.2
*/
static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)


@ -1018,7 +1018,7 @@ static void b43_phy_ht_op_free(struct b43_wldev *dev)
phy->ht = NULL;
}
/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
/* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
bool blocked)
{


@ -70,7 +70,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
dev->phy.lp = NULL;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
struct ssb_sprom *sprom = dev->dev->bus_sprom;


@ -98,7 +98,7 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev)
(dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ));
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev)
{
return (b43_phy_read(dev, B43_NPHY_RFSEQCA) & B43_NPHY_RFSEQCA_RXEN) >>
@ -109,7 +109,7 @@ static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev)
* RF (just without b43_nphy_rf_ctl_intc_override)
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
enum b43_nphy_rf_sequence seq)
{
@ -146,7 +146,7 @@ static void b43_nphy_rf_ctl_override_rev19(struct b43_wldev *dev, u16 field,
/* TODO */
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
u16 value, u8 core, bool off,
u8 override)
@ -193,7 +193,7 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */
static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev,
enum n_rf_ctl_over_cmd cmd,
u16 value, u8 core, bool off)
@ -237,7 +237,7 @@ static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
u16 value, u8 core, bool off)
{
@ -382,7 +382,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
enum n_intc_override intc_override,
u16 value, u8 core)
@ -490,7 +490,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
* Various PHY ops
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
const u16 *clip_st)
{
@ -498,14 +498,14 @@ static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
{
clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
{
u16 tmp;
@ -526,7 +526,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
return tmp;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
static void b43_nphy_reset_cca(struct b43_wldev *dev)
{
u16 bbcfg;
@ -540,7 +540,7 @@ static void b43_nphy_reset_cca(struct b43_wldev *dev)
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
{
struct b43_phy *phy = &dev->phy;
@ -564,7 +564,7 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
}
}
/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
/* https://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
{
if (!offset)
@ -572,7 +572,7 @@ static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -628,7 +628,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
u8 *events, u8 *delays, u8 length)
{
@ -805,7 +805,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
}
/* Calibrate resistors in LPF of PLL?
* http://bcm-v4.sipsolutions.net/PHY/radio205x_rcal
* https://bcm-v4.sipsolutions.net/PHY/radio205x_rcal
*/
static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
{
@ -919,7 +919,7 @@ static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
}
/* Calibrate the internal RC oscillator?
* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal
* https://bcm-v4.sipsolutions.net/PHY/radio2057_rccal
*/
static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
{
@ -1030,7 +1030,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
}
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
/* https://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
static void b43_radio_2057_init(struct b43_wldev *dev)
{
b43_radio_2057_init_pre(dev);
@ -1117,7 +1117,7 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
e->radio_tx1_mixg_boost_tune);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */
static void b43_radio_2056_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
@ -1356,7 +1356,7 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
/*
* Initialize a Broadcom 2056 N-radio
* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
* https://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
*/
static void b43_radio_init2056(struct b43_wldev *dev)
{
@ -1406,7 +1406,7 @@ static void b43_chantab_radio_upload(struct b43_wldev *dev,
b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
static void b43_radio_2055_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev2 *e)
{
@ -1480,7 +1480,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
/*
* Initialize a Broadcom 2055 N-radio
* http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
* https://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
*/
static void b43_radio_init2055(struct b43_wldev *dev)
{
@ -1499,7 +1499,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
* Samples
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
static int b43_nphy_load_samples(struct b43_wldev *dev,
struct cordic_iq *samples, u16 len) {
struct b43_phy_n *nphy = dev->phy.n;
@ -1526,7 +1526,7 @@ static int b43_nphy_load_samples(struct b43_wldev *dev,
return 0;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
bool test)
{
@ -1569,7 +1569,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
return (i < 0) ? 0 : len;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
u16 wait, bool iqmode, bool dac_test,
bool modify_bbmult)
@ -1650,7 +1650,7 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
* RSSI
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
s8 offset, u8 core,
enum n_rail_type rail,
@ -1895,7 +1895,7 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code,
enum n_rssi_type type)
{
@ -1907,7 +1907,7 @@ static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code,
b43_nphy_rev2_rssi_select(dev, code, type);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev,
enum n_rssi_type rssi_type, u8 *buf)
{
@ -1936,7 +1936,7 @@ static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type,
s32 *buf, u8 nsamp)
{
@ -2025,7 +2025,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type,
return out;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -2287,7 +2287,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_nphy_write_clip_detection(dev, clip_state);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
{
int i, j, vcm;
@ -2453,7 +2453,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
/*
* RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
*/
static void b43_nphy_rssi_cal(struct b43_wldev *dev)
{
@ -2680,7 +2680,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
{
if (dev->phy.rev >= 19)
@ -3433,7 +3433,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
B43_NPHY_FINERX2_CGC_DECGC);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
static void b43_nphy_workarounds(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -3468,7 +3468,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
/*
* Transmits a known value for LO calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
*/
static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
bool iqmode, bool dac_test, bool modify_bbmult)
@ -3481,7 +3481,7 @@ static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
return 0;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -3509,7 +3509,7 @@ static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
~B43_NPHY_RFSEQMODE_CAOVER);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
static void b43_nphy_stop_playback(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -3546,7 +3546,7 @@ static void b43_nphy_stop_playback(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
struct nphy_txgains target,
struct nphy_iqcal_params *params)
@ -3595,7 +3595,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
* Tx and Rx
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
{
struct b43_phy *phy = &dev->phy;
@ -3732,7 +3732,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
b43_nphy_stay_in_carrier_search(dev, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -3926,7 +3926,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
/*
* Stop radio and transmit known signal. Then check received signal strength to
* get TSSI (Transmit Signal Strength Indicator).
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
*/
static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
{
@ -3978,7 +3978,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
}
/* http://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */
/* https://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */
static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -4039,7 +4039,7 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -4272,7 +4272,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -4310,7 +4310,7 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
/*
* TX low-pass filter bandwidth setup
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw
* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw
*/
static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev)
{
@ -4333,7 +4333,7 @@ static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
u16 samps, u8 time, bool wait)
{
@ -4372,7 +4372,7 @@ static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
memset(est, 0, sizeof(*est));
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
struct b43_phy_n_iq_comp *pcomp)
{
@ -4391,7 +4391,7 @@ static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
#if 0
/* Ready but not used anywhere */
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
{
u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
@ -4414,7 +4414,7 @@ static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
{
u8 rxval, txval;
@ -4476,7 +4476,7 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
}
#endif
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
{
int i;
@ -4574,7 +4574,7 @@ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
b43_nphy_rx_iq_coeffs(dev, true, &new);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
{
u16 array[4];
@ -4586,7 +4586,7 @@ static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -4645,7 +4645,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -4713,7 +4713,7 @@ static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
/*
* Restore RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
*/
static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
{
@ -4822,7 +4822,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -4921,7 +4921,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -4955,14 +4955,14 @@ static void b43_nphy_pa_set_tx_dig_filter(struct b43_wldev *dev, u16 offset,
b43_phy_write(dev, offset, filter[i]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
{
b43_nphy_pa_set_tx_dig_filter(dev, 0x2C5,
tbl_tx_filter_coef_rev4[2]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
{
/* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
@ -5002,7 +5002,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -5077,7 +5077,7 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
return target;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
{
u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
@ -5106,7 +5106,7 @@ static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -5207,7 +5207,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
static void b43_nphy_save_cal(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -5278,7 +5278,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
static void b43_nphy_restore_cal(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -5366,7 +5366,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
struct nphy_txgains target,
bool full, bool mphase)
@ -5599,7 +5599,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
return error;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@ -5634,7 +5634,7 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
struct nphy_txgains target, u8 type, bool debug)
{
@ -5821,7 +5821,7 @@ static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
return -1;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
struct nphy_txgains target, u8 type, bool debug)
{
@ -5834,7 +5834,7 @@ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
{
struct b43_phy *phy = &dev->phy;
@ -5939,7 +5939,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
* N-PHY init
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
{
u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
@ -5953,7 +5953,7 @@ static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
static void b43_nphy_bphy_init(struct b43_wldev *dev)
{
unsigned int i;
@ -5972,7 +5972,7 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
{
if (dev->phy.rev >= 7)
@ -6246,7 +6246,7 @@ static void b43_chantab_phy_upload(struct b43_wldev *dev,
b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
}
/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
/* https://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
{
switch (dev->dev->bus_type) {
@ -6265,7 +6265,7 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void b43_nphy_channel_setup(struct b43_wldev *dev,
const struct b43_phy_n_sfo_cfg *e,
struct ieee80211_channel *new_channel)
@ -6372,7 +6372,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_nphy_spur_workaround(dev);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
static int b43_nphy_set_channel(struct b43_wldev *dev,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type)
@ -6589,7 +6589,7 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}
/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
/* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
bool blocked)
{
@ -6643,7 +6643,7 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
{
struct b43_phy *phy = &dev->phy;


@ -3072,7 +3072,7 @@ INITTABSPTS(b2056_inittab_radio_rev11);
.phy_regs.phy_bw5 = r4, \
.phy_regs.phy_bw6 = r5
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
/* https://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,


@ -3620,7 +3620,7 @@ static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */
void b43_nphy_tables_init(struct b43_wldev *dev)
{
if (dev->phy.rev >= 16)
@ -3633,7 +3633,7 @@ void b43_nphy_tables_init(struct b43_wldev *dev)
b43_nphy_tables_init_rev0(dev);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;


@ -591,7 +591,7 @@ static void b43legacy_synchronize_irq(struct b43legacy_wldev *dev)
}
/* DummyTransmission function, as documented on
* http://bcm-specs.sipsolutions.net/DummyTransmission
* https://bcm-specs.sipsolutions.net/DummyTransmission
*/
void b43legacy_dummy_transmission(struct b43legacy_wldev *dev)
{
@ -1870,7 +1870,7 @@ out:
}
/* Initialize the GPIOs
* http://bcm-specs.sipsolutions.net/GPIO
* https://bcm-specs.sipsolutions.net/GPIO
*/
static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
{
@ -1960,7 +1960,7 @@ void b43legacy_mac_enable(struct b43legacy_wldev *dev)
}
}
/* http://bcm-specs.sipsolutions.net/SuspendMAC */
/* https://bcm-specs.sipsolutions.net/SuspendMAC */
void b43legacy_mac_suspend(struct b43legacy_wldev *dev)
{
int i;
@ -2141,7 +2141,7 @@ static void b43legacy_chip_exit(struct b43legacy_wldev *dev)
}
/* Initialize the chip
* http://bcm-specs.sipsolutions.net/ChipInit
* https://bcm-specs.sipsolutions.net/ChipInit
*/
static int b43legacy_chip_init(struct b43legacy_wldev *dev)
{


@ -129,7 +129,7 @@ void b43legacy_phy_calibrate(struct b43legacy_wldev *dev)
}
/* initialize B PHY power control
* as described in http://bcm-specs.sipsolutions.net/InitPowerControl
* as described in https://bcm-specs.sipsolutions.net/InitPowerControl
*/
static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev)
{
@ -1461,7 +1461,7 @@ void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev,
b43legacy_phy_write(dev, 0x0060, value);
}
/* http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */
/* https://bcm-specs.sipsolutions.net/LocalOscillator/Measure */
void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
{
static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 };
@ -1721,7 +1721,7 @@ void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev)
}
}
/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
/* https://bcm-specs.sipsolutions.net/EstimatePowerOut
* This function converts a TSSI value to dBm in Q5.2
*/
static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi)
@ -1747,7 +1747,7 @@ static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi)
return dbm;
}
/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
/* https://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;


@ -313,14 +313,14 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev)
return ret[channel - 1];
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset);
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val);
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset)
{
u16 val;
@ -331,7 +331,7 @@ s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset)
return (s16)val;
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val)
{
u16 i;
@ -345,7 +345,7 @@ void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val)
}
}
/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;


@ -84,6 +84,8 @@
#define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000)
#define BRCMF_PS_MAX_TIMEOUT_MS 2000
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
@ -2942,6 +2944,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
else
bphy_err(drvr, "error (%d)\n", err);
}
err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret",
min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS));
if (err)
bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err);
done:
brcmf_dbg(TRACE, "Exit\n");
return err;


@ -74,16 +74,19 @@ MODULE_DEVICE_TABLE(pci, card_ids);
static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *);
static void airo_pci_remove(struct pci_dev *);
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state);
static int airo_pci_resume(struct pci_dev *pdev);
static int __maybe_unused airo_pci_suspend(struct device *dev);
static int __maybe_unused airo_pci_resume(struct device *dev);
static SIMPLE_DEV_PM_OPS(airo_pci_pm_ops,
airo_pci_suspend,
airo_pci_resume);
static struct pci_driver airo_driver = {
.name = DRV_NAME,
.id_table = card_ids,
.probe = airo_pci_probe,
.remove = airo_pci_remove,
.suspend = airo_pci_suspend,
.resume = airo_pci_resume,
.name = DRV_NAME,
.id_table = card_ids,
.probe = airo_pci_probe,
.remove = airo_pci_remove,
.driver.pm = &airo_pci_pm_ops,
};
#endif /* CONFIG_PCI */
@ -5573,9 +5576,9 @@ static void airo_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
static int __maybe_unused airo_pci_suspend(struct device *dev_d)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct net_device *dev = dev_get_drvdata(dev_d);
struct airo_info *ai = dev->ml_priv;
Cmd cmd;
Resp rsp;
@ -5591,25 +5594,21 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
return -EAGAIN;
disable_MAC(ai, 0);
netif_device_detach(dev);
ai->power = state;
ai->power = PMSG_SUSPEND;
cmd.cmd = HOSTSLEEP;
issuecommand(ai, &cmd, &rsp);
pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
pci_save_state(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
device_wakeup_enable(dev_d);
return 0;
}
static int airo_pci_resume(struct pci_dev *pdev)
static int __maybe_unused airo_pci_resume(struct device *dev_d)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct net_device *dev = dev_get_drvdata(dev_d);
struct airo_info *ai = dev->ml_priv;
pci_power_t prev_state = pdev->current_state;
pci_power_t prev_state = to_pci_dev(dev_d)->current_state;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
device_wakeup_disable(dev_d);
if (prev_state != PCI_D1) {
reset_card(dev, 0);


@ -28,7 +28,7 @@ config IPW2100
You will also very likely need the Wireless Tools in order to
configure your card:
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
<https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
It is recommended that you compile this driver as a module (M)
rather than built-in (Y). This driver requires firmware at device
@ -90,7 +90,7 @@ config IPW2200
You will also very likely need the Wireless Tools in order to
configure your card:
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
<https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
It is recommended that you compile this driver as a module (M)
rather than built-in (Y). This driver requires firmware at device


@ -2295,10 +2295,11 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv,
return -ENOMEM;
packet->rxp = (struct ipw2100_rx *)packet->skb->data;
packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
packet->dma_addr = dma_map_single(&priv->pci_dev->dev,
packet->skb->data,
sizeof(struct ipw2100_rx),
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) {
DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->pci_dev->dev, packet->dma_addr)) {
dev_kfree_skb(packet->skb);
return -ENOMEM;
}
@ -2479,9 +2480,8 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
return;
}
pci_unmap_single(priv->pci_dev,
packet->dma_addr,
sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr,
sizeof(struct ipw2100_rx), DMA_FROM_DEVICE);
skb_put(packet->skb, status->frame_size);
@ -2563,8 +2563,8 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
return;
}
pci_unmap_single(priv->pci_dev, packet->dma_addr,
sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr,
sizeof(struct ipw2100_rx), DMA_FROM_DEVICE);
memmove(packet->skb->data + sizeof(struct ipw_rt_hdr),
packet->skb->data, status->frame_size);
@ -2689,9 +2689,9 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
/* Sync the DMA for the RX buffer so CPU is sure to get
* the correct values */
pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
sizeof(struct ipw2100_rx),
PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&priv->pci_dev->dev, packet->dma_addr,
sizeof(struct ipw2100_rx),
DMA_FROM_DEVICE);
if (unlikely(ipw2100_corruption_check(priv, i))) {
ipw2100_corruption_detected(priv, i);
@ -2923,9 +2923,8 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv)
(packet->index + 1 + i) % txq->entries,
tbd->host_addr, tbd->buf_length);
pci_unmap_single(priv->pci_dev,
tbd->host_addr,
tbd->buf_length, PCI_DMA_TODEVICE);
dma_unmap_single(&priv->pci_dev->dev, tbd->host_addr,
tbd->buf_length, DMA_TO_DEVICE);
}
libipw_txb_free(packet->info.d_struct.txb);
@ -3165,15 +3164,13 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
tbd->buf_length = packet->info.d_struct.txb->
fragments[i]->len - LIBIPW_3ADDR_LEN;
tbd->host_addr = pci_map_single(priv->pci_dev,
tbd->host_addr = dma_map_single(&priv->pci_dev->dev,
packet->info.d_struct.
txb->fragments[i]->
data +
txb->fragments[i]->data +
LIBIPW_3ADDR_LEN,
tbd->buf_length,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pci_dev,
tbd->host_addr)) {
DMA_TO_DEVICE);
if (dma_mapping_error(&priv->pci_dev->dev, tbd->host_addr)) {
IPW_DEBUG_TX("dma mapping error\n");
break;
}
@ -3182,10 +3179,10 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
txq->next, tbd->host_addr,
tbd->buf_length);
pci_dma_sync_single_for_device(priv->pci_dev,
tbd->host_addr,
tbd->buf_length,
PCI_DMA_TODEVICE);
dma_sync_single_for_device(&priv->pci_dev->dev,
tbd->host_addr,
tbd->buf_length,
DMA_TO_DEVICE);
txq->next++;
txq->next %= txq->entries;
@ -3440,9 +3437,9 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
return -ENOMEM;
for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
v = pci_zalloc_consistent(priv->pci_dev,
sizeof(struct ipw2100_cmd_header),
&p);
v = dma_alloc_coherent(&priv->pci_dev->dev,
sizeof(struct ipw2100_cmd_header), &p,
GFP_KERNEL);
if (!v) {
printk(KERN_ERR DRV_NAME ": "
"%s: PCI alloc failed for msg "
@ -3461,11 +3458,10 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
return 0;
for (j = 0; j < i; j++) {
pci_free_consistent(priv->pci_dev,
sizeof(struct ipw2100_cmd_header),
priv->msg_buffers[j].info.c_struct.cmd,
priv->msg_buffers[j].info.c_struct.
cmd_phys);
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct ipw2100_cmd_header),
priv->msg_buffers[j].info.c_struct.cmd,
priv->msg_buffers[j].info.c_struct.cmd_phys);
}
kfree(priv->msg_buffers);
@ -3496,11 +3492,10 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
return;
for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
pci_free_consistent(priv->pci_dev,
sizeof(struct ipw2100_cmd_header),
priv->msg_buffers[i].info.c_struct.cmd,
priv->msg_buffers[i].info.c_struct.
cmd_phys);
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct ipw2100_cmd_header),
priv->msg_buffers[i].info.c_struct.cmd,
priv->msg_buffers[i].info.c_struct.cmd_phys);
}
kfree(priv->msg_buffers);
@ -4323,7 +4318,8 @@ static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
IPW_DEBUG_INFO("enter\n");
q->size = entries * sizeof(struct ipw2100_status);
q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
GFP_KERNEL);
if (!q->drv) {
IPW_DEBUG_WARNING("Can not allocate status queue.\n");
return -ENOMEM;
@ -4339,9 +4335,10 @@ static void status_queue_free(struct ipw2100_priv *priv)
IPW_DEBUG_INFO("enter\n");
if (priv->status_queue.drv) {
pci_free_consistent(priv->pci_dev, priv->status_queue.size,
priv->status_queue.drv,
priv->status_queue.nic);
dma_free_coherent(&priv->pci_dev->dev,
priv->status_queue.size,
priv->status_queue.drv,
priv->status_queue.nic);
priv->status_queue.drv = NULL;
}
@ -4357,7 +4354,8 @@ static int bd_queue_allocate(struct ipw2100_priv *priv,
q->entries = entries;
q->size = entries * sizeof(struct ipw2100_bd);
q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
GFP_KERNEL);
if (!q->drv) {
IPW_DEBUG_INFO
("can't allocate shared memory for buffer descriptors\n");
@ -4377,7 +4375,8 @@ static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
return;
if (q->drv) {
pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic);
dma_free_coherent(&priv->pci_dev->dev, q->size, q->drv,
q->nic);
q->drv = NULL;
}
@ -4430,16 +4429,16 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH,
sizeof(struct ipw2100_tx_packet),
GFP_ATOMIC);
GFP_KERNEL);
if (!priv->tx_buffers) {
bd_queue_free(priv, &priv->tx_queue);
return -ENOMEM;
}
for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
v = pci_alloc_consistent(priv->pci_dev,
sizeof(struct ipw2100_data_header),
&p);
v = dma_alloc_coherent(&priv->pci_dev->dev,
sizeof(struct ipw2100_data_header), &p,
GFP_KERNEL);
if (!v) {
printk(KERN_ERR DRV_NAME
": %s: PCI alloc failed for tx " "buffers.\n",
@ -4459,11 +4458,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
return 0;
for (j = 0; j < i; j++) {
pci_free_consistent(priv->pci_dev,
sizeof(struct ipw2100_data_header),
priv->tx_buffers[j].info.d_struct.data,
priv->tx_buffers[j].info.d_struct.
data_phys);
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct ipw2100_data_header),
priv->tx_buffers[j].info.d_struct.data,
priv->tx_buffers[j].info.d_struct.data_phys);
}
kfree(priv->tx_buffers);
@ -4540,12 +4538,10 @@ static void ipw2100_tx_free(struct ipw2100_priv *priv)
priv->tx_buffers[i].info.d_struct.txb = NULL;
}
if (priv->tx_buffers[i].info.d_struct.data)
pci_free_consistent(priv->pci_dev,
sizeof(struct ipw2100_data_header),
priv->tx_buffers[i].info.d_struct.
data,
priv->tx_buffers[i].info.d_struct.
data_phys);
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct ipw2100_data_header),
priv->tx_buffers[i].info.d_struct.data,
priv->tx_buffers[i].info.d_struct.data_phys);
}
kfree(priv->tx_buffers);
@ -4608,9 +4604,10 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
return 0;
for (j = 0; j < i; j++) {
pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr,
dma_unmap_single(&priv->pci_dev->dev,
priv->rx_buffers[j].dma_addr,
sizeof(struct ipw2100_rx_packet),
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[j].skb);
}
@ -4662,10 +4659,10 @@ static void ipw2100_rx_free(struct ipw2100_priv *priv)
for (i = 0; i < RX_QUEUE_LENGTH; i++) {
if (priv->rx_buffers[i].rxp) {
pci_unmap_single(priv->pci_dev,
dma_unmap_single(&priv->pci_dev->dev,
priv->rx_buffers[i].dma_addr,
sizeof(struct ipw2100_rx),
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[i].skb);
}
}
@ -6196,7 +6193,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
pci_set_drvdata(pci_dev, priv);
err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_WARNING DRV_NAME
"Error calling pci_set_dma_mask.\n");


@ -3442,8 +3442,9 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pci_dev->dev,
rxq->pool[i].dma_addr,
IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
}
@ -3774,7 +3775,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
return -ENOMEM;
q->bd =
pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
&q->q.dma_addr, GFP_KERNEL);
if (!q->bd) {
IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
sizeof(q->bd[0]) * count);
@ -3816,9 +3818,10 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
/* unmap chunks if any */
for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
dma_unmap_single(&dev->dev,
le32_to_cpu(bd->u.data.chunk_ptr[i]),
le16_to_cpu(bd->u.data.chunk_len[i]),
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
if (txq->txb[txq->q.last_used]) {
libipw_txb_free(txq->txb[txq->q.last_used]);
txq->txb[txq->q.last_used] = NULL;
@ -3850,8 +3853,8 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
}
/* free buffers belonging to queue itself */
pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
q->dma_addr);
dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
q->dma_addr);
kfree(txq->txb);
/* 0 fill whole structure */
@ -5196,8 +5199,8 @@ static void ipw_rx_queue_replenish(void *data)
list_del(element);
rxb->dma_addr =
pci_map_single(priv->pci_dev, rxb->skb->data,
IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
@ -5230,8 +5233,9 @@ static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pci_dev->dev,
rxq->pool[i].dma_addr,
IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
}
@ -8263,9 +8267,8 @@ static void ipw_rx(struct ipw_priv *priv)
}
priv->rxq->queue[i] = NULL;
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
IPW_RX_BUF_SIZE,
PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
pkt = (struct ipw_rx_packet *)rxb->skb->data;
IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
@ -8417,8 +8420,8 @@ static void ipw_rx(struct ipw_priv *priv)
rxb->skb = NULL;
}
pci_unmap_single(priv->pci_dev, rxb->dma_addr,
IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &priv->rxq->rx_used);
i = (i + 1) % RX_QUEUE_SIZE;
@ -10217,11 +10220,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
txb->fragments[i]->len - hdr_len);
tfd->u.data.chunk_ptr[i] =
cpu_to_le32(pci_map_single
(priv->pci_dev,
txb->fragments[i]->data + hdr_len,
txb->fragments[i]->len - hdr_len,
PCI_DMA_TODEVICE));
cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
txb->fragments[i]->data + hdr_len,
txb->fragments[i]->len - hdr_len,
DMA_TO_DEVICE));
tfd->u.data.chunk_len[i] =
cpu_to_le16(txb->fragments[i]->len - hdr_len);
}
@ -10251,10 +10253,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
dev_kfree_skb_any(txb->fragments[i]);
txb->fragments[i] = skb;
tfd->u.data.chunk_ptr[i] =
cpu_to_le32(pci_map_single
(priv->pci_dev, skb->data,
remaining_bytes,
PCI_DMA_TODEVICE));
cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
skb->data,
remaining_bytes,
DMA_TO_DEVICE));
le32_add_cpu(&tfd->u.data.num_chunks, 1);
}
@ -11620,9 +11622,9 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
goto out_pci_disable_device;
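
The ipw2100/ipw2200 hunks above are a mechanical switch from the deprecated
pci_* DMA wrappers to the generic DMA API: the pci_dev argument becomes
&pdev->dev, PCI_DMA_* directions become DMA_*, the coherent allocators take an
explicit GFP flag, and pci_set_dma_mask()/pci_set_consistent_dma_mask() become
dma_set_mask()/dma_set_coherent_mask(). A small illustrative sketch with
hypothetical helpers (map_rx_buffer, alloc_desc_ring), not taken from either
driver:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Old: pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE); */
static int map_rx_buffer(struct pci_dev *pdev, void *buf, size_t len,
                         dma_addr_t *dma)
{
        *dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, *dma))
                return -ENOMEM;
        return 0;
}

/* Old: pci_zalloc_consistent(pdev, size, dma); zeroing is now implicit. */
static void *alloc_desc_ring(struct pci_dev *pdev, size_t size,
                             dma_addr_t *dma)
{
        return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}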


@ -480,7 +480,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
if (!iwlwifi_mod_params.enable_ini)
return;
res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
if (res)
return;
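
The iwlwifi change above swaps request_firmware() for
firmware_request_nowarn(), which loads the file the same way but does not log
a warning when the (optional) blob is absent. A hedged sketch of that pattern,
with a made-up file name:

#include <linux/device.h>
#include <linux/firmware.h>

/* Optional firmware: a missing file is expected and not worth a warning. */
static void load_optional_blob(struct device *dev)
{
        const struct firmware *fw;

        if (firmware_request_nowarn(&fw, "example-optional.bin", dev))
                return; /* not present; carry on without it */

        /* ...consume fw->data / fw->size here... */

        release_firmware(fw);
}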


@ -30,7 +30,7 @@ config PRISM54
For more information refer to the p54 wiki:
http://wireless.kernel.org/en/users/Drivers/p54
http://wireless.wiki.kernel.org/en/users/Drivers/p54
Note: You need a motherboard with DMA support to use any of these cards


@ -3366,8 +3366,8 @@ static void prism2_free_local_data(struct net_device *dev)
}
#if (defined(PRISM2_PCI) && defined(CONFIG_PM)) || defined(PRISM2_PCCARD)
static void prism2_suspend(struct net_device *dev)
#if defined(PRISM2_PCI) || defined(PRISM2_PCCARD)
static void __maybe_unused prism2_suspend(struct net_device *dev)
{
struct hostap_interface *iface;
struct local_info *local;
@ -3385,7 +3385,7 @@ static void prism2_suspend(struct net_device *dev)
/* Disable hardware and firmware */
prism2_hw_shutdown(dev, 0);
}
#endif /* (PRISM2_PCI && CONFIG_PM) || PRISM2_PCCARD */
#endif /* PRISM2_PCI || PRISM2_PCCARD */
/* These might at some point be compiled separately and used as separate

View File

@ -403,36 +403,23 @@ static void prism2_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
#ifdef CONFIG_PM
static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
static int __maybe_unused prism2_pci_suspend(struct device *dev_d)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct net_device *dev = dev_get_drvdata(dev_d);
if (netif_running(dev)) {
netif_stop_queue(dev);
netif_device_detach(dev);
}
prism2_suspend(dev);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int prism2_pci_resume(struct pci_dev *pdev)
static int __maybe_unused prism2_pci_resume(struct device *dev_d)
{
struct net_device *dev = pci_get_drvdata(pdev);
int err;
struct net_device *dev = dev_get_drvdata(dev_d);
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
dev->name);
return err;
}
pci_restore_state(pdev);
prism2_hw_config(dev, 0);
if (netif_running(dev)) {
netif_device_attach(dev);
@ -441,20 +428,19 @@ static int prism2_pci_resume(struct pci_dev *pdev)
return 0;
}
#endif /* CONFIG_PM */
MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
static SIMPLE_DEV_PM_OPS(prism2_pci_pm_ops,
prism2_pci_suspend,
prism2_pci_resume);
static struct pci_driver prism2_pci_driver = {
.name = "hostap_pci",
.id_table = prism2_pci_id_table,
.probe = prism2_pci_probe,
.remove = prism2_pci_remove,
#ifdef CONFIG_PM
.suspend = prism2_pci_suspend,
.resume = prism2_pci_resume,
#endif /* CONFIG_PM */
.driver.pm = &prism2_pci_pm_ops,
};
module_pci_driver(prism2_pci_driver);
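
The hostap_pci conversion above moves from the legacy pci_driver .suspend/.resume hooks, with their pci_save_state()/pci_enable_device() boilerplate, to dev_pm_ops with __maybe_unused callbacks. A minimal sketch of the resulting shape, with example_* names standing in for the driver's own:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);

	netif_device_detach(ndev);
	/* only device-specific quiescing here; the PCI core now saves
	 * config space and handles the D3hot transition */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);

	/* no pci_enable_device()/pci_restore_state() calls needed anymore */
	netif_device_attach(ndev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pci_driver = {
	.name		= "example_pci",
	/* .id_table, .probe and .remove omitted in this sketch */
	.driver.pm	= &example_pm_ops,
};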

View File

@ -27,7 +27,7 @@ config HERMES
You will also very likely also need the Wireless Tools in order to
configure your card and that /etc/pcmcia/wireless.opts works :
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
<https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
config HERMES_PRISM
bool "Support Prism 2/2.5 chipset"
@ -120,7 +120,7 @@ config PCMCIA_HERMES
You will very likely need the Wireless Tools in order to
configure your card and that /etc/pcmcia/wireless.opts works:
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
<https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
config PCMCIA_SPECTRUM
tristate "Symbol Spectrum24 Trilogy PCMCIA card support"

View File

@ -10,7 +10,7 @@ config P54_COMMON
also need to be enabled in order to support any devices.
These devices require softmac firmware which can be found at
<http://wireless.kernel.org/en/users/Drivers/p54>
<http://wireless.wiki.kernel.org/en/users/Drivers/p54>
If you choose to build a module, it'll be called p54common.
@ -22,7 +22,7 @@ config P54_USB
This driver is for USB isl38xx based wireless cards.
These devices require softmac firmware which can be found at
<http://wireless.kernel.org/en/users/Drivers/p54>
<http://wireless.wiki.kernel.org/en/users/Drivers/p54>
If you choose to build a module, it'll be called p54usb.
@ -36,7 +36,7 @@ config P54_PCI
supported by the fullmac driver/firmware.
This driver requires softmac firmware which can be found at
<http://wireless.kernel.org/en/users/Drivers/p54>
<http://wireless.wiki.kernel.org/en/users/Drivers/p54>
If you choose to build a module, it'll be called p54pci.

View File

@ -132,7 +132,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
if (priv->fw_var < 0x500)
wiphy_info(priv->hw->wiphy,
"you are using an obsolete firmware. "
"visit http://wireless.kernel.org/en/users/Drivers/p54 "
"visit http://wireless.wiki.kernel.org/en/users/Drivers/p54 "
"and grab one for \"kernel >= 2.6.28\"!\n");
if (priv->fw_var >= 0x300) {

View File

@ -153,12 +153,12 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
if (!skb)
break;
mapping = pci_map_single(priv->pdev,
mapping = dma_map_single(&priv->pdev->dev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
if (dma_mapping_error(&priv->pdev->dev, mapping)) {
dev_kfree_skb_any(skb);
dev_err(&priv->pdev->dev,
"RX DMA Mapping error\n");
@ -215,19 +215,22 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
len = priv->common.rx_mtu;
}
dma_addr = le32_to_cpu(desc->host_addr);
pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr,
priv->common.rx_mtu + 32,
DMA_FROM_DEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
pci_unmap_single(priv->pdev, dma_addr,
priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pdev->dev, dma_addr,
priv->common.rx_mtu + 32,
DMA_FROM_DEVICE);
rx_buf[i] = NULL;
desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
pci_dma_sync_single_for_device(priv->pdev, dma_addr,
priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
dma_sync_single_for_device(&priv->pdev->dev, dma_addr,
priv->common.rx_mtu + 32,
DMA_FROM_DEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
@ -258,8 +261,9 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
skb = tx_buf[i];
tx_buf[i] = NULL;
pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
dma_unmap_single(&priv->pdev->dev,
le32_to_cpu(desc->host_addr),
le16_to_cpu(desc->len), DMA_TO_DEVICE);
desc->host_addr = 0;
desc->device_addr = 0;
@ -334,9 +338,9 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, mapping)) {
spin_unlock_irqrestore(&priv->lock, flags);
p54_free_skb(dev, skb);
dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
@ -378,10 +382,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
desc = &ring_control->rx_data[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
dma_unmap_single(&priv->pdev->dev,
le32_to_cpu(desc->host_addr),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
kfree_skb(priv->rx_buf_data[i]);
priv->rx_buf_data[i] = NULL;
}
@ -389,10 +393,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
desc = &ring_control->rx_mgmt[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
dma_unmap_single(&priv->pdev->dev,
le32_to_cpu(desc->host_addr),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
kfree_skb(priv->rx_buf_mgmt[i]);
priv->rx_buf_mgmt[i] = NULL;
}
@ -400,10 +404,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
desc = &ring_control->tx_data[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
dma_unmap_single(&priv->pdev->dev,
le32_to_cpu(desc->host_addr),
le16_to_cpu(desc->len),
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
p54_free_skb(dev, priv->tx_buf_data[i]);
priv->tx_buf_data[i] = NULL;
@ -412,10 +416,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
desc = &ring_control->tx_mgmt[i];
if (desc->host_addr)
pci_unmap_single(priv->pdev,
dma_unmap_single(&priv->pdev->dev,
le32_to_cpu(desc->host_addr),
le16_to_cpu(desc->len),
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
p54_free_skb(dev, priv->tx_buf_mgmt[i]);
priv->tx_buf_mgmt[i] = NULL;
@ -568,9 +572,9 @@ static int p54p_probe(struct pci_dev *pdev,
goto err_disable_dev;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto err_free_reg;
@ -603,8 +607,9 @@ static int p54p_probe(struct pci_dev *pdev,
goto err_free_dev;
}
priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
&priv->ring_control_dma);
priv->ring_control = dma_alloc_coherent(&pdev->dev,
sizeof(*priv->ring_control),
&priv->ring_control_dma, GFP_KERNEL);
if (!priv->ring_control) {
dev_err(&pdev->dev, "Cannot allocate rings\n");
err = -ENOMEM;
@ -623,8 +628,8 @@ static int p54p_probe(struct pci_dev *pdev,
if (!err)
return 0;
pci_free_consistent(pdev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
err_iounmap:
iounmap(priv->map);
@ -653,8 +658,8 @@ static void p54p_remove(struct pci_dev *pdev)
wait_for_completion(&priv->fw_loaded);
p54_unregister_common(dev);
release_firmware(priv->firmware);
pci_free_consistent(pdev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
iounmap(priv->map);
pci_release_regions(pdev);
pci_disable_device(pdev);
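
p54pci also switches its ring allocation from pci_alloc_consistent() to dma_alloc_coherent(), which takes an explicit GFP flag. A short sketch of that coherent-allocation pairing, with a placeholder ring layout:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct example_ring {
	__le32 desc[256];	/* placeholder descriptor layout */
};

static struct example_ring *example_alloc_ring(struct pci_dev *pdev,
					       dma_addr_t *dma)
{
	/* pci_alloc_consistent() implied GFP_ATOMIC; probe paths can
	 * usually relax this to GFP_KERNEL once converted. */
	return dma_alloc_coherent(&pdev->dev, sizeof(struct example_ring),
				  dma, GFP_KERNEL);
}

static void example_free_ring(struct pci_dev *pdev,
			      struct example_ring *ring, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, sizeof(*ring), ring, dma);
}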

View File

@ -36,7 +36,7 @@ static struct usb_driver p54u_driver;
* Note:
*
* Always update our wiki's device list (located at:
* http://wireless.kernel.org/en/users/Drivers/p54/devices ),
* http://wireless.wiki.kernel.org/en/users/Drivers/p54/devices ),
* whenever you add a new device.
*/

View File

@ -143,7 +143,7 @@ enum dot11_priv_t {
* together with a CSMA contention. Without this all frames are
* sent with a CSMA contention.
* Bibliography:
* http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html
* https://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html
*/
enum dot11_maxframeburst_t {
/* Values for DOT11_OID_MAXFRAMEBURST */

View File

@ -636,10 +636,10 @@ islpci_alloc_memory(islpci_private *priv)
*/
/* perform the allocation */
priv->driver_mem_address = pci_alloc_consistent(priv->pdev,
HOST_MEM_BLOCK,
&priv->
device_host_address);
priv->driver_mem_address = dma_alloc_coherent(&priv->pdev->dev,
HOST_MEM_BLOCK,
&priv->device_host_address,
GFP_KERNEL);
if (!priv->driver_mem_address) {
/* error allocating the block of PCI memory */
@ -692,11 +692,9 @@ islpci_alloc_memory(islpci_private *priv)
/* map the allocated skb data area to pci */
priv->pci_map_rx_address[counter] =
pci_map_single(priv->pdev, (void *) skb->data,
MAX_FRAGMENT_SIZE_RX + 2,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pdev,
priv->pci_map_rx_address[counter])) {
dma_map_single(&priv->pdev->dev, (void *)skb->data,
MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[counter])) {
priv->pci_map_rx_address[counter] = 0;
/* error mapping the buffer to device
accessible memory address */
@ -727,9 +725,9 @@ islpci_free_memory(islpci_private *priv)
/* free consistent DMA area... */
if (priv->driver_mem_address)
pci_free_consistent(priv->pdev, HOST_MEM_BLOCK,
priv->driver_mem_address,
priv->device_host_address);
dma_free_coherent(&priv->pdev->dev, HOST_MEM_BLOCK,
priv->driver_mem_address,
priv->device_host_address);
/* clear some dangling pointers */
priv->driver_mem_address = NULL;
@ -741,8 +739,8 @@ islpci_free_memory(islpci_private *priv)
for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
struct islpci_membuf *buf = &priv->mgmt_rx[counter];
if (buf->pci_addr)
pci_unmap_single(priv->pdev, buf->pci_addr,
buf->size, PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pdev->dev, buf->pci_addr,
buf->size, DMA_FROM_DEVICE);
buf->pci_addr = 0;
kfree(buf->mem);
buf->size = 0;
@ -752,10 +750,10 @@ islpci_free_memory(islpci_private *priv)
/* clean up data rx buffers */
for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
if (priv->pci_map_rx_address[counter])
pci_unmap_single(priv->pdev,
dma_unmap_single(&priv->pdev->dev,
priv->pci_map_rx_address[counter],
MAX_FRAGMENT_SIZE_RX + 2,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
priv->pci_map_rx_address[counter] = 0;
if (priv->data_low_rx[counter])

View File

@ -50,9 +50,9 @@ islpci_eth_cleanup_transmit(islpci_private *priv,
skb, skb->data, skb->len, skb->truesize);
#endif
pci_unmap_single(priv->pdev,
dma_unmap_single(&priv->pdev->dev,
priv->pci_map_tx_address[index],
skb->len, PCI_DMA_TODEVICE);
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
skb = NULL;
}
@ -176,10 +176,9 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
#endif
/* map the skb buffer to pci memory for DMA operation */
pci_map_address = pci_map_single(priv->pdev,
(void *) skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, pci_map_address)) {
pci_map_address = dma_map_single(&priv->pdev->dev, (void *)skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, pci_map_address)) {
printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
ndev->name);
goto drop_free;
@ -323,9 +322,8 @@ islpci_eth_receive(islpci_private *priv)
#endif
/* delete the streaming DMA mapping before processing the skb */
pci_unmap_single(priv->pdev,
priv->pci_map_rx_address[index],
MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);
dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[index],
MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
/* update the skb structure and align the buffer */
skb_put(skb, size);
@ -431,11 +429,9 @@ islpci_eth_receive(islpci_private *priv)
/* set the streaming DMA mapping for proper PCI bus operation */
priv->pci_map_rx_address[index] =
pci_map_single(priv->pdev, (void *) skb->data,
MAX_FRAGMENT_SIZE_RX + 2,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pdev,
priv->pci_map_rx_address[index])) {
dma_map_single(&priv->pdev->dev, (void *)skb->data,
MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[index])) {
/* error mapping the buffer to device accessible memory address */
DEBUG(SHOW_ERROR_MESSAGES,
"Error mapping DMA address\n");

View File

@ -26,7 +26,8 @@ module_param(init_pcitm, int, 0);
/* In this order: vendor, device, subvendor, subdevice, class, class_mask,
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */
* The latest list can be found at http://wireless.wiki.kernel.org/en/users/Drivers/p54
*/
static const struct pci_device_id prism54_id_tbl[] = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{
@ -63,16 +64,17 @@ MODULE_DEVICE_TABLE(pci, prism54_id_tbl);
static int prism54_probe(struct pci_dev *, const struct pci_device_id *);
static void prism54_remove(struct pci_dev *);
static int prism54_suspend(struct pci_dev *, pm_message_t state);
static int prism54_resume(struct pci_dev *);
static int __maybe_unused prism54_suspend(struct device *);
static int __maybe_unused prism54_resume(struct device *);
static SIMPLE_DEV_PM_OPS(prism54_pm_ops, prism54_suspend, prism54_resume);
static struct pci_driver prism54_driver = {
.name = DRV_NAME,
.id_table = prism54_id_tbl,
.probe = prism54_probe,
.remove = prism54_remove,
.suspend = prism54_suspend,
.resume = prism54_resume,
.driver.pm = &prism54_pm_ops,
};
/******************************************************************************
@ -106,7 +108,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* enable PCI DMA */
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME);
goto do_pci_disable_device;
}
@ -243,16 +245,13 @@ prism54_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
static int
prism54_suspend(struct pci_dev *pdev, pm_message_t state)
static int __maybe_unused
prism54_suspend(struct device *dev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct net_device *ndev = dev_get_drvdata(dev);
islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
BUG_ON(!priv);
pci_save_state(pdev);
/* tell the device not to trigger interrupts for now... */
isl38xx_disable_interrupts(priv->device_base);
@ -266,26 +265,16 @@ prism54_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
static int
prism54_resume(struct pci_dev *pdev)
static int __maybe_unused
prism54_resume(struct device *dev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct net_device *ndev = dev_get_drvdata(dev);
islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
int err;
BUG_ON(!priv);
printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
ndev->name);
return err;
}
pci_restore_state(pdev);
/* alright let's go into the PREBOOT state */
islpci_reset(priv, 1);

View File

@ -115,10 +115,11 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
buf->size = MGMT_FRAME_SIZE;
}
if (buf->pci_addr == 0) {
buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
buf->pci_addr = dma_map_single(&priv->pdev->dev,
buf->mem,
MGMT_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pdev, buf->pci_addr)) {
DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, buf->pci_addr)) {
printk(KERN_WARNING
"Failed to make memory DMA'able.\n");
return -ENOMEM;
@ -203,9 +204,9 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
#endif
err = -ENOMEM;
buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, buf.pci_addr)) {
buf.pci_addr = dma_map_single(&priv->pdev->dev, buf.mem, frag_len,
DMA_TO_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, buf.pci_addr)) {
printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
ndev->name);
goto error_free;
@ -302,8 +303,8 @@ islpci_mgt_receive(struct net_device *ndev)
}
/* Ensure the results of device DMA are visible to the CPU. */
pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
buf->size, PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&priv->pdev->dev, buf->pci_addr,
buf->size, DMA_FROM_DEVICE);
/* Perform endianess conversion for PIMFOR header in-place. */
header = pimfor_decode_header(buf->mem, frag_len);
@ -414,8 +415,8 @@ islpci_mgt_cleanup_transmit(struct net_device *ndev)
for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
struct islpci_membuf *buf = &priv->mgmt_tx[index];
pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
PCI_DMA_TODEVICE);
dma_unmap_single(&priv->pdev->dev, buf->pci_addr, buf->size,
DMA_TO_DEVICE);
buf->pci_addr = 0;
kfree(buf->mem);
buf->mem = NULL;

View File

@ -398,7 +398,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *),
GFP_KERNEL);
if (!new_node->rx_reorder_ptr) {
kfree((u8 *) new_node);
kfree(new_node);
mwifiex_dbg(priv->adapter, ERROR,
"%s: failed to alloc reorder_ptr\n", __func__);
return;

View File

@ -12,6 +12,10 @@ config MT76_USB
tristate
depends on MT76_CORE
config MT76_SDIO
tristate
depends on MT76_CORE
config MT76x02_LIB
tristate
select MT76_CORE

View File

@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MT76_CORE) += mt76.o
obj-$(CONFIG_MT76_USB) += mt76-usb.o
obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o
obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
@ -9,8 +10,10 @@ mt76-y := \
tx.o agg-rx.o mcu.o
mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt76-usb-y := usb.o usb_trace.o
mt76-sdio-y := sdio.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)

View File

@ -9,7 +9,7 @@ mt76_reg_set(void *data, u64 val)
{
struct mt76_dev *dev = data;
dev->bus->wr(dev, dev->debugfs_reg, val);
__mt76_wr(dev, dev->debugfs_reg, val);
return 0;
}
@ -18,7 +18,7 @@ mt76_reg_get(void *data, u64 *val)
{
struct mt76_dev *dev = data;
*val = dev->bus->rr(dev, dev->debugfs_reg);
*val = __mt76_rr(dev, dev->debugfs_reg);
return 0;
}
@ -54,9 +54,6 @@ static int mt76_rx_queues_read(struct seq_file *s, void *data)
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
if (!q->ndesc)
continue;
queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
seq_printf(s, "%d: queued=%d head=%d tail=%d\n",
i, queued, q->head, q->tail);

View File

@ -370,6 +370,12 @@ unmap:
tx_info.buf[n].len, DMA_TO_DEVICE);
free:
#ifdef CONFIG_NL80211_TESTMODE
/* fix tx_done accounting on queue overflow */
if (tx_info.skb == dev->test.tx_skb)
dev->test.tx_done--;
#endif
e.skb = tx_info.skb;
e.txwi = t;
dev->drv->tx_complete_skb(dev, qid, &e);
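
The mt76 dma.c hunk above rolls back the testmode tx_done counter when a test frame is dropped on the queue-overflow path. A rough sketch of that bookkeeping, using illustrative struct and counter names rather than mt76's real ones:

#include <linux/skbuff.h>
#include <linux/types.h>

/* Illustrative testmode bookkeeping; mt76 keeps the real counters in
 * dev->test. */
struct example_testmode {
	struct sk_buff *tx_skb;
	u32 tx_queued;
	u32 tx_done;
};

static void example_tx_drop_fixup(struct example_testmode *tm,
				  struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	/* the shared completion path still counts this frame as done, so
	 * pre-decrement for a frame that never reached the queue (a guess
	 * at the rationale, mirroring the comment in the hunk above) */
	if (skb == tm->tx_skb)
		tm->tx_done--;
#endif
}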

View File

@ -74,6 +74,11 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
&data[i]);
}
#ifdef CONFIG_NL80211_TESTMODE
dev->test.mtd_name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
dev->test.mtd_offset = offset;
#endif
out_put_node:
of_node_put(np);
return ret;

View File

@ -58,12 +58,15 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
CHAN5G(132, 5660),
CHAN5G(136, 5680),
CHAN5G(140, 5700),
CHAN5G(144, 5720),
CHAN5G(149, 5745),
CHAN5G(153, 5765),
CHAN5G(157, 5785),
CHAN5G(161, 5805),
CHAN5G(165, 5825),
CHAN5G(169, 5845),
CHAN5G(173, 5865),
};
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
@ -279,7 +282,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_SUPPORTS_TDLS;
WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_AP_UAPSD;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
@ -289,6 +293,7 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
wiphy->available_antennas_rx = dev->phy.antenna_mask;
hw->txq_data_size = sizeof(struct mt76_txq);
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
if (!hw->max_tx_fragments)
hw->max_tx_fragments = 16;
@ -300,7 +305,11 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
/* TODO: avoid linearization for SDIO */
if (!mt76_is_sdio(dev))
ieee80211_hw_set(hw, TX_FRAG_LIST);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
@ -432,6 +441,12 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
dev->wq = alloc_ordered_workqueue("mt76", 0);
if (!dev->wq) {
ieee80211_free_hw(hw);
return NULL;
}
return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
@ -485,7 +500,12 @@ EXPORT_SYMBOL_GPL(mt76_unregister_device);
void mt76_free_device(struct mt76_dev *dev)
{
mt76_tx_free(dev);
if (dev->wq) {
destroy_workqueue(dev->wq);
dev->wq = NULL;
}
if (mt76_is_mmio(dev))
mt76_tx_free(dev);
ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);
@ -500,6 +520,13 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
return;
}
#ifdef CONFIG_NL80211_TESTMODE
if (dev->test.state == MT76_TM_STATE_RX_FRAMES) {
dev->test.rx_stats.packets[q]++;
if (status->flag & RX_FLAG_FAILED_FCS_CRC)
dev->test.rx_stats.fcs_error[q]++;
}
#endif
__skb_queue_tail(&dev->rx_skb[q], skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
@ -537,8 +564,7 @@ mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
return &msband->chan[idx];
}
static void
mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
struct mt76_channel_state *state = phy->chan_state;
@ -546,6 +572,7 @@ mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
phy->survey_time));
phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
void mt76_update_survey(struct mt76_dev *dev)
{
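
Among the mac80211.c changes above, mt76_alloc_device() now creates an ordered workqueue and unwinds on failure, and mt76_free_device() destroys it. A compact sketch of that allocate/tear-down pairing under assumed example_* names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_dev {
	struct workqueue_struct *wq;
};

static struct example_dev *example_alloc_device(void)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	dev->wq = alloc_ordered_workqueue("example", 0);
	if (!dev->wq) {
		kfree(dev);	/* undo everything done before the failure */
		return NULL;
	}
	return dev;
}

static void example_free_device(struct example_dev *dev)
{
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	kfree(dev);
}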

View File

@ -15,6 +15,7 @@
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"
#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
@ -33,6 +34,7 @@ struct mt76_reg_pair {
enum mt76_bus_type {
MT76_BUS_MMIO,
MT76_BUS_USB,
MT76_BUS_SDIO,
};
struct mt76_bus_ops {
@ -52,6 +54,7 @@ struct mt76_bus_ops {
#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
@ -94,6 +97,7 @@ struct mt76_queue_entry {
union {
struct mt76_txwi_cache *txwi;
struct urb *urb;
int buf_sz;
};
enum mt76_txq_id qid;
bool skip_buf0:1;
@ -146,6 +150,8 @@ struct mt76_mcu_ops {
int len, bool wait_resp);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp);
u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
@ -290,6 +296,7 @@ enum {
MT76_STATE_POWER_OFF,
MT76_STATE_SUSPEND,
MT76_STATE_ROC,
MT76_STATE_PM,
};
struct mt76_hw_cap {
@ -422,7 +429,6 @@ struct mt76_usb {
u16 data_len;
struct tasklet_struct rx_tasklet;
struct workqueue_struct *wq;
struct work_struct stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
@ -439,6 +445,24 @@ struct mt76_usb {
} mcu;
};
struct mt76_sdio {
struct task_struct *tx_kthread;
struct task_struct *kthread;
struct work_struct stat_work;
unsigned long state;
struct sdio_func *func;
struct {
struct mutex lock;
int pse_data_quota;
int ple_data_quota;
int pse_mcu_quota;
int deficit;
} sched;
};
struct mt76_mmio {
void __iomem *regs;
spinlock_t irq_lock;
@ -475,6 +499,47 @@ struct mt76_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
struct mt76_testmode_ops {
int (*set_state)(struct mt76_dev *dev, enum mt76_testmode_state state);
int (*set_params)(struct mt76_dev *dev, struct nlattr **tb,
enum mt76_testmode_state new_state);
int (*dump_stats)(struct mt76_dev *dev, struct sk_buff *msg);
};
struct mt76_testmode_data {
enum mt76_testmode_state state;
u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
struct sk_buff *tx_skb;
u32 tx_count;
u16 tx_msdu_len;
u8 tx_rate_mode;
u8 tx_rate_idx;
u8 tx_rate_nss;
u8 tx_rate_sgi;
u8 tx_rate_ldpc;
u8 tx_antenna_mask;
u32 freq_offset;
u8 tx_power[4];
u8 tx_power_control;
const char *mtd_name;
u32 mtd_offset;
u32 tx_pending;
u32 tx_queued;
u32 tx_done;
struct {
u64 packets[__MT_RXQ_MAX];
u64 fcs_error[__MT_RXQ_MAX];
} rx_stats;
};
struct mt76_phy {
struct ieee80211_hw *hw;
struct mt76_dev *dev;
@ -491,6 +556,8 @@ struct mt76_phy {
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
u32 vif_mask;
int txpower_cur;
u8 antenna_mask;
};
@ -572,9 +639,17 @@ struct mt76_dev {
u32 rxfilter;
#ifdef CONFIG_NL80211_TESTMODE
const struct mt76_testmode_ops *test_ops;
struct mt76_testmode_data test;
#endif
struct workqueue_struct *wq;
union {
struct mt76_mmio mmio;
struct mt76_usb usb;
struct mt76_sdio sdio;
};
};
@ -805,6 +880,15 @@ static inline u8 mt76_tx_power_nss_delta(u8 nss)
return nss_delta[nss - 1];
}
static inline bool mt76_testmode_enabled(struct mt76_dev *dev)
{
#ifdef CONFIG_NL80211_TESTMODE
return dev->test.state != MT76_TM_STATE_OFF;
#else
return false;
#endif
}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
@ -824,6 +908,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
@ -877,6 +962,24 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state);
static inline void mt76_testmode_reset(struct mt76_dev *dev, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
enum mt76_testmode_state state = MT76_TM_STATE_IDLE;
if (disable || dev->test.state == MT76_TM_STATE_OFF)
state = MT76_TM_STATE_OFF;
mt76_testmode_set_state(dev, state);
#endif
}
/* internal */
static inline struct ieee80211_hw *
@ -901,6 +1004,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_dev *dev);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@ -935,13 +1039,12 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76_skb_adjust_pad(struct sk_buff *skb);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
bool ext);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
@ -951,6 +1054,12 @@ void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_queues(struct mt76_dev *dev);
void mt76s_stop_txrx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len);
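
mt76.h grows MT76_BUS_SDIO plus the mt76_is_sdio() predicate, which is why later hunks flip mt76_is_usb() tests into !mt76_is_mmio() ones. A generic sketch of that bus-type dispatch idea, not mt76's actual definitions:

#include <linux/types.h>

enum example_bus_type {
	EXAMPLE_BUS_MMIO,
	EXAMPLE_BUS_USB,
	EXAMPLE_BUS_SDIO,
};

struct example_bus_ops {
	enum example_bus_type type;
	/* register read/write and copy callbacks would live here */
};

struct example_dev {
	const struct example_bus_ops *bus;
};

#define example_is_mmio(dev)	((dev)->bus->type == EXAMPLE_BUS_MMIO)
#define example_is_usb(dev)	((dev)->bus->type == EXAMPLE_BUS_USB)
#define example_is_sdio(dev)	((dev)->bus->type == EXAMPLE_BUS_SDIO)

/* Shared code asks "is it MMIO?" instead of enumerating every other bus,
 * so adding SDIO alongside USB does not disturb the MMIO fast paths. */
static inline bool example_needs_sw_path(struct example_dev *dev)
{
	return !example_is_mmio(dev);
}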

View File

@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&dev->mt76.mutex);
mvif->idx = ffs(~dev->vif_mask) - 1;
mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
if (mvif->idx >= MT7603_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
dev->vif_mask |= BIT(mvif->idx);
dev->mphy.vif_mask |= BIT(mvif->idx);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@ -107,7 +107,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
spin_unlock_bh(&dev->sta_poll_lock);
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
}

View File

@ -108,8 +108,6 @@ struct mt7603_dev {
u32 rxfilter;
u8 vif_mask;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;

View File

@ -28,13 +28,28 @@ config MT7622_WMAC
which has the same feature set as a MT7615, but limited to
2.4 GHz only.
config MT7663_USB_SDIO_COMMON
tristate
select MT7615_COMMON
config MT7663U
tristate "MediaTek MT7663U (USB) support"
select MT76_USB
select MT7615_COMMON
select MT7663_USB_SDIO_COMMON
depends on MAC80211
depends on USB
help
This adds support for MT7663U 802.11ax 2x2:2 wireless devices.
This adds support for MT7663U 802.11ac 2x2:2 wireless devices.
To compile this driver as a module, choose M here.
config MT7663S
tristate "MediaTek MT7663S (SDIO) support"
select MT76_SDIO
select MT7663_USB_SDIO_COMMON
depends on MAC80211
depends on MMC
help
This adds support for MT7663S 802.11ac 2x2:2 wireless devices.
To compile this driver as a module, choose M here.

View File

@ -2,14 +2,19 @@
obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
obj-$(CONFIG_MT7615E) += mt7615e.o
obj-$(CONFIG_MT7663_USB_SDIO_COMMON) += mt7663-usb-sdio-common.o
obj-$(CONFIG_MT7663U) += mt7663u.o
obj-$(CONFIG_MT7663S) += mt7663s.o
CFLAGS_trace.o := -I$(src)
mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \
debugfs.o trace.o
mt7615-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o
mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
mt7663u-y := usb.o usb_mcu.o usb_init.o
mt7663-usb-sdio-common-y := usb_sdio.o
mt7663u-y := usb.o usb_mcu.o
mt7663s-y := sdio.o sdio_mcu.o sdio_txrx.o

View File

@ -6,11 +6,16 @@ static int
mt7615_radar_pattern_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
int err;
if (!mt7615_wait_for_mcu_init(dev))
return 0;
return mt7615_mcu_rdd_send_pattern(dev);
mt7615_mutex_acquire(dev);
err = mt7615_mcu_rdd_send_pattern(dev);
mt7615_mutex_release(dev);
return err;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL,
@ -46,6 +51,52 @@ mt7615_scs_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
mt7615_scs_set, "%lld\n");
static int
mt7615_pm_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
if (!mt7615_wait_for_mcu_init(dev))
return 0;
return mt7615_pm_set_enable(dev, val);
}
static int
mt7615_pm_get(void *data, u64 *val)
{
struct mt7615_dev *dev = data;
*val = dev->pm.enable;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n");
static int
mt7615_pm_idle_timeout_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
dev->pm.idle_timeout = msecs_to_jiffies(val);
return 0;
}
static int
mt7615_pm_idle_timeout_get(void *data, u64 *val)
{
struct mt7615_dev *dev = data;
*val = jiffies_to_msecs(dev->pm.idle_timeout);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7615_pm_idle_timeout_get,
mt7615_pm_idle_timeout_set, "%lld\n");
static int
mt7615_dbdc_set(void *data, u64 val)
{
@ -84,7 +135,10 @@ mt7615_fw_debug_set(void *data, u64 val)
return 0;
dev->fw_debug = val;
mt7615_mutex_acquire(dev);
mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
mt7615_mutex_release(dev);
return 0;
}
@ -111,6 +165,8 @@ mt7615_reset_test_set(void *data, u64 val)
if (!mt7615_wait_for_mcu_init(dev))
return 0;
mt7615_mutex_acquire(dev);
skb = alloc_skb(1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@ -118,6 +174,8 @@ mt7615_reset_test_set(void *data, u64 val)
skb_put(skb, 1);
mt76_tx_queue_skb_raw(dev, 0, skb, 0);
mt7615_mutex_release(dev);
return 0;
}
@ -167,9 +225,13 @@ mt7615_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt7615_dev *dev = file->private;
mt7615_mutex_acquire(dev);
mt7615_ampdu_stat_read_phy(&dev->phy, file);
mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file);
mt7615_mutex_release(dev);
return 0;
}
@ -221,7 +283,10 @@ static int mt7615_read_temperature(struct seq_file *s, void *data)
return 0;
/* cpu */
mt7615_mutex_acquire(dev);
temp = mt7615_mcu_get_temperature(dev, 0);
mt7615_mutex_release(dev);
seq_printf(s, "Temperature: %d\n", temp);
return 0;
@ -233,6 +298,8 @@ mt7615_queues_acq(struct seq_file *s, void *data)
struct mt7615_dev *dev = dev_get_drvdata(s->private);
int i;
mt7615_mutex_acquire(dev);
for (i = 0; i < 16; i++) {
int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
int acs = i / MT7615_MAX_WMM_SETS;
@ -253,6 +320,8 @@ mt7615_queues_acq(struct seq_file *s, void *data)
seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
}
mt7615_mutex_release(dev);
return 0;
}
@ -285,6 +354,29 @@ mt7615_queues_read(struct seq_file *s, void *data)
return 0;
}
static int
mt7615_rf_reg_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
mt7615_rf_wr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg, val);
return 0;
}
static int
mt7615_rf_reg_get(void *data, u64 *val)
{
struct mt7615_dev *dev = data;
*val = mt7615_rf_rr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set,
"0x%08llx\n");
int mt7615_init_debugfs(struct mt7615_dev *dev)
{
struct dentry *dir;
@ -305,6 +397,9 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
debugfs_create_file("idle-timeout", 0600, dir, dev,
&fops_pm_idle_timeout);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);
debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
@ -324,6 +419,11 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
mt7615_read_temperature);
debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);
debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg);
debugfs_create_file_unsafe("rf_regval", 0600, dir, dev,
&fops_rf_reg);
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_init_debugfs);
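
The new runtime-pm and idle-timeout knobs above follow the usual DEFINE_DEBUGFS_ATTRIBUTE() get/set pattern. A minimal sketch of one such knob, assuming a hypothetical example_dev whose debugfs directory already exists:

#include <linux/debugfs.h>
#include <linux/types.h>

struct example_dev {
	struct dentry *debugfs_dir;
	bool pm_enable;
};

static int example_pm_set(void *data, u64 val)
{
	struct example_dev *dev = data;

	dev->pm_enable = !!val;
	return 0;
}

static int example_pm_get(void *data, u64 *val)
{
	struct example_dev *dev = data;

	*val = dev->pm_enable;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_example_pm, example_pm_get, example_pm_set,
			 "%lld\n");

static void example_register_debugfs(struct example_dev *dev)
{
	debugfs_create_file("runtime-pm", 0600, dev->debugfs_dir, dev,
			    &fops_example_pm);
}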

View File

@ -122,10 +122,6 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
mt7615_tx_cleanup(dev);
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;

View File

@ -285,7 +285,9 @@ mt7615_regd_notifier(struct wiphy *wiphy,
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
mt7615_mutex_acquire(dev);
mt7615_dfs_init_radar_detector(phy);
mt7615_mutex_release(dev);
}
static void
@ -321,6 +323,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
if (is_mt7615(&phy->dev->mt76))
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
@ -405,9 +408,6 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
mphy->sband_2g.sband.n_channels = 0;
mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
/* The second interface does not get any packets unless it has a vif */
ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
ret = mt76_register_phy(mphy);
if (ret)
ieee80211_free_hw(mphy->hw);
@ -437,6 +437,12 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work);
INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work);
init_completion(&dev->pm.wake_cmpl);
spin_lock_init(&dev->pm.txq_lock);
set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
skb_queue_head_init(&dev->phy.scan_event_list);
@ -450,6 +456,7 @@ void mt7615_init_device(struct mt7615_dev *dev)
timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0);
mt7615_init_wiphy(hw);
dev->pm.idle_timeout = MT7615_PM_TIMEOUT;
dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
dev->mphy.sband_5g.sband.vht_cap.cap |=
@ -457,5 +464,9 @@ void mt7615_init_device(struct mt7615_dev *dev)
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
mt7615_cap_dbdc_disable(dev);
dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7615_testmode_ops;
#endif
}
EXPORT_SYMBOL_GPL(mt7615_init_device);

View File

@ -186,6 +186,40 @@ mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}
static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
{
#ifdef CONFIG_NL80211_TESTMODE
u32 rxv1 = le32_to_cpu(rxv[0]);
u32 rxv3 = le32_to_cpu(rxv[2]);
u32 rxv4 = le32_to_cpu(rxv[3]);
u32 rxv5 = le32_to_cpu(rxv[4]);
u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;
if (!mode) {
/* CCK */
foe &= ~BIT(11);
foe *= 1000;
foe >>= 11;
} else {
if (foe > 2048)
foe -= 4096;
foe = (foe * foe_const) >> 15;
}
dev->test.last_freq_offset = foe;
dev->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}
static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@ -401,6 +435,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->chain_signal[i]);
}
mt7615_mac_fill_tm_rx(dev, rxd);
rxd += 6;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@ -493,18 +529,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct ieee80211_sta *sta, int pid,
struct ieee80211_key_conf *key, bool beacon)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
bool is_mmio = mt76_is_mmio(&dev->mt76);
u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
struct mt76_phy *mphy = &dev->mphy;
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
bool is_usb = mt76_is_usb(&dev->mt76);
int tx_count = 8;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
__le16 fc = hdr->frame_control;
u32 val, sz_txd = is_usb ? MT_USB_TXD_SIZE : MT_TXD_SIZE;
int tx_count = 8;
u16 seqno = 0;
if (vif) {
@ -530,10 +566,10 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
p_fmt = MT_TX_TYPE_FW;
q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
} else {
p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
}
@ -617,16 +653,19 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
}
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
if (ieee80211_is_data_qos(hdr->frame_control)) {
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
val |= MT_TXD3_SN_VALID;
} else if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
seqno = le16_to_cpu(hdr->seq_ctrl);
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
val |= MT_TXD3_SN_VALID;
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
bar = (struct ieee80211_bar *)skb->data;
seqno = le16_to_cpu(bar->start_seq_num);
}
val |= MT_TXD3_SN_VALID |
FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
}
val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
txwi[3] |= cpu_to_le32(val);
@ -636,7 +675,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
if (is_usb)
if (!is_mmio)
txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
@ -878,6 +917,9 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct mt7615_dev *dev = phy->dev;
struct mt7615_wtbl_desc *wd;
if (work_pending(&dev->wtbl_work))
return -EBUSY;
wd = kzalloc(sizeof(*wd), GFP_ATOMIC);
if (!wd)
return -ENOMEM;
@ -888,11 +930,34 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
&wd->rate);
list_add_tail(&wd->node, &dev->wd_head);
queue_work(dev->mt76.usb.wq, &dev->wtbl_work);
queue_work(dev->mt76.wq, &dev->wtbl_work);
return 0;
}
u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
{
u32 addr, val, val2;
u8 offset;
addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;
offset = tid * 12;
addr += 4 * (offset / 32);
offset %= 32;
val = mt76_rr(dev, addr);
val >>= (tid % 32);
if (offset > 20) {
addr += 4;
val2 = mt76_rr(dev, addr);
val |= val2 << (32 - offset);
}
return val & GENMASK(11, 0);
}
void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
@ -902,7 +967,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct mt7615_rate_desc rd;
u32 w5, w27, addr;
if (mt76_is_usb(&dev->mt76)) {
if (!mt76_is_mmio(&dev->mt76)) {
mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
return;
}
@ -961,6 +1026,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
sta->rate_probe = !!probe_rate;
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
@ -1169,7 +1235,6 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
phy = dev->mt76.phy2->priv;
mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
sta->rate_probe = false;
}
spin_unlock_bh(&dev->mt76.lock);
} else {
@ -1373,6 +1438,12 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
}
dev_kfree_skb(skb);
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
tasklet_schedule(&dev->mt76.tx_tasklet);
}
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@ -1462,7 +1533,7 @@ void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
bool ext_phy = phy != &dev->phy;
u32 reg, mask;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
if (phy->scs_en == enable)
goto out;
@ -1489,7 +1560,7 @@ void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
phy->scs_en = enable;
out:
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
}
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
@ -1679,9 +1750,9 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
state->noise = -(phy->noise >> 4);
}
void mt7615_update_channel(struct mt76_dev *mdev)
static void __mt7615_update_channel(struct mt7615_dev *dev)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt76_dev *mdev = &dev->mt76;
mt7615_phy_update_channel(&mdev->phy, 0);
if (mdev->phy2)
@ -1690,8 +1761,32 @@ void mt7615_update_channel(struct mt76_dev *mdev)
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
void mt7615_update_channel(struct mt76_dev *mdev)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
if (mt7615_pm_wake(dev))
return;
__mt7615_update_channel(dev);
mt7615_pm_power_save_sched(dev);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);
static void mt7615_update_survey(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
ktime_t cur_time;
__mt7615_update_channel(dev);
cur_time = ktime_get_boottime();
mt76_update_survey_active_time(&mdev->phy, cur_time);
if (mdev->phy2)
mt76_update_survey_active_time(mdev->phy2, cur_time);
}
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
@ -1740,6 +1835,163 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
}
}
void mt7615_pm_wake_work(struct work_struct *work)
{
struct mt7615_dev *dev;
struct mt76_phy *mphy;
int i;
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
pm.wake_work);
mphy = dev->phy.mt76;
if (mt7615_driver_own(dev)) {
dev_err(mphy->dev->dev, "failed to wake device\n");
goto out;
}
spin_lock_bh(&dev->pm.txq_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
struct ieee80211_sta *sta = NULL;
if (!dev->pm.tx_q[i].skb)
continue;
if (msta && wcid->sta)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
mt76_tx(mphy, sta, wcid, dev->pm.tx_q[i].skb);
dev->pm.tx_q[i].skb = NULL;
}
spin_unlock_bh(&dev->pm.txq_lock);
tasklet_schedule(&dev->mt76.tx_tasklet);
out:
ieee80211_wake_queues(mphy->hw);
complete_all(&dev->pm.wake_cmpl);
}
int mt7615_pm_wake(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = dev->phy.mt76;
if (!mt7615_firmware_offload(dev))
return 0;
if (!mt76_is_mmio(mphy->dev))
return 0;
if (!test_bit(MT76_STATE_PM, &mphy->state))
return 0;
if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
return 0;
if (queue_work(dev->mt76.wq, &dev->pm.wake_work))
reinit_completion(&dev->pm.wake_cmpl);
if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) {
ieee80211_wake_queues(mphy->hw);
return -ETIMEDOUT;
}
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_pm_wake);
void mt7615_pm_power_save_sched(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = dev->phy.mt76;
if (!mt7615_firmware_offload(dev))
return;
if (!mt76_is_mmio(mphy->dev))
return;
if (!dev->pm.enable || !test_bit(MT76_STATE_RUNNING, &mphy->state))
return;
dev->pm.last_activity = jiffies;
if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
return;
if (!test_bit(MT76_STATE_PM, &mphy->state))
queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work,
dev->pm.idle_timeout);
}
EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched);
void mt7615_pm_power_save_work(struct work_struct *work)
{
struct mt7615_dev *dev;
unsigned long delta;
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
pm.ps_work.work);
delta = dev->pm.idle_timeout;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
}
if (!mt7615_firmware_own(dev))
return;
out:
queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
static void
mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7615_phy *phy = priv;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable))
return;
if (dev->pm.enable) {
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
mt76_set(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
} else {
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
mt76_clear(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
}
}
int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable)
{
struct mt76_phy *mphy = dev->phy.mt76;
if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
return -EOPNOTSUPP;
mt7615_mutex_acquire(dev);
if (dev->pm.enable == enable)
goto out;
dev->pm.enable = enable;
ieee80211_iterate_active_interfaces(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_pm_interface_iter, mphy->priv);
out:
mt7615_mutex_release(dev);
return 0;
}
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_phy *phy;
@ -1749,9 +2001,9 @@ void mt7615_mac_work(struct work_struct *work)
mac_work.work);
mdev = &phy->dev->mt76;
mutex_lock(&mdev->mutex);
mt7615_mutex_acquire(phy->dev);
mt76_update_survey(mdev);
mt7615_update_survey(phy->dev);
if (++phy->mac_work_count == 5) {
phy->mac_work_count = 0;
@ -1759,7 +2011,7 @@ void mt7615_mac_work(struct work_struct *work)
mt7615_mac_scs_check(phy);
}
mutex_unlock(&mdev->mutex);
mt7615_mutex_release(phy->dev);
mt76_tx_status_check(mdev, NULL, false);
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
@ -1863,7 +2115,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
napi_disable(&dev->mt76.napi[1]);
napi_disable(&dev->mt76.tx_napi);
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
@ -1896,10 +2148,10 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
mutex_unlock(&dev->mt76.mutex);
mt7615_update_beacons(dev);
mt7615_mutex_release(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
MT7615_WATCHDOG_TIME);
if (phy2)
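
The runtime power-management code added above defers a power-save work item by an idle timeout and re-arms it while the device stays busy. A simplified sketch of that scheduling logic, with an assumed example_pm container instead of mt7615's dev->pm:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_pm {
	struct workqueue_struct *wq;
	struct delayed_work ps_work;
	unsigned long last_activity;
	unsigned long idle_timeout;	/* in jiffies */
	bool enable;
};

/* Called from hot paths: push the power-save deadline forward. */
static void example_pm_sched(struct example_pm *pm)
{
	if (!pm->enable)
		return;

	pm->last_activity = jiffies;
	queue_delayed_work(pm->wq, &pm->ps_work, pm->idle_timeout);
}

/* Delayed work: drop to low power only after a full idle timeout,
 * otherwise re-arm for the remaining time. */
static void example_pm_ps_work(struct work_struct *work)
{
	struct example_pm *pm = container_of(work, struct example_pm,
					     ps_work.work);
	unsigned long delta = pm->idle_timeout;

	if (time_is_after_jiffies(pm->last_activity + delta)) {
		delta = pm->last_activity + delta - jiffies;
		queue_delayed_work(pm->wq, &pm->ps_work, delta);
		return;
	}

	/* ... hand ownership back to the firmware here ... */
}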

View File

@ -100,11 +100,16 @@ enum rx_pkt_type {
#define MT_RXV2_GROUP_ID GENMASK(26, 21)
#define MT_RXV2_LENGTH GENMASK(20, 0)
#define MT_RXV3_WB_RSSI GENMASK(31, 24)
#define MT_RXV3_IB_RSSI GENMASK(23, 16)
#define MT_RXV4_RCPI3 GENMASK(31, 24)
#define MT_RXV4_RCPI2 GENMASK(23, 16)
#define MT_RXV4_RCPI1 GENMASK(15, 8)
#define MT_RXV4_RCPI0 GENMASK(7, 0)
#define MT_RXV5_FOE GENMASK(11, 0)
#define MT_RXV6_NF3 GENMASK(31, 24)
#define MT_RXV6_NF2 GENMASK(23, 16)
#define MT_RXV6_NF1 GENMASK(15, 8)

View File

@ -24,6 +24,22 @@ static bool mt7615_dev_running(struct mt7615_dev *dev)
return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
}
static void mt7615_free_pending_tx_skbs(struct mt7615_dev *dev,
struct mt7615_sta *msta)
{
int i;
spin_lock_bh(&dev->pm.txq_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (msta && dev->pm.tx_q[i].msta != msta)
continue;
dev_kfree_skb(dev->pm.tx_q[i].skb);
dev->pm.tx_q[i].skb = NULL;
}
spin_unlock_bh(&dev->pm.txq_lock);
}
static int mt7615_start(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
@ -33,7 +49,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
if (!mt7615_wait_for_mcu_init(dev))
return -EIO;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
running = mt7615_dev_running(dev);
@ -60,7 +76,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
if (!running)
mt7615_mac_reset_counters(dev);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return 0;
}
@ -74,7 +90,14 @@ static void mt7615_stop(struct ieee80211_hw *hw)
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
mutex_lock(&dev->mt76.mutex);
cancel_delayed_work_sync(&dev->pm.ps_work);
cancel_work_sync(&dev->pm.wake_work);
mt7615_free_pending_tx_skbs(dev, NULL);
mt7615_mutex_acquire(dev);
mt76_testmode_reset(&dev->mt76, true);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
cancel_delayed_work_sync(&phy->scan_work);
@ -89,7 +112,7 @@ static void mt7615_stop(struct ieee80211_hw *hw)
mt7615_mcu_set_mac_enable(dev, 0, false);
}
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
}
static int get_omac_idx(enum nl80211_iftype type, u32 mask)
@ -135,9 +158,15 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
bool ext_phy = phy != &dev->phy;
int idx, ret = 0;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
mvif->idx = ffs(~dev->vif_mask) - 1;
mt76_testmode_reset(&dev->mt76, true);
if (vif->type == NL80211_IFTYPE_MONITOR &&
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
if (mvif->idx >= MT7615_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@ -157,7 +186,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
else
mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
dev->vif_mask |= BIT(mvif->idx);
dev->mphy.vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
phy->omac_mask |= BIT(mvif->omac_idx);
@ -180,8 +209,20 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
}
ret = mt7615_mcu_add_dev_info(dev, vif, true);
if (ret)
goto out;
if (dev->pm.enable) {
ret = mt7615_mcu_set_bss_pm(dev, vif, true);
if (ret)
goto out;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
mt76_set(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
}
out:
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return ret;
}
@ -197,17 +238,32 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
/* TODO: disable beacon for the bss */
mt7615_mutex_acquire(dev);
mt76_testmode_reset(&dev->mt76, true);
if (vif == phy->monitor_vif)
phy->monitor_vif = NULL;
mt7615_free_pending_tx_skbs(dev, msta);
if (dev->pm.enable) {
bool ext_phy = phy != &dev->phy;
mt7615_mcu_set_bss_pm(dev, vif, false);
mt76_clear(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
}
mt7615_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
if (vif->txq)
mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
phy->omac_mask &= ~BIT(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
@ -234,7 +290,7 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy)
phy->dfs_state = -1;
}
static int mt7615_set_channel(struct mt7615_phy *phy)
int mt7615_set_channel(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
@ -242,7 +298,8 @@ static int mt7615_set_channel(struct mt7615_phy *phy)
cancel_delayed_work_sync(&phy->mac_work);
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
set_bit(MT76_RESET, &phy->mt76->state);
mt7615_init_dfs_state(phy);
@ -260,7 +317,7 @@ static int mt7615_set_channel(struct mt7615_phy *phy)
mt7615_mac_set_timing(phy);
ret = mt7615_dfs_init_radar_detector(phy);
mt7615_mac_cca_stats_reset(phy);
mt7615_mcu_set_sku_en(phy, true);
mt7615_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76));
mt7615_mac_reset_counters(dev);
phy->noise = 0;
@ -268,11 +325,15 @@ static int mt7615_set_channel(struct mt7615_phy *phy)
out:
clear_bit(MT76_RESET, &phy->mt76->state);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
mt76_txq_schedule_all(phy->mt76);
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
if (!mt76_testmode_enabled(&dev->mt76))
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
return ret;
}
@ -301,7 +362,7 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
wd->key.cmd = cmd;
list_add_tail(&wd->node, &dev->wd_head);
queue_work(dev->mt76.usb.wq, &dev->wtbl_work);
queue_work(dev->mt76.wq, &dev->wtbl_work);
return 0;
}
@ -315,7 +376,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
struct mt76_wcid *wcid = &msta->wcid;
int idx = key->keyidx;
int idx = key->keyidx, err;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@ -345,6 +406,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
mt7615_mutex_acquire(dev);
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
@ -354,10 +417,14 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
if (mt76_is_usb(&dev->mt76))
return mt7615_queue_key_update(dev, cmd, msta, key);
if (mt76_is_mmio(&dev->mt76))
err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
else
err = mt7615_queue_key_update(dev, cmd, msta, key);
return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
mt7615_mutex_release(dev);
return err;
}
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
@ -369,14 +436,23 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER)) {
#ifdef CONFIG_NL80211_TESTMODE
if (dev->mt76.test.state != MT76_TM_STATE_OFF) {
mt7615_mutex_acquire(dev);
mt76_testmode_reset(&dev->mt76, false);
mt7615_mutex_release(dev);
}
#endif
ieee80211_stop_queues(hw);
ret = mt7615_set_channel(phy);
ieee80211_wake_queues(hw);
}
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mt76_testmode_reset(&dev->mt76, true);
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
else
@ -385,7 +461,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
}
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return ret;
}
@ -396,11 +472,17 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
int err;
mt7615_mutex_acquire(dev);
queue = mt7615_lmac_mapping(dev, queue);
queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
err = mt7615_mcu_set_wmm(dev, queue, params);
return mt7615_mcu_set_wmm(dev, queue, params);
mt7615_mutex_release(dev);
return err;
}
static void mt7615_configure_filter(struct ieee80211_hw *hw,
@ -419,10 +501,13 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR1_DROP_CFACK;
u32 flags = 0;
mt7615_mutex_acquire(dev);
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
phy->rxfilter &= ~(_hw); \
phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
if (!mt76_testmode_enabled(&dev->mt76)) \
phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);\
} while (0)
phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
@ -455,6 +540,8 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
else
mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
mt7615_mutex_release(dev);
}
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
@ -465,7 +552,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
@ -491,7 +578,10 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
mt7615_mcu_set_vif_ps(dev, vif);
mutex_unlock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ARP_FILTER)
mt7615_mcu_update_arp_filter(hw, vif, info);
mt7615_mutex_release(dev);
}
static void
@ -501,9 +591,9 @@ mt7615_channel_switch_beacon(struct ieee80211_hw *hw,
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
mt7615_mcu_add_beacon(dev, hw, vif, true);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
}
int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@ -512,7 +602,7 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
int idx;
int idx, err;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx < 0)
@ -524,6 +614,10 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.idx = idx;
msta->wcid.ext_phy = mvif->band_idx;
err = mt7615_pm_wake(dev);
if (err)
return err;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
struct mt7615_phy *phy;
@ -534,6 +628,8 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
mt7615_mcu_sta_add(dev, vif, sta, true);
mt7615_pm_power_save_sched(dev);
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_add);
@ -544,6 +640,9 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
mt7615_free_pending_tx_skbs(dev, msta);
mt7615_pm_wake(dev);
mt7615_mcu_sta_add(dev, vif, sta, false);
mt7615_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@ -559,6 +658,8 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
mt7615_pm_power_save_sched(dev);
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove);
@ -582,11 +683,29 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
break;
}
msta->n_rates = i;
mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
msta->rate_probe = false;
if (!test_bit(MT76_STATE_PM, &phy->mt76->state))
mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
spin_unlock_bh(&dev->mt76.lock);
}
static void
mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt76_phy *mphy = phy->mt76;
if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
return;
if (test_bit(MT76_STATE_PM, &mphy->state)) {
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return;
}
tasklet_schedule(&dev->mt76.tx_tasklet);
}
static void mt7615_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@ -596,22 +715,43 @@ static void mt7615_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct mt7615_sta *msta = NULL;
int qid;
if (control->sta) {
struct mt7615_sta *sta;
sta = (struct mt7615_sta *)control->sta->drv_priv;
wcid = &sta->wcid;
msta = (struct mt7615_sta *)control->sta->drv_priv;
wcid = &msta->wcid;
}
if (vif && !control->sta) {
struct mt7615_vif *mvif;
mvif = (struct mt7615_vif *)vif->drv_priv;
wcid = &mvif->sta.wcid;
msta = &mvif->sta;
wcid = &msta->wcid;
}
mt76_tx(mphy, control->sta, wcid, skb);
if (!test_bit(MT76_STATE_PM, &mphy->state)) {
mt76_tx(mphy, control->sta, wcid, skb);
return;
}
qid = skb_get_queue_mapping(skb);
if (qid >= MT_TXQ_PSD) {
qid = IEEE80211_AC_BE;
skb_set_queue_mapping(skb, qid);
}
spin_lock_bh(&dev->pm.txq_lock);
if (!dev->pm.tx_q[qid].skb) {
ieee80211_stop_queues(hw);
dev->pm.tx_q[qid].msta = msta;
dev->pm.tx_q[qid].skb = skb;
queue_work(dev->mt76.wq, &dev->pm.wake_work);
} else {
dev_kfree_skb(skb);
}
spin_unlock_bh(&dev->pm.txq_lock);
}
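
[Editor's aside, not part of the commit] While MT76_STATE_PM is set, the new TX path above parks at most one frame per access category in dev->pm.tx_q and kicks the wake worker; mt7615_free_pending_tx_skbs() at the top of this file is the matching flush. The standalone sketch below models that one-slot-per-AC deferral in plain userspace C; all names are illustrative stand-ins, not driver symbols.

#include <stdbool.h>
#include <stdio.h>

#define NUM_ACS 4   /* mirrors IEEE80211_NUM_ACS */

struct frame { int id; };

/* One deferred slot per access category, like dev->pm.tx_q[] */
static struct frame *tx_q[NUM_ACS];
static bool radio_awake;

/* Producer: transmit directly when awake, otherwise park one frame per AC */
static void toy_tx(struct frame *f, int ac)
{
	if (radio_awake) {
		printf("tx frame %d on AC %d\n", f->id, ac);
		return;
	}
	if (!tx_q[ac])
		tx_q[ac] = f;          /* first frame wins the slot */
	else
		printf("drop frame %d (AC %d slot busy)\n", f->id, ac);
}

/* Consumer: on wake, flush every parked frame and clear the slots */
static void toy_wake(void)
{
	radio_awake = true;
	for (int ac = 0; ac < NUM_ACS; ac++) {
		if (!tx_q[ac])
			continue;
		printf("flush frame %d on AC %d\n", tx_q[ac]->id, ac);
		tx_q[ac] = NULL;
	}
}

int main(void)
{
	struct frame a = { 1 }, b = { 2 }, c = { 3 };

	toy_tx(&a, 0);   /* parked */
	toy_tx(&b, 0);   /* dropped, slot 0 already taken */
	toy_tx(&c, 2);   /* parked */
	toy_wake();      /* flushes frames 1 and 3 */
	return 0;
}
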
static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
@ -619,9 +759,9 @@ static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
mt7615_mcu_set_rts_thresh(phy, val);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return 0;
}
@ -645,7 +785,8 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)txq->drv_priv;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
@ -660,6 +801,9 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mtxq->aggr = true;
mtxq->send_bar = false;
mt7615_mcu_add_tx_ba(dev, params, true);
ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
ieee80211_send_bar(vif, sta->addr, tid,
IEEE80211_SN_TO_SEQ(ssn));
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
@ -667,6 +811,8 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7615_mcu_add_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
params->ssn = ssn;
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
@ -676,7 +822,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return ret;
}
@ -721,27 +867,47 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
u32 t32[2];
} tsf;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return tsf.t64;
}
static void
mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 timestamp)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
mt7615_mutex_acquire(dev);
mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
/* TSF software overwrite */
mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
mt7615_mutex_release(dev);
}
static void
mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_dev *dev = phy->dev;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
phy->coverage_class = max_t(s16, coverage_class, 0);
mt7615_mac_set_timing(phy);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
}
static int
@ -758,7 +924,7 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
tx_ant = BIT(ffs(tx_ant) - 1) - 1;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
phy->mt76->antenna_mask = tx_ant;
if (ext_phy) {
@ -771,7 +937,7 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
mt76_set_stream_caps(phy->mt76, true);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return 0;
}
@ -794,9 +960,11 @@ void mt7615_roc_work(struct work_struct *work)
if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
return;
mt7615_mutex_acquire(phy->dev);
ieee80211_iterate_active_interfaces(phy->mt76->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_roc_iter, phy);
mt7615_mutex_release(phy->dev);
ieee80211_remain_on_channel_expired(phy->mt76->hw);
}
@ -844,17 +1012,26 @@ static int
mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
int err;
return mt7615_mcu_hw_scan(mphy->priv, vif, req);
mt7615_mutex_acquire(dev);
err = mt7615_mcu_hw_scan(mphy->priv, vif, req);
mt7615_mutex_release(dev);
return err;
}
static void
mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
mt7615_mutex_acquire(dev);
mt7615_mcu_cancel_hw_scan(mphy->priv, vif);
mt7615_mutex_release(dev);
}
static int
@ -862,22 +1039,35 @@ mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
int err;
mt7615_mutex_acquire(dev);
err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req);
if (err < 0)
return err;
goto out;
return mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
out:
mt7615_mutex_release(dev);
return err;
}
static int
mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
int err;
return mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
mt7615_mutex_acquire(dev);
err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
mt7615_mutex_release(dev);
return err;
}
static int mt7615_remain_on_channel(struct ieee80211_hw *hw,
@ -892,20 +1082,24 @@ static int mt7615_remain_on_channel(struct ieee80211_hw *hw,
if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))
return 0;
mt7615_mutex_acquire(phy->dev);
err = mt7615_mcu_set_roc(phy, vif, chan, duration);
if (err < 0) {
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
return err;
goto out;
}
if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) {
mt7615_mcu_set_roc(phy, vif, NULL, 0);
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
return -ETIMEDOUT;
err = -ETIMEDOUT;
}
return 0;
out:
mt7615_mutex_release(phy->dev);
return err;
}
static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
@ -919,7 +1113,9 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
mt7615_mutex_acquire(phy->dev);
mt7615_mcu_set_roc(phy, vif, NULL, 0);
mt7615_mutex_release(phy->dev);
return 0;
}
@ -933,7 +1129,10 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
bool ext_phy = phy != &dev->phy;
int err = 0;
mutex_lock(&dev->mt76.mutex);
cancel_delayed_work_sync(&dev->pm.ps_work);
mt7615_free_pending_tx_skbs(dev, NULL);
mt7615_mutex_acquire(dev);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
cancel_delayed_work_sync(&phy->scan_work);
@ -949,7 +1148,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
if (!mt7615_dev_running(dev))
err = mt7615_mcu_set_hif_suspend(dev, true);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return err;
}
@ -960,7 +1159,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
struct mt7615_phy *phy = mt7615_hw_phy(hw);
bool running, ext_phy = phy != &dev->phy;
mutex_lock(&dev->mt76.mutex);
mt7615_mutex_acquire(dev);
running = mt7615_dev_running(dev);
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
@ -970,7 +1169,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
err = mt7615_mcu_set_hif_suspend(dev, false);
if (err < 0) {
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return err;
}
}
@ -984,7 +1183,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
MT7615_WATCHDOG_TIME);
mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
mutex_unlock(&dev->mt76.mutex);
mt7615_mutex_release(dev);
return 0;
}
@ -1001,7 +1200,11 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *data)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
mt7615_mutex_acquire(dev);
mt7615_mcu_update_gtk_rekey(hw, vif, data);
mt7615_mutex_release(dev);
}
#endif /* CONFIG_PM */
@ -1021,7 +1224,7 @@ const struct ieee80211_ops mt7615_ops = {
.set_key = mt7615_set_key,
.ampdu_action = mt7615_ampdu_action,
.set_rts_threshold = mt7615_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
.wake_tx_queue = mt7615_wake_tx_queue,
.sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
.sw_scan_start = mt76_sw_scan,
.sw_scan_complete = mt76_sw_scan_complete,
@ -1030,6 +1233,7 @@ const struct ieee80211_ops mt7615_ops = {
.channel_switch_beacon = mt7615_channel_switch_beacon,
.get_stats = mt7615_get_stats,
.get_tsf = mt7615_get_tsf,
.set_tsf = mt7615_set_tsf,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
.set_antenna = mt7615_set_antenna,
@ -1040,6 +1244,8 @@ const struct ieee80211_ops mt7615_ops = {
.sched_scan_stop = mt7615_stop_sched_scan,
.remain_on_channel = mt7615_remain_on_channel,
.cancel_remain_on_channel = mt7615_cancel_remain_on_channel,
CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
#ifdef CONFIG_PM
.suspend = mt7615_suspend,
.resume = mt7615_resume,

View File

@ -146,13 +146,19 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
mcu_txd->cid = mcu_cmd;
break;
case MCU_CE_PREFIX:
mcu_txd->set_query = MCU_Q_SET;
if (cmd & MCU_QUERY_MASK)
mcu_txd->set_query = MCU_Q_QUERY;
else
mcu_txd->set_query = MCU_Q_SET;
mcu_txd->cid = mcu_cmd;
break;
default:
mcu_txd->cid = MCU_CMD_EXT_CID;
mcu_txd->set_query = MCU_Q_SET;
mcu_txd->ext_cid = cmd;
if (cmd & MCU_QUERY_PREFIX)
mcu_txd->set_query = MCU_Q_QUERY;
else
mcu_txd->set_query = MCU_Q_SET;
mcu_txd->ext_cid = mcu_cmd;
mcu_txd->ext_cid_ack = 1;
break;
}
@ -180,8 +186,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
int ret = 0;
if (seq != rxd->seq)
return -EAGAIN;
if (seq != rxd->seq) {
ret = -EAGAIN;
goto out;
}
switch (cmd) {
case MCU_CMD_PATCH_SEM_CONTROL:
@ -192,6 +200,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
skb_pull(skb, sizeof(*rxd));
ret = le32_to_cpu(*(__le32 *)skb->data);
break;
case MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX:
skb_pull(skb, sizeof(*rxd));
ret = le32_to_cpu(*(__le32 *)&skb->data[8]);
break;
case MCU_UNI_CMD_DEV_INFO_UPDATE:
case MCU_UNI_CMD_BSS_INFO_UPDATE:
case MCU_UNI_CMD_STA_REC_UPDATE:
@ -205,9 +217,18 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
ret = le32_to_cpu(event->status);
break;
}
case MCU_CMD_REG_READ: {
struct mt7615_mcu_reg_event *event;
skb_pull(skb, sizeof(*rxd));
event = (struct mt7615_mcu_reg_event *)skb->data;
ret = (int)le32_to_cpu(event->val);
break;
}
default:
break;
}
out:
dev_kfree_skb(skb);
return ret;
@ -271,6 +292,38 @@ int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send);
u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg)
{
struct {
__le32 wifi_stream;
__le32 address;
__le32 data;
} req = {
.wifi_stream = cpu_to_le32(wf),
.address = cpu_to_le32(reg),
};
return __mt76_mcu_send_msg(&dev->mt76,
MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX,
&req, sizeof(req), true);
}
int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
{
struct {
__le32 wifi_stream;
__le32 address;
__le32 data;
} req = {
.wifi_stream = cpu_to_le32(wf),
.address = cpu_to_le32(reg),
.data = cpu_to_le32(val),
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req,
sizeof(req), false);
}
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
@ -926,6 +979,38 @@ mt7615_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
}
static void
mt7615_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct sta_rec_uapsd *uapsd;
struct tlv *tlv;
if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
return;
tlv = mt7615_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
uapsd = (struct sta_rec_uapsd *)tlv;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
uapsd->dac_map |= BIT(3);
uapsd->tac_map |= BIT(3);
}
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
uapsd->dac_map |= BIT(2);
uapsd->tac_map |= BIT(2);
}
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
uapsd->dac_map |= BIT(1);
uapsd->tac_map |= BIT(1);
}
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
uapsd->dac_map |= BIT(0);
uapsd->tac_map |= BIT(0);
}
uapsd->max_sp = sta->max_sp;
}
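
[Editor's aside] The four if-blocks above translate mac80211's per-AC U-APSD bitmap into the firmware's dac_map/tac_map positions (VO to bit 3, VI to bit 2, BE to bit 1, BK to bit 0). A table-driven equivalent is sketched below for illustration only; it uses locally defined constants rather than the real mac80211/mt76 ones.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the four WMM access categories (not kernel defines) */
enum { AC_VO, AC_VI, AC_BE, AC_BK, AC_MAX };

/* Firmware bit used by the U-APSD record for each AC, per the hunk above:
 * VO -> BIT(3), VI -> BIT(2), BE -> BIT(1), BK -> BIT(0)
 */
static const uint8_t fw_bit[AC_MAX] = {
	[AC_VO] = 1u << 3,
	[AC_VI] = 1u << 2,
	[AC_BE] = 1u << 1,
	[AC_BK] = 1u << 0,
};

/* Build dac_map/tac_map from a bitmap of U-APSD enabled ACs (bit i == AC i) */
static void build_uapsd_maps(uint8_t enabled_acs, uint8_t *dac, uint8_t *tac)
{
	*dac = 0;
	*tac = 0;
	for (int ac = 0; ac < AC_MAX; ac++) {
		if (enabled_acs & (1u << ac)) {
			*dac |= fw_bit[ac];	/* delivery-enabled */
			*tac |= fw_bit[ac];	/* trigger-enabled */
		}
	}
}

int main(void)
{
	uint8_t dac, tac;

	build_uapsd_maps((1u << AC_VO) | (1u << AC_BK), &dac, &tac);
	printf("dac=0x%02x tac=0x%02x\n", dac, tac);	/* 0x09 for VO+BK */
	return 0;
}
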
static void
mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
@ -1188,8 +1273,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(sskb);
mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable);
if (enable && sta)
if (enable && sta) {
mt7615_mcu_sta_ht_tlv(sskb, sta);
mt7615_mcu_sta_uapsd(sskb, vif, sta);
}
wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
NULL, &wskb);
@ -1206,8 +1293,12 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
skb = enable ? wskb : sskb;
err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
if (err < 0)
if (err < 0) {
skb = enable ? sskb : wskb;
dev_kfree_skb(skb);
return err;
}
cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE;
skb = enable ? sskb : wskb;
@ -1285,8 +1376,10 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable);
if (enable && sta)
if (enable && sta) {
mt7615_mcu_sta_ht_tlv(skb, sta);
mt7615_mcu_sta_uapsd(skb, vif, sta);
}
sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
@ -1429,6 +1522,7 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
u8 pad[3];
} __packed hdr;
struct mt7615_bss_basic_tlv basic;
struct mt7615_bss_qos_tlv qos;
} basic_req = {
.hdr = {
.bss_idx = mvif->idx,
@ -1444,6 +1538,11 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
.active = true, /* keep bss deactivated */
.phymode = 0x38,
},
.qos = {
.tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
.len = cpu_to_le16(sizeof(struct mt7615_bss_qos_tlv)),
.qos = vif->bss_conf.qos,
},
};
struct {
struct {
@ -1808,44 +1907,66 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
int mt7615_driver_own(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_dev *mdev = &dev->mt76;
u32 addr;
int i;
addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
goto out;
mt7622_trigger_hif_int(dev, true);
addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
u32 addr;
addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
break;
}
mt7622_trigger_hif_int(dev, false);
if (i == MT7615_DRV_OWN_RETRY_COUNT) {
dev_err(mdev->dev, "driver own failed\n");
set_bit(MT76_STATE_PM, &mphy->state);
return -EIO;
}
out:
dev->pm.last_activity = jiffies;
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_driver_own);
int mt7615_firmware_own(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
int err = 0;
u32 addr;
addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
return 0;
mt7622_trigger_hif_int(dev, true);
addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
if (!is_mt7615(&dev->mt76) &&
if (is_mt7622(&dev->mt76) &&
!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
MT_CFG_LPCR_HOST_FW_OWN, 300)) {
dev_err(dev->mt76.dev, "Timeout for firmware own\n");
return -EIO;
clear_bit(MT76_STATE_PM, &mphy->state);
err = -EIO;
}
mt7622_trigger_hif_int(dev, false);
return 0;
return err;
}
EXPORT_SYMBOL_GPL(mt7615_firmware_own);
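
[Editor's aside] mt7615_driver_own() now replaces one long poll with a bounded retry loop: each round re-asserts the doorbell and polls briefly, and only after MT7615_DRV_OWN_RETRY_COUNT failed rounds does it give up and fall back into the PM state. A minimal model of that retry-and-poll pattern, with hypothetical claim_once()/owned() helpers:

#include <stdbool.h>
#include <stdio.h>

#define RETRY_COUNT 10   /* mirrors MT7615_DRV_OWN_RETRY_COUNT */

/* Fake hardware: becomes "driver owned" only after the 3rd doorbell */
static int doorbells;

static void claim_once(void) { doorbells++; }            /* ring the doorbell */
static bool owned(void)      { return doorbells >= 3; }  /* poll ownership bit */

static int toy_driver_own(void)
{
	int i;

	for (i = 0; i < RETRY_COUNT; i++) {
		claim_once();
		if (owned())
			break;              /* short poll succeeded */
	}
	if (i == RETRY_COUNT) {
		fprintf(stderr, "driver own failed\n");
		return -1;              /* caller re-enters the PM state */
	}
	return 0;
}

int main(void)
{
	return toy_driver_own() ? 1 : 0;
}
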
@ -2725,6 +2846,14 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
#ifdef CONFIG_NL80211_TESTMODE
if (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES &&
dev->mt76.test.tx_antenna_mask) {
req.tx_streams = hweight8(dev->mt76.test.tx_antenna_mask);
req.rx_streams_mask = dev->mt76.test.tx_antenna_mask;
}
#endif
if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
@ -2736,7 +2865,10 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
req.band_idx = phy != &dev->phy;
req.bw = mt7615_mcu_chan_bw(chandef);
mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
if (mt76_testmode_enabled(&dev->mt76))
memset(req.txpower_sku, 0x3f, 49);
else
mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
@ -2754,6 +2886,27 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
sizeof(req), true);
}
int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
u32 val)
{
struct {
u8 test_mode_en;
u8 param_idx;
u8 _rsv[2];
__le32 value;
u8 pad[8];
} req = {
.test_mode_en = test_mode,
.param_idx = param,
.value = cpu_to_le32(val),
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
sizeof(req), false);
}
int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
{
struct mt7615_dev *dev = phy->dev;
@ -3332,43 +3485,8 @@ out:
return ret;
}
#ifdef CONFIG_PM
int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
{
struct {
struct {
u8 hif_type; /* 0x0: HIF_SDIO
* 0x1: HIF_USB
* 0x2: HIF_PCIE
*/
u8 pad[3];
} __packed hdr;
struct hif_suspend_tlv {
__le16 tag;
__le16 len;
u8 suspend;
} __packed hif_suspend;
} req = {
.hif_suspend = {
.tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
.len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
.suspend = suspend,
},
};
if (mt76_is_mmio(&dev->mt76))
req.hdr.hif_type = 2;
else if (mt76_is_usb(&dev->mt76))
req.hdr.hif_type = 1;
return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL,
&req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
static int
mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable)
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
@ -3408,6 +3526,40 @@ mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
&req, sizeof(req), false);
}
#ifdef CONFIG_PM
int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
{
struct {
struct {
u8 hif_type; /* 0x0: HIF_SDIO
* 0x1: HIF_USB
* 0x2: HIF_PCIE
*/
u8 pad[3];
} __packed hdr;
struct hif_suspend_tlv {
__le16 tag;
__le16 len;
u8 suspend;
} __packed hif_suspend;
} req = {
.hif_suspend = {
.tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
.len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
.suspend = suspend,
},
};
if (mt76_is_mmio(&dev->mt76))
req.hdr.hif_type = 2;
else if (mt76_is_usb(&dev->mt76))
req.hdr.hif_type = 1;
return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL,
&req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
static int
mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
bool suspend, struct cfg80211_wowlan *wowlan)
@ -3542,6 +3694,32 @@ mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev,
&req, sizeof(req), true);
}
static int
mt7615_mcu_set_arp_filter(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool suspend)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt7615_arpns_tlv arpns;
} req = {
.hdr = {
.bss_idx = mvif->idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
.len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
.mode = suspend,
},
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
&req, sizeof(req), true);
}
void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
@ -3554,6 +3732,7 @@ void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
mt7615_mcu_set_bss_pm(phy->dev, vif, suspend);
mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend);
mt7615_mcu_set_arp_filter(phy->dev, vif, suspend);
mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
@ -3653,6 +3832,53 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
sizeof(req), false);
}
int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct sk_buff *skb;
int i, len = min_t(int, info->arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt7615_arpns_tlv arp;
} req_hdr = {
.hdr = {
.bss_idx = mvif->idx,
},
.arp = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
.len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
.ips_num = len,
.mode = 2, /* update */
.option = 1,
},
};
if (!mt7615_firmware_offload(dev))
return 0;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
sizeof(req_hdr) + len * sizeof(__be32));
if (!skb)
return -ENOMEM;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
for (i = 0; i < len; i++) {
u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
}
return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD_OFFLOAD, true);
}
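
[Editor's aside] mt7615_mcu_update_arp_filter() above builds a fixed header plus an ARP TLV followed by a variable number of IPv4 addresses, which is why the skb is sized as sizeof(req_hdr) + len * sizeof(__be32) before the addresses are appended. The userspace sketch below assembles the same buffer shape with malloc/memcpy; every struct and field name here is a local stand-in, and the TLV length bookkeeping is simplified.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Local stand-ins for the fixed part of the message */
struct toy_tlv_hdr {
	uint16_t tag;
	uint16_t len;
	uint8_t  ips_num;
	uint8_t  mode;
};

struct toy_req {
	uint8_t bss_idx;
	uint8_t pad[3];
	struct toy_tlv_hdr arp;
};

/* Build header + N IPv4 addresses in one contiguous buffer */
static void *build_arp_req(const uint32_t *ips, int n, size_t *out_len)
{
	size_t len = sizeof(struct toy_req) + n * sizeof(uint32_t);
	struct toy_req *req = calloc(1, len);

	if (!req)
		return NULL;
	req->arp.tag = 2;			/* arbitrary tag value for the sketch */
	req->arp.len = sizeof(req->arp);	/* length accounting simplified here */
	req->arp.ips_num = n;
	memcpy(req + 1, ips, n * sizeof(uint32_t));	/* addresses follow the header */
	*out_len = len;
	return req;
}

int main(void)
{
	uint32_t ips[2] = { 0x0a000001, 0x0a000002 };
	size_t len;
	void *msg = build_arp_req(ips, 2, &len);

	if (!msg)
		return 1;
	printf("message length: %zu bytes\n", len);
	free(msg);
	return 0;
}
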
int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@ -3674,3 +3900,32 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS,
&req, sizeof(req), false);
}
u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
{
struct {
__le32 addr;
__le32 val;
} __packed req = {
.addr = cpu_to_le32(offset),
};
return __mt76_mcu_send_msg(dev, MCU_CMD_REG_READ,
&req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr);
void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
struct {
__le32 addr;
__le32 val;
} __packed req = {
.addr = cpu_to_le32(offset),
.val = cpu_to_le32(val),
};
__mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE,
&req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr);

View File

@ -81,6 +81,7 @@ enum {
MCU_EVENT_GENERIC = 0x01,
MCU_EVENT_ACCESS_REG = 0x02,
MCU_EVENT_MT_PATCH_SEM = 0x04,
MCU_EVENT_REG_ACCESS = 0x05,
MCU_EVENT_SCAN_DONE = 0x0d,
MCU_EVENT_ROC = 0x10,
MCU_EVENT_BSS_ABSENCE = 0x11,
@ -238,8 +239,11 @@ enum {
#define MCU_FW_PREFIX BIT(31)
#define MCU_UNI_PREFIX BIT(30)
#define MCU_CE_PREFIX BIT(29)
#define MCU_QUERY_PREFIX BIT(28)
#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \
MCU_CE_PREFIX)
MCU_CE_PREFIX | MCU_QUERY_PREFIX)
#define MCU_QUERY_MASK BIT(16)
enum {
MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
@ -254,6 +258,7 @@ enum {
};
enum {
MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
@ -266,6 +271,7 @@ enum {
MCU_EXT_CMD_GET_TEMP = 0x2c,
MCU_EXT_CMD_WTBL_UPDATE = 0x32,
MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
MCU_EXT_CMD_ATE_CTRL = 0x3d,
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
MCU_EXT_CMD_DBDC_CTRL = 0x45,
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
@ -287,6 +293,11 @@ enum {
MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
};
enum {
MCU_ATE_SET_FREQ_OFFSET = 0xa,
MCU_ATE_SET_TX_POWER_CONTROL = 0x15,
};
struct mt7615_mcu_uni_event {
u8 cid;
u8 pad[3];
@ -421,6 +432,11 @@ struct nt7615_sched_scan_done {
__le16 pad;
} __packed;
struct mt7615_mcu_reg_event {
__le32 reg;
__le32 val;
} __packed;
struct mt7615_mcu_bss_event {
u8 bss_idx;
u8 is_absent;
@ -454,6 +470,13 @@ struct mt7615_bss_basic_tlv {
u8 pad[3];
} __packed;
struct mt7615_bss_qos_tlv {
__le16 tag;
__le16 len;
u8 qos;
u8 pad[3];
} __packed;
struct mt7615_wow_ctrl_tlv {
__le16 tag;
__le16 len;
@ -545,6 +568,15 @@ struct mt7615_roc_tlv {
u8 rsv1[8];
} __packed;
struct mt7615_arpns_tlv {
__le16 tag;
__le16 len;
u8 mode;
u8 ips_num;
u8 option;
u8 pad[1];
} __packed;
/* offload mcu commands */
enum {
MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
@ -557,6 +589,8 @@ enum {
MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
};
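
[Editor's aside] With the new MCU_QUERY_MASK bit, the register read command reuses the write opcode (0xc0) and differs only in that query bit, which mt7615_mcu_fill_msg() checks to pick MCU_Q_QUERY instead of MCU_Q_SET. A tiny self-contained check of that encoding, with the constants copied from the hunks above:

#include <stdio.h>

#define CE_PREFIX	(1u << 29)	/* MCU_CE_PREFIX */
#define QUERY_MASK	(1u << 16)	/* MCU_QUERY_MASK */

#define CMD_REG_WRITE	(CE_PREFIX | 0xc0)
#define CMD_REG_READ	(CE_PREFIX | QUERY_MASK | 0xc0)

int main(void)
{
	/* Same opcode, the read variant is distinguished only by the query bit */
	printf("write=0x%08x read=0x%08x query=%d\n",
	       CMD_REG_WRITE, CMD_REG_READ,
	       !!(CMD_REG_READ & QUERY_MASK));
	return 0;
}
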
#define MCU_CMD_ACK BIT(0)
@ -569,6 +603,8 @@ enum {
UNI_BSS_INFO_BASIC = 0,
UNI_BSS_INFO_RLM = 2,
UNI_BSS_INFO_BCN_CONTENT = 7,
UNI_BSS_INFO_QBSS = 15,
UNI_BSS_INFO_UAPSD = 19,
};
enum {
@ -580,8 +616,8 @@ enum {
};
enum {
UNI_OFFLOAD_OFFLOAD_ARPNS_IPV4,
UNI_OFFLOAD_OFFLOAD_ARPNS_IPV6,
UNI_OFFLOAD_OFFLOAD_ARP,
UNI_OFFLOAD_OFFLOAD_ND,
UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
};
@ -882,6 +918,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_ht) + \
sizeof(struct sta_rec_vht) + \
sizeof(struct sta_rec_uapsd) + \
sizeof(struct tlv) + \
MT7615_WTBL_UPDATE_MAX_SIZE)
@ -971,6 +1008,17 @@ struct sta_rec_ba {
__le16 winsize;
} __packed;
struct sta_rec_uapsd {
__le16 tag;
__le16 len;
u8 dac_map;
u8 tac_map;
u8 max_sp;
u8 rsv0;
__le16 listen_interval;
u8 rsv1[2];
} __packed;
enum {
STA_REC_BASIC,
STA_REC_RA,

View File

@ -17,7 +17,6 @@ const u32 mt7615e_reg_map[] = {
[MT_CSR_BASE] = 0x07000,
[MT_PLE_BASE] = 0x08000,
[MT_PSE_BASE] = 0x0c000,
[MT_PHY_BASE] = 0x10000,
[MT_CFG_BASE] = 0x20200,
[MT_AGG_BASE] = 0x20a00,
[MT_TMAC_BASE] = 0x21000,
@ -44,7 +43,7 @@ const u32 mt7663e_reg_map[] = {
[MT_CSR_BASE] = 0x07000,
[MT_PLE_BASE] = 0x08000,
[MT_PSE_BASE] = 0x0c000,
[MT_PHY_BASE] = 0x10000,
[MT_PP_BASE] = 0x0e000,
[MT_CFG_BASE] = 0x20000,
[MT_AGG_BASE] = 0x22000,
[MT_TMAC_BASE] = 0x24000,
@ -140,6 +139,38 @@ static void mt7615_irq_tasklet(unsigned long data)
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
{
if (addr < 0x100000)
return addr;
return mt7615_reg_map(dev, addr);
}
static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
u32 addr = __mt7615_reg_addr(dev, offset);
return dev->bus_ops->rr(mdev, addr);
}
static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
u32 addr = __mt7615_reg_addr(dev, offset);
dev->bus_ops->wr(mdev, addr, val);
}
static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
u32 addr = __mt7615_reg_addr(dev, offset);
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
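
[Editor's aside] The three accessors above, plus the probe-time devm_kmemdup() of mt76_bus_ops further down, form a thin wrapper layer: copy the original ops table, keep a pointer to it in dev->bus_ops, and override rr/wr/rmw with versions that remap high addresses before delegating. A generic userspace model of that wrap-and-delegate pattern follows; the remap rule and all names are illustrative, not the real mt7615_reg_map() logic.

#include <stdint.h>
#include <stdio.h>

struct bus_ops {
	uint32_t (*rr)(uint32_t addr);
	void     (*wr)(uint32_t addr, uint32_t val);
};

/* "Raw" backend standing in for the real MMIO accessors */
static uint32_t raw_rr(uint32_t addr)              { printf("rr 0x%08x\n", addr); return 0; }
static void     raw_wr(uint32_t addr, uint32_t v)  { printf("wr 0x%08x=0x%x\n", addr, v); }

static struct bus_ops raw_ops = { raw_rr, raw_wr };
static const struct bus_ops *saved_ops;	/* like dev->bus_ops */

/* Toy remap: low addresses pass through, high ones get rewritten first */
static uint32_t remap(uint32_t addr)
{
	return addr < 0x100000 ? addr : ((addr & 0xffff) | 0xd0000);
}

static uint32_t wrapped_rr(uint32_t addr)             { return saved_ops->rr(remap(addr)); }
static void     wrapped_wr(uint32_t addr, uint32_t v) { saved_ops->wr(remap(addr), v); }

int main(void)
{
	struct bus_ops wrapped = raw_ops;	/* copy, then override selected entries */

	saved_ops = &raw_ops;			/* keep the originals for delegation */
	wrapped.rr = wrapped_rr;
	wrapped.wr = wrapped_wr;

	wrapped.rr(0x00021000);			/* passes through unchanged */
	wrapped.wr(0x82070000, 1);		/* remapped before hitting the backend */
	return 0;
}

Saving the original ops before installing the copy is what lets the wrappers delegate without recursing into themselves.
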
int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map)
{
@ -159,6 +190,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
struct mt76_bus_ops *bus_ops;
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
@ -182,6 +214,19 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
if (!bus_ops) {
ret = -ENOMEM;
goto error;
}
bus_ops->rr = mt7615_rr;
bus_ops->wr = mt7615_wr;
bus_ops->rmw = mt7615_rmw;
dev->mt76.bus = bus_ops;
ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)

View File

@ -4,6 +4,7 @@
#ifndef __MT7615_H
#define __MT7615_H
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
@ -18,6 +19,7 @@
#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
MT7615_MAX_INTERFACES)
#define MT7615_PM_TIMEOUT (HZ / 12)
#define MT7615_WATCHDOG_TIME (HZ / 10)
#define MT7615_HW_SCAN_TIMEOUT (HZ / 10)
#define MT7615_RESET_TIMEOUT (30 * HZ)
@ -31,6 +33,8 @@
#define MT7615_RX_RING_SIZE 1024
#define MT7615_RX_MCU_RING_SIZE 512
#define MT7615_DRV_OWN_RETRY_COUNT 10
#define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin"
#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
@ -169,6 +173,8 @@ struct mt7615_phy {
struct mt76_phy *mt76;
struct mt7615_dev *dev;
struct ieee80211_vif *monitor_vif;
u32 rxfilter;
u32 omac_mask;
@ -240,10 +246,10 @@ struct mt7615_dev {
struct mt76_phy mphy;
};
const struct mt76_bus_ops *bus_ops;
struct tasklet_struct irq_tasklet;
struct mt7615_phy phy;
u32 vif_mask;
u32 omac_mask;
u16 chainmask;
@ -280,6 +286,37 @@ struct mt7615_dev {
struct work_struct wtbl_work;
struct list_head wd_head;
u32 debugfs_rf_wf;
u32 debugfs_rf_reg;
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 *reg_backup;
s16 last_freq_offset;
u8 last_rcpi[4];
s8 last_ib_rssi;
s8 last_wb_rssi;
} test;
#endif
struct {
bool enable;
spinlock_t txq_lock;
struct {
struct mt7615_sta *msta;
struct sk_buff *skb;
} tx_q[IEEE80211_NUM_ACS];
struct work_struct wake_work;
struct completion wake_cmpl;
struct delayed_work ps_work;
unsigned long last_activity;
unsigned long idle_timeout;
} pm;
};
enum tx_pkt_queue_idx {
@ -372,8 +409,10 @@ extern struct ieee80211_rate mt7615_rates[12];
extern const struct ieee80211_ops mt7615_ops;
extern const u32 mt7615e_reg_map[__MT_BASE_MAX];
extern const u32 mt7663e_reg_map[__MT_BASE_MAX];
extern const u32 mt7663_usb_sdio_reg_map[__MT_BASE_MAX];
extern struct pci_driver mt7615_pci_driver;
extern struct platform_driver mt7622_wmac_driver;
extern const struct mt76_testmode_ops mt7615_testmode_ops;
#ifdef CONFIG_MT7622_WMAC
int mt7622_wmac_init(struct mt7615_dev *dev);
@ -408,6 +447,11 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable);
void mt7615_pm_wake_work(struct work_struct *work);
int mt7615_pm_wake(struct mt7615_dev *dev);
void mt7615_pm_power_save_sched(struct mt7615_dev *dev);
void mt7615_pm_power_save_work(struct work_struct *work);
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
@ -462,6 +506,20 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
return MT7615_WTBL_SIZE;
}
static inline void mt7615_mutex_acquire(struct mt7615_dev *dev)
__acquires(&dev->mt76.mutex)
{
mutex_lock(&dev->mt76.mutex);
mt7615_pm_wake(dev);
}
static inline void mt7615_mutex_release(struct mt7615_dev *dev)
__releases(&dev->mt76.mutex)
{
mt7615_pm_power_save_sched(dev);
mutex_unlock(&dev->mt76.mutex);
}
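
[Editor's aside] These two inlines are the pivot of the runtime-PM rework in this series: every mutex_lock(&dev->mt76.mutex)/mutex_unlock() pair in the driver paths becomes acquire (take the lock, then force the chip awake) and release (arm the power-save scheduler, then unlock). A minimal userspace model of that pairing, using pthreads and placeholder wake/power-save hooks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

static void pm_wake(void)             { printf("wake chip\n"); }
static void pm_power_save_sched(void) { printf("arm power-save timer\n"); }

/* Mirror of mt7615_mutex_acquire(): take the lock, then wake the device */
static void toy_mutex_acquire(void)
{
	pthread_mutex_lock(&dev_mutex);
	pm_wake();
}

/* Mirror of mt7615_mutex_release(): reschedule power save, then unlock */
static void toy_mutex_release(void)
{
	pm_power_save_sched();
	pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
	toy_mutex_acquire();
	/* ... critical section touching the hardware ... */
	toy_mutex_release();
	return 0;
}

One likely reason for waking inside the lock, rather than before taking it, is that the wake/sleep decisions then stay serialized by the same mutex that already guards hardware access.
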
static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
{
static const u8 lmac_queue_map[] = {
@ -485,6 +543,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband);
void mt7615_phy_init(struct mt7615_dev *dev);
void mt7615_mac_init(struct mt7615_dev *dev);
int mt7615_set_channel(struct mt7615_phy *phy);
int mt7615_mcu_restart(struct mt76_dev *dev);
void mt7615_update_channel(struct mt76_dev *mdev);
@ -516,15 +575,19 @@ int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd);
void mt7615_mac_reset_work(struct work_struct *work);
u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp);
u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val);
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
void mt7615_mcu_exit(struct mt7615_dev *dev);
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq);
@ -563,6 +626,8 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
const struct mt7615_dfs_pulse *pulse);
int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
const struct mt7615_dfs_pattern *pattern);
int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
u32 val);
int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
@ -579,18 +644,40 @@ int mt7615_driver_own(struct mt7615_dev *dev);
int mt7615_init_debugfs(struct mt7615_dev *dev);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *key);
int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info);
int __mt7663_load_firmware(struct mt7615_dev *dev);
u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
/* usb */
void mt7663u_wtbl_work(struct work_struct *work);
int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
enum mt76_txq_id qid,
struct mt76_queue_entry *e);
void mt7663_usb_sdio_wtbl_work(struct work_struct *work);
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
int mt7663u_mcu_init(struct mt7615_dev *dev);
int mt7663u_register_device(struct mt7615_dev *dev);
/* sdio */
u32 mt7663s_read_pcr(struct mt7615_dev *dev);
int mt7663s_mcu_init(struct mt7615_dev *dev);
int mt7663s_driver_own(struct mt7615_dev *dev);
int mt7663s_firmware_own(struct mt7615_dev *dev);
int mt7663s_kthread_run(void *data);
void mt7663s_sdio_irq(struct sdio_func *func);
#endif

View File

@ -75,6 +75,10 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
bool hif_suspend;
int i, err;
err = mt7615_pm_wake(dev);
if (err < 0)
return err;
hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev);
if (hif_suspend) {

View File

@ -70,6 +70,10 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
dev = container_of(mt76, struct mt7615_dev, mt76);
if (test_bit(MT76_STATE_PM, &mt76->phy.state))
return;
val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
FIELD_PREP(MT_LED_STATUS_ON, delay_on);

View File

@ -155,7 +155,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
msta->rates);
msta->rate_probe = true;
spin_unlock_bh(&dev->mt76.lock);
}

View File

@ -14,7 +14,6 @@ enum mt7615_reg_base {
MT_CSR_BASE,
MT_PLE_BASE,
MT_PSE_BASE,
MT_PHY_BASE,
MT_CFG_BASE,
MT_AGG_BASE,
MT_TMAC_BASE,
@ -29,6 +28,7 @@ enum mt7615_reg_base {
MT_PCIE_REMAP_BASE2,
MT_TOP_MISC_BASE,
MT_EFUSE_ADDR_BASE,
MT_PP_BASE,
__MT_BASE_MAX,
};
@ -153,6 +153,8 @@ enum mt7615_reg_base {
#define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs))
#define MT_PLE_PG_HIF0_GROUP MT_PLE(0x110)
#define MT_HIF0_MIN_QUOTA GENMASK(11, 0)
#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
@ -162,6 +164,10 @@ enum mt7615_reg_base {
((n) << 2))
#define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs))
#define MT_PSE_PG_HIF0_GROUP MT_PSE(0x110)
#define MT_HIF0_MIN_QUOTA GENMASK(11, 0)
#define MT_PSE_PG_HIF1_GROUP MT_PSE(0x118)
#define MT_HIF1_MIN_QUOTA GENMASK(11, 0)
#define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4)
#define MT_HIF_0_EMPTY_MASK BIT(16)
#define MT_HIF_1_EMPTY_MASK BIT(17)
@ -169,7 +175,12 @@ enum mt7615_reg_base {
#define MT_PSE_PG_INFO MT_PSE(0x194)
#define MT_PSE_SRC_CNT GENMASK(27, 16)
#define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE])
#define MT_PP(ofs) ((dev)->reg_map[MT_PP_BASE] + (ofs))
#define MT_PP_TXDWCNT MT_PP(0x0)
#define MT_PP_TXDWCNT_TX0_ADD_DW_CNT GENMASK(7, 0)
#define MT_PP_TXDWCNT_TX1_ADD_DW_CNT GENMASK(15, 8)
#define MT_WF_PHY_BASE 0x82070000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
#define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400)
@ -213,6 +224,9 @@ enum mt7615_reg_base {
#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00)
#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
#define MT_WF_PHY_RFINTF3_0(_n) MT_WF_PHY(0x1100 + (_n) * 0x400)
#define MT_WF_PHY_RFINTF3_0_ANT GENMASK(7, 4)
#define MT_WF_CFG_BASE ((dev)->reg_map[MT_CFG_BASE])
#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
@ -256,6 +270,13 @@ enum mt7615_reg_base {
#define MT_WF_ARB_BASE ((dev)->reg_map[MT_ARB_BASE])
#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
#define MT_ARB_RQCR MT_WF_ARB(0x070)
#define MT_ARB_RQCR_RX_START BIT(0)
#define MT_ARB_RQCR_RXV_START BIT(4)
#define MT_ARB_RQCR_RXV_R_EN BIT(7)
#define MT_ARB_RQCR_RXV_T_EN BIT(8)
#define MT_ARB_RQCR_BAND_SHIFT 16
#define MT_ARB_SCR MT_WF_ARB(0x080)
#define MT_ARB_SCR_TX0_DISABLE BIT(8)
#define MT_ARB_SCR_RX0_DISABLE BIT(9)
@ -417,6 +438,7 @@ enum mt7615_reg_base {
#define MT_LPON_T0CR MT_LPON(0x010)
#define MT_LPON_T0CR_MODE GENMASK(1, 0)
#define MT_LPON_T0CR_WRITE BIT(0)
#define MT_LPON_UTTR0 MT_LPON(0x018)
#define MT_LPON_UTTR1 MT_LPON(0x01c)
@ -550,4 +572,11 @@ enum mt7615_reg_base {
#define MT_WL_RX_BUSY BIT(30)
#define MT_WL_TX_BUSY BIT(31)
#define MT_MCU_PTA_BASE 0x81060000
#define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n))
#define MT_ANT_SWITCH_CON(n) MT_MCU_PTA(0x0c8)
#define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8))
#define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8))
#endif

View File

@ -0,0 +1,478 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include "mt7615.h"
#include "sdio.h"
#include "mac.h"
static const struct sdio_device_id mt7663s_table[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7603) },
{ } /* Terminating entry */
};
static u32 mt7663s_read_whisr(struct mt76_dev *dev)
{
return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
}
u32 mt7663s_read_pcr(struct mt7615_dev *dev)
{
struct mt76_sdio *sdio = &dev->mt76.sdio;
return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
}
static u32 mt7663s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
struct sdio_func *func = dev->sdio.func;
u32 val = ~0, status;
int err;
sdio_claim_host(func);
sdio_writel(func, offset, MCR_H2DSM0R, &err);
if (err < 0) {
dev_err(dev->dev, "failed setting address [err=%d]\n", err);
goto out;
}
sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
if (err < 0) {
dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
goto out;
}
err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
status & H2D_SW_INT_READ, 0, 1000000);
if (err < 0) {
dev_err(dev->dev, "query whisr timeout\n");
goto out;
}
sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
if (err < 0) {
dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
goto out;
}
val = sdio_readl(func, MCR_H2DSM0R, &err);
if (err < 0) {
dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
goto out;
}
if (val != offset) {
dev_err(dev->dev, "register mismatch\n");
val = ~0;
goto out;
}
val = sdio_readl(func, MCR_D2HRM1R, &err);
if (err < 0)
dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);
out:
sdio_release_host(func);
return val;
}
static void mt7663s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
struct sdio_func *func = dev->sdio.func;
u32 status;
int err;
sdio_claim_host(func);
sdio_writel(func, offset, MCR_H2DSM0R, &err);
if (err < 0) {
dev_err(dev->dev, "failed setting address [err=%d]\n", err);
goto out;
}
sdio_writel(func, val, MCR_H2DSM1R, &err);
if (err < 0) {
dev_err(dev->dev,
"failed setting write value [err=%d]\n", err);
goto out;
}
sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
if (err < 0) {
dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
goto out;
}
err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
status & H2D_SW_INT_WRITE, 0, 1000000);
if (err < 0) {
dev_err(dev->dev, "query whisr timeout\n");
goto out;
}
sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
if (err < 0) {
dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
goto out;
}
val = sdio_readl(func, MCR_H2DSM0R, &err);
if (err < 0) {
dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
goto out;
}
if (val != offset)
dev_err(dev->dev, "register mismatch\n");
out:
sdio_release_host(func);
}
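
[Editor's aside] Both mailbox helpers above follow the same doorbell handshake: program the target address (and value) into the H2DSM registers, raise a software-interrupt bit via WSICR, poll WHISR until the firmware reflects that bit, acknowledge it, then verify the echoed address and read the result. The sketch below models that sequence against a fake register file; all names are local stand-ins and the polling is trivialized.

#include <stdint.h>
#include <stdio.h>

/* Fake register file standing in for the SDIO-mapped mailbox registers */
static uint32_t reg_addr, reg_status, reg_result;

/* Pretend firmware: on doorbell, "reads" the register and posts a result */
static void fake_firmware(uint32_t doorbell_bit)
{
	reg_result = reg_addr ^ 0xdeadbeef;	/* arbitrary derived value */
	reg_status |= doorbell_bit;		/* reflect the doorbell back */
}

static int mailbox_read(uint32_t offset, uint32_t *val)
{
	const uint32_t DOORBELL_READ = 1u << 16;

	reg_addr = offset;			/* 1. program target address */
	fake_firmware(DOORBELL_READ);		/* 2. ring the doorbell */

	if (!(reg_status & DOORBELL_READ))	/* 3. poll for completion */
		return -1;
	reg_status &= ~DOORBELL_READ;		/* 4. acknowledge */

	if (reg_addr != offset)			/* 5. sanity-check the echo */
		return -1;
	*val = reg_result;			/* 6. fetch the posted value */
	return 0;
}

int main(void)
{
	uint32_t val;

	if (mailbox_read(0x7c060000, &val))
		return 1;
	printf("read 0x%08x\n", val);
	return 0;
}
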
static u32 mt7663s_rr(struct mt76_dev *dev, u32 offset)
{
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
return dev->mcu_ops->mcu_rr(dev, offset);
else
return mt7663s_read_mailbox(dev, offset);
}
static void mt7663s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
dev->mcu_ops->mcu_wr(dev, offset, val);
else
mt7663s_write_mailbox(dev, offset, val);
}
static u32 mt7663s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
val |= mt7663s_rr(dev, offset) & ~mask;
mt7663s_wr(dev, offset, val);
return val;
}
static void mt7663s_write_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
const u32 *val = data;
int i;
for (i = 0; i < len / sizeof(u32); i++) {
mt7663s_wr(dev, offset, val[i]);
offset += sizeof(u32);
}
}
static void mt7663s_read_copy(struct mt76_dev *dev, u32 offset,
void *data, int len)
{
u32 *val = data;
int i;
for (i = 0; i < len / sizeof(u32); i++) {
val[i] = mt7663s_rr(dev, offset);
offset += sizeof(u32);
}
}
static int mt7663s_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data,
int len)
{
int i;
for (i = 0; i < len; i++) {
mt7663s_wr(dev, data->reg, data->value);
data++;
}
return 0;
}
static int mt7663s_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data,
int len)
{
int i;
for (i = 0; i < len; i++) {
data->value = mt7663s_rr(dev, data->reg);
data++;
}
return 0;
}
static void mt7663s_init_work(struct work_struct *work)
{
struct mt7615_dev *dev;
dev = container_of(work, struct mt7615_dev, mcu_work);
if (mt7663s_mcu_init(dev))
return;
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
mt7615_check_offload_capability(dev);
}
static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func)
{
u32 status, ctrl;
int ret;
sdio_claim_host(func);
ret = sdio_enable_func(func);
if (ret < 0)
goto release;
/* Get ownership from the device */
sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
MCR_WHLPCR, &ret);
if (ret < 0)
goto disable_func;
ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot get ownership from device");
goto disable_func;
}
ret = sdio_set_block_size(func, 512);
if (ret < 0)
goto disable_func;
/* Enable interrupt */
sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
if (ret < 0)
goto disable_func;
ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
sdio_writel(func, ctrl, MCR_WHIER, &ret);
if (ret < 0)
goto disable_func;
/* set WHISR as read clear and Rx aggregation number as 16 */
ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
sdio_writel(func, ctrl, MCR_WHCR, &ret);
if (ret < 0)
goto disable_func;
ret = sdio_claim_irq(func, mt7663s_sdio_irq);
if (ret < 0)
goto disable_func;
sdio_release_host(func);
return 0;
disable_func:
sdio_disable_func(func);
release:
sdio_release_host(func);
return ret;
}
static int mt7663s_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt76_sdio *sdio = &mdev->sdio;
u32 pse, ple;
int err;
err = mt7615_mac_sta_add(mdev, vif, sta);
if (err < 0)
return err;
/* init sched data quota */
pse = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
mutex_lock(&sdio->sched.lock);
sdio->sched.pse_data_quota = pse;
sdio->sched.ple_data_quota = ple;
mutex_unlock(&sdio->sched.lock);
return 0;
}
static int mt7663s_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_USB_TXD_SIZE,
.drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
.tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7663s_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
static const struct mt76_bus_ops mt7663s_ops = {
.rr = mt7663s_rr,
.rmw = mt7663s_rmw,
.wr = mt7663s_wr,
.write_copy = mt7663s_write_copy,
.read_copy = mt7663s_read_copy,
.wr_rp = mt7663s_wr_rp,
.rd_rp = mt7663s_rd_rp,
.type = MT76_BUS_SDIO,
};
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
int ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
if (!ops)
return -ENOMEM;
mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt7615_dev, mt76);
INIT_WORK(&dev->mcu_work, mt7663s_init_work);
dev->reg_map = mt7663_usb_sdio_reg_map;
dev->ops = ops;
sdio_set_drvdata(func, dev);
mdev->sdio.tx_kthread = kthread_create(mt7663s_kthread_run, dev,
"mt7663s_tx");
if (IS_ERR(mdev->sdio.tx_kthread))
return PTR_ERR(mdev->sdio.tx_kthread);
ret = mt76s_init(mdev, func, &mt7663s_ops);
if (ret < 0)
goto err_free;
ret = mt7663s_hw_init(dev, func);
if (ret)
goto err_free;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
ret = mt76s_alloc_queues(&dev->mt76);
if (ret)
goto err_deinit;
ret = mt7663_usb_sdio_register_device(dev);
if (ret)
goto err_deinit;
return 0;
err_deinit:
mt76s_deinit(&dev->mt76);
err_free:
mt76_free_device(&dev->mt76);
return ret;
}
static void mt7663s_remove(struct sdio_func *func)
{
struct mt7615_dev *dev = sdio_get_drvdata(func);
if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return;
ieee80211_unregister_hw(dev->mt76.hw);
mt76s_deinit(&dev->mt76);
mt76_free_device(&dev->mt76);
}
#ifdef CONFIG_PM
static int mt7663s_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct mt7615_dev *mdev = sdio_get_drvdata(func);
if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
mt7615_firmware_offload(mdev)) {
int err;
err = mt7615_mcu_set_hif_suspend(mdev, true);
if (err < 0)
return err;
}
mt76s_stop_txrx(&mdev->mt76);
return mt7663s_firmware_own(mdev);
}
static int mt7663s_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct mt7615_dev *mdev = sdio_get_drvdata(func);
int err;
err = mt7663s_driver_own(mdev);
if (err)
return err;
if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
mt7615_firmware_offload(mdev))
err = mt7615_mcu_set_hif_suspend(mdev, false);
return err;
}
static const struct dev_pm_ops mt7663s_pm_ops = {
.suspend = mt7663s_suspend,
.resume = mt7663s_resume,
};
#endif
MODULE_DEVICE_TABLE(sdio, mt7663s_table);
MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);
static struct sdio_driver mt7663s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7663s_probe,
.remove = mt7663s_remove,
.id_table = mt7663s_table,
#ifdef CONFIG_PM
.drv = {
.pm = &mt7663s_pm_ops,
}
#endif
};
module_sdio_driver(mt7663s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -0,0 +1,115 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Sean Wang <sean.wang@mediatek.com>
*/
#ifndef __MT76S_H
#define __MT76S_H
#define MT_PSE_PAGE_SZ 128
#define MCR_WCIR 0x0000
#define MCR_WHLPCR 0x0004
#define WHLPCR_FW_OWN_REQ_CLR BIT(9)
#define WHLPCR_FW_OWN_REQ_SET BIT(8)
#define WHLPCR_IS_DRIVER_OWN BIT(8)
#define WHLPCR_INT_EN_CLR BIT(1)
#define WHLPCR_INT_EN_SET BIT(0)
#define MCR_WSDIOCSR 0x0008
#define MCR_WHCR 0x000C
#define W_INT_CLR_CTRL BIT(1)
#define RECV_MAILBOX_RD_CLR_EN BIT(2)
#define MAX_HIF_RX_LEN_NUM GENMASK(13, 8)
#define RX_ENHANCE_MODE BIT(16)
#define MCR_WHISR 0x0010
#define MCR_WHIER 0x0014
#define WHIER_D2H_SW_INT GENMASK(31, 8)
#define WHIER_FW_OWN_BACK_INT_EN BIT(7)
#define WHIER_ABNORMAL_INT_EN BIT(6)
#define WHIER_RX1_DONE_INT_EN BIT(2)
#define WHIER_RX0_DONE_INT_EN BIT(1)
#define WHIER_TX_DONE_INT_EN BIT(0)
#define WHIER_DEFAULT (WHIER_RX0_DONE_INT_EN | \
WHIER_RX1_DONE_INT_EN | \
WHIER_TX_DONE_INT_EN | \
WHIER_ABNORMAL_INT_EN | \
WHIER_D2H_SW_INT)
#define MCR_WASR 0x0020
#define MCR_WSICR 0x0024
#define MCR_WTSR0 0x0028
#define TQ0_CNT GENMASK(7, 0)
#define TQ1_CNT GENMASK(15, 8)
#define TQ2_CNT GENMASK(23, 16)
#define TQ3_CNT GENMASK(31, 24)
#define MCR_WTSR1 0x002c
#define TQ4_CNT GENMASK(7, 0)
#define TQ5_CNT GENMASK(15, 8)
#define TQ6_CNT GENMASK(23, 16)
#define TQ7_CNT GENMASK(31, 24)
#define MCR_WTDR1 0x0034
#define MCR_WRDR0 0x0050
#define MCR_WRDR1 0x0054
#define MCR_WRDR(p) (0x0050 + 4 * (p))
#define MCR_H2DSM0R 0x0070
#define H2D_SW_INT_READ BIT(16)
#define H2D_SW_INT_WRITE BIT(17)
#define MCR_H2DSM1R 0x0074
#define MCR_D2HRM0R 0x0078
#define MCR_D2HRM1R 0x007c
#define MCR_D2HRM2R 0x0080
#define MCR_WRPLR 0x0090
#define RX0_PACKET_LENGTH GENMASK(15, 0)
#define RX1_PACKET_LENGTH GENMASK(31, 16)
#define MCR_WTMDR 0x00b0
#define MCR_WTMCR 0x00b4
#define MCR_WTMDPCR0 0x00b8
#define MCR_WTMDPCR1 0x00bc
#define MCR_WPLRCR 0x00d4
#define MCR_WSR 0x00D8
#define MCR_CLKIOCR 0x0100
#define MCR_CMDIOCR 0x0104
#define MCR_DAT0IOCR 0x0108
#define MCR_DAT1IOCR 0x010C
#define MCR_DAT2IOCR 0x0110
#define MCR_DAT3IOCR 0x0114
#define MCR_CLKDLYCR 0x0118
#define MCR_CMDDLYCR 0x011C
#define MCR_ODATDLYCR 0x0120
#define MCR_IDATDLYCR1 0x0124
#define MCR_IDATDLYCR2 0x0128
#define MCR_ILCHCR 0x012C
#define MCR_WTQCR0 0x0130
#define MCR_WTQCR1 0x0134
#define MCR_WTQCR2 0x0138
#define MCR_WTQCR3 0x013C
#define MCR_WTQCR4 0x0140
#define MCR_WTQCR5 0x0144
#define MCR_WTQCR6 0x0148
#define MCR_WTQCR7 0x014C
#define MCR_WTQCR(x) (0x130 + 4 * (x))
#define TXQ_CNT_L GENMASK(15, 0)
#define TXQ_CNT_H GENMASK(31, 16)
#define MCR_SWPCDBGR 0x0154
struct mt76s_intr {
u32 isr;
struct {
u32 wtqcr[8];
} tx;
struct {
u16 num[2];
u16 len[2][16];
} rx;
u32 rec_mb[2];
} __packed;
#endif
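The GENMASK()/BIT() definitions in this header are meant to be used with the <linux/bitfield.h> helpers. As a hedged sketch, the two packet lengths packed into MCR_WRPLR could be unpacked like this (error handling simplified; the register and field names come from the header above):

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/mmc/sdio_func.h>
#include "sdio.h"

/* Sketch: read MCR_WRPLR and split out the two RX packet-length fields. */
static void example_read_rx_lengths(struct sdio_func *func)
{
        int err = 0;
        u32 val = sdio_readl(func, MCR_WRPLR, &err);

        if (!err)
                pr_debug("rx0 len %lu, rx1 len %lu\n",
                         FIELD_GET(RX0_PACKET_LENGTH, val),
                         FIELD_GET(RX1_PACKET_LENGTH, val));
}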

View File

@ -0,0 +1,162 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/mmc/sdio_func.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include "mt7615.h"
#include "mac.h"
#include "mcu.h"
#include "regs.h"
#include "sdio.h"
static int mt7663s_mcu_init_sched(struct mt7615_dev *dev)
{
struct mt76_sdio *sdio = &dev->mt76.sdio;
u32 pse0, ple, pse1, txdwcnt;
pse0 = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
pse1 = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, MT_HIF1_MIN_QUOTA);
ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT,
MT_PP_TXDWCNT_TX1_ADD_DW_CNT);
mutex_lock(&sdio->sched.lock);
sdio->sched.pse_data_quota = pse0;
sdio->sched.ple_data_quota = ple;
sdio->sched.pse_mcu_quota = pse1;
sdio->sched.deficit = txdwcnt << 2;
mutex_unlock(&sdio->sched.lock);
return 0;
}
static int
mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
int ret, seq;
mutex_lock(&mdev->mcu.mutex);
mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
if (ret)
goto out;
mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q);
if (wait_resp)
ret = mt7615_mcu_wait_response(dev, cmd, seq);
out:
mutex_unlock(&mdev->mcu.mutex);
return ret;
}
int mt7663s_driver_own(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
u32 status;
int ret;
if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
goto out;
sdio_claim_host(func);
sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, 0);
ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot get ownership from device\n");
set_bit(MT76_STATE_PM, &mphy->state);
sdio_release_host(func);
return ret;
}
sdio_release_host(func);
out:
dev->pm.last_activity = jiffies;
return 0;
}
int mt7663s_firmware_own(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
u32 status;
int ret;
if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
return 0;
sdio_claim_host(func);
sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, 0);
ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot set ownership to device\n");
clear_bit(MT76_STATE_PM, &mphy->state);
}
sdio_release_host(func);
return ret;
}
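Both ownership helpers build on readx_poll_timeout() from <linux/iopoll.h>, which repeatedly calls a read accessor until the given condition holds or the timeout expires. A generic sketch of that idiom, with a placeholder device type and accessor:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/types.h>

struct my_dev;                          /* placeholder device type */
u32 read_status(struct my_dev *dev);    /* placeholder accessor */

/* Sketch: poll until BIT(0) of the status value is set, sleeping
 * roughly 2ms between reads and giving up after one second.
 */
static int example_wait_ready(struct my_dev *dev)
{
        u32 status;

        return readx_poll_timeout(read_status, dev, status,
                                  status & BIT(0), 2000, 1000000);
}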
int mt7663s_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7663s_mcu_ops = {
.headroom = sizeof(struct mt7615_mcu_txd),
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663s_mcu_send_message,
.mcu_send_msg = mt7615_mcu_msg_send,
.mcu_restart = mt7615_mcu_restart,
.mcu_rr = mt7615_mcu_reg_rr,
.mcu_wr = mt7615_mcu_reg_wr,
};
int ret;
ret = mt7663s_driver_own(dev);
if (ret)
return ret;
dev->mt76.mcu_ops = &mt7663s_mcu_ops;
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
mt7615_mcu_restart(&dev->mt76);
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
MT_TOP_MISC2_FW_N9_RDY, 0, 500))
return -EIO;
}
ret = __mt7663_load_firmware(dev);
if (ret)
return ret;
ret = mt7663s_mcu_init_sched(dev);
if (ret)
return ret;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}
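With driver_own/firmware_own in place, runtime power management amounts to taking ownership before touching the device and handing it back once the device is idle. A simplistic, hedged sketch of how a caller might bracket an access (do_device_access() is a placeholder; the driver itself does not release ownership immediately but updates dev->pm.last_activity and returns ownership from its idle handling):

#include "mt7615.h"

int do_device_access(struct mt7615_dev *dev);   /* placeholder */

/* Sketch: bracket a device access with the ownership handshake added
 * above. Real callers defer mt7663s_firmware_own() until idle.
 */
static int example_pm_wrapped_access(struct mt7615_dev *dev)
{
        int err;

        err = mt7663s_driver_own(dev);
        if (err)
                return err;

        err = do_device_access(dev);

        mt7663s_firmware_own(dev);

        return err;
}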

View File

@ -0,0 +1,268 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include "../trace.h"
#include "mt7615.h"
#include "sdio.h"
#include "mac.h"
static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data)
{
struct mt76_sdio *sdio = &dev->mt76.sdio;
mutex_lock(&sdio->sched.lock);
sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */
FIELD_GET(TXQ_CNT_H, data[0]) + /* BE */
FIELD_GET(TXQ_CNT_L, data[1]) + /* VI */
FIELD_GET(TXQ_CNT_H, data[1]); /* VO */
sdio->sched.ple_data_quota += FIELD_GET(TXQ_CNT_H, data[2]) + /* BK */
FIELD_GET(TXQ_CNT_L, data[3]) + /* BE */
FIELD_GET(TXQ_CNT_H, data[3]) + /* VI */
FIELD_GET(TXQ_CNT_L, data[4]); /* VO */
sdio->sched.pse_mcu_quota += FIELD_GET(TXQ_CNT_L, data[2]);
mutex_unlock(&sdio->sched.lock);
}
static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
int buf_len)
{
int len = min_t(int, data_len, MT_SKB_HEAD_LEN);
struct sk_buff *skb;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
skb_put_data(skb, data, len);
if (data_len > len) {
struct page *page;
data += len;
page = virt_to_head_page(data);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, data - page_address(page),
data_len - len, buf_len);
get_page(page);
}
return skb;
}
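mt7663s_build_rx_skb() copies at most MT_SKB_HEAD_LEN bytes into the linear area and attaches the rest of the buffer as a page fragment, taking an extra page reference for the fragment. The same split, restated in isolation with a placeholder head limit:

#include <linux/mm.h>
#include <linux/skbuff.h>

#define EXAMPLE_HEAD_LEN        128     /* placeholder, not MT_SKB_HEAD_LEN */

/* Sketch: linear head plus page fragment for a buffer living in a page. */
static struct sk_buff *example_build_skb(void *data, int data_len, int buf_len)
{
        int head = min_t(int, data_len, EXAMPLE_HEAD_LEN);
        u8 *tail = (u8 *)data + head;
        struct sk_buff *skb = alloc_skb(head, GFP_KERNEL);

        if (!skb)
                return NULL;

        skb_put_data(skb, data, head);
        if (data_len > head) {
                struct page *page = virt_to_head_page(tail);

                get_page(page);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                                tail - (u8 *)page_address(page),
                                data_len - head, buf_len);
        }

        return skb;
}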
static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
struct mt76s_intr *intr)
{
struct mt76_queue *q = &dev->mt76.q_rx[qid];
struct mt76_sdio *sdio = &dev->mt76.sdio;
int len = 0, err, i, order;
struct page *page;
u8 *buf;
for (i = 0; i < intr->rx.num[qid]; i++)
len += round_up(intr->rx.len[qid][i] + 4, 4);
if (!len)
return 0;
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
order = get_order(len);
page = __dev_alloc_pages(GFP_KERNEL, order);
if (!page)
return -ENOMEM;
buf = page_address(page);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
if (err < 0) {
dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err);
__free_pages(page, order);
return err;
}
for (i = 0; i < intr->rx.num[qid]; i++) {
int index = (q->tail + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
len = intr->rx.len[qid][i];
e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4));
if (!e->skb)
break;
buf += round_up(len + 4, 4);
if (q->queued + i + 1 == q->ndesc)
break;
}
__free_pages(page, order);
spin_lock_bh(&q->lock);
q->tail = (q->tail + i) % q->ndesc;
q->queued += i;
spin_unlock_bh(&q->lock);
return err;
}
static int mt7663s_tx_update_sched(struct mt7615_dev *dev,
struct mt76_queue_entry *e,
bool mcu)
{
struct mt76_sdio *sdio = &dev->mt76.sdio;
struct mt76_phy *mphy = &dev->mt76.phy;
struct ieee80211_hdr *hdr;
int size, ret = -EBUSY;
size = DIV_ROUND_UP(e->buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
if (mcu) {
if (!test_bit(MT76_STATE_MCU_RUNNING, &mphy->state))
return 0;
mutex_lock(&sdio->sched.lock);
if (sdio->sched.pse_mcu_quota > size) {
sdio->sched.pse_mcu_quota -= size;
ret = 0;
}
mutex_unlock(&sdio->sched.lock);
return ret;
}
hdr = (struct ieee80211_hdr *)(e->skb->data + MT_USB_TXD_SIZE);
if (ieee80211_is_ctl(hdr->frame_control))
return 0;
mutex_lock(&sdio->sched.lock);
if (sdio->sched.pse_data_quota > size &&
sdio->sched.ple_data_quota > 0) {
sdio->sched.pse_data_quota -= size;
sdio->sched.ple_data_quota--;
ret = 0;
}
mutex_unlock(&sdio->sched.lock);
return ret;
}
static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q)
{
bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q;
struct mt76_sdio *sdio = &dev->mt76.sdio;
int nframes = 0;
while (q->first != q->tail) {
struct mt76_queue_entry *e = &q->entry[q->first];
int err, len = e->skb->len;
if (mt7663s_tx_update_sched(dev, e, mcu))
break;
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
/* TODO: skb_walk_frags and then write to SDIO port */
err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len);
if (err) {
dev_err(dev->mt76.dev, "sdio write failed: %d\n", err);
return -EIO;
}
e->done = true;
q->first = (q->first + 1) % q->ndesc;
nframes++;
}
return nframes;
}
static int mt7663s_tx_run_queues(struct mt7615_dev *dev)
{
int i, nframes = 0;
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
int ret;
ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q);
if (ret < 0)
return ret;
nframes += ret;
}
return nframes;
}
int mt7663s_kthread_run(void *data)
{
struct mt7615_dev *dev = data;
struct mt76_phy *mphy = &dev->mt76.phy;
while (!kthread_should_stop()) {
int ret;
cond_resched();
sdio_claim_host(dev->mt76.sdio.func);
ret = mt7663s_tx_run_queues(dev);
sdio_release_host(dev->mt76.sdio.func);
if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
} else {
wake_up_process(dev->mt76.sdio.kthread);
}
}
return 0;
}
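The TX thread uses the classic kthread sleep/wake idiom, with wake_up_process() called from the interrupt path below as the producer side. A stripped-down sketch of such a loop; has_work()/do_work() are placeholders, and setting the task state before re-checking for work is what prevents a lost wakeup:

#include <linux/kthread.h>
#include <linux/sched.h>

bool has_work(void *data);      /* placeholder */
void do_work(void *data);       /* placeholder */

/* Sketch of a wake_up_process()-driven worker loop. */
static int example_worker(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!has_work(data)) {
                        schedule();
                        continue;
                }
                __set_current_state(TASK_RUNNING);

                do_work(data);
                cond_resched();
        }

        return 0;
}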
void mt7663s_sdio_irq(struct sdio_func *func)
{
struct mt7615_dev *dev = sdio_get_drvdata(func);
struct mt76_sdio *sdio = &dev->mt76.sdio;
struct mt76s_intr intr;
/* disable interrupt */
sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0);
do {
sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr));
trace_dev_irq(&dev->mt76, intr.isr, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
goto out;
if (intr.isr & WHIER_RX0_DONE_INT_EN) {
mt7663s_rx_run_queue(dev, 0, &intr);
wake_up_process(sdio->kthread);
}
if (intr.isr & WHIER_RX1_DONE_INT_EN) {
mt7663s_rx_run_queue(dev, 1, &intr);
wake_up_process(sdio->kthread);
}
if (intr.isr & WHIER_TX_DONE_INT_EN) {
mt7663s_refill_sched_quota(dev, intr.tx.wtqcr);
mt7663s_tx_run_queues(dev);
wake_up_process(sdio->kthread);
}
} while (intr.isr);
out:
/* enable interrupt */
sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0);
}

View File

@ -0,0 +1,363 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include "mt7615.h"
#include "eeprom.h"
#include "mcu.h"
enum {
TM_CHANGED_TXPOWER_CTRL,
TM_CHANGED_TXPOWER,
TM_CHANGED_FREQ_OFFSET,
/* must be last */
NUM_TM_CHANGED
};
static const u8 tm_change_map[] = {
[TM_CHANGED_TXPOWER_CTRL] = MT76_TM_ATTR_TX_POWER_CONTROL,
[TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
[TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
};
static const u32 reg_backup_list[] = {
MT_WF_PHY_RFINTF3_0(0),
MT_WF_PHY_RFINTF3_0(1),
MT_WF_PHY_RFINTF3_0(2),
MT_WF_PHY_RFINTF3_0(3),
MT_ANT_SWITCH_CON(2),
MT_ANT_SWITCH_CON(3),
MT_ANT_SWITCH_CON(4),
MT_ANT_SWITCH_CON(6),
MT_ANT_SWITCH_CON(7),
MT_ANT_SWITCH_CON(8),
};
static const struct {
u16 wf;
u16 reg;
} rf_backup_list[] = {
{ 0, 0x48 },
{ 1, 0x48 },
{ 2, 0x48 },
{ 3, 0x48 },
};
static int
mt7615_tm_set_tx_power(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
int i, ret, n_chains = hweight8(mphy->antenna_mask);
struct cfg80211_chan_def *chandef = &mphy->chandef;
int freq = chandef->center_freq1, len, target_chains;
u8 *data, *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = chandef->chan->band;
struct sk_buff *skb;
struct {
u8 center_chan;
u8 dbdc_idx;
u8 band;
u8 rsv;
} __packed req_hdr = {
.center_chan = ieee80211_frequency_to_channel(freq),
.band = band,
.dbdc_idx = phy != &dev->phy,
};
u8 *tx_power = NULL;
if (dev->mt76.test.state != MT76_TM_STATE_OFF)
tx_power = dev->mt76.test.tx_power;
len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
if (!skb)
return -ENOMEM;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
data = skb_put_data(skb, eep + MT_EE_NIC_CONF_0, len);
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < target_chains; i++) {
int index;
ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
if (ret < 0) {
dev_kfree_skb(skb);
return -EINVAL;
}
index = ret - MT_EE_NIC_CONF_0;
if (tx_power && tx_power[i])
data[index] = tx_power[i];
}
return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_SET_TX_POWER_CTRL, false);
}
static void
mt7615_tm_reg_backup_restore(struct mt7615_dev *dev)
{
u32 *b = dev->test.reg_backup;
int n_regs = ARRAY_SIZE(reg_backup_list);
int n_rf_regs = ARRAY_SIZE(rf_backup_list);
int i;
if (dev->mt76.test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
mt76_wr(dev, reg_backup_list[i], b[i]);
for (i = 0; i < n_rf_regs; i++)
mt7615_rf_wr(dev, rf_backup_list[i].wf,
rf_backup_list[i].reg, b[n_regs + i]);
return;
}
if (b)
return;
b = devm_kzalloc(dev->mt76.dev, 4 * (n_regs + n_rf_regs),
GFP_KERNEL);
if (!b)
return;
dev->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
b[i] = mt76_rr(dev, reg_backup_list[i]);
for (i = 0; i < n_rf_regs; i++)
b[n_regs + i] = mt7615_rf_rr(dev, rf_backup_list[i].wf,
rf_backup_list[i].reg);
}
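As a hedged side note, the backup buffer above is sized with a bare 4 * (n_regs + n_rf_regs) byte count; an equivalent allocation that makes the element size explicit (and lets the allocator catch multiplication overflow) would be:

        b = devm_kcalloc(dev->mt76.dev, n_regs + n_rf_regs, sizeof(*b),
                         GFP_KERNEL);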
static void
mt7615_tm_init_phy(struct mt7615_dev *dev, struct mt7615_phy *phy)
{
unsigned int total_flags = ~0;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
mutex_unlock(&dev->mt76.mutex);
mt7615_set_channel(phy);
mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
mutex_lock(&dev->mt76.mutex);
mt7615_tm_reg_backup_restore(dev);
}
static void
mt7615_tm_init(struct mt7615_dev *dev)
{
mt7615_tm_init_phy(dev, &dev->phy);
if (dev->mt76.phy2)
mt7615_tm_init_phy(dev, dev->mt76.phy2->priv);
}
static void
mt7615_tm_set_rx_enable(struct mt7615_dev *dev, bool en)
{
u32 rqcr_mask = (MT_ARB_RQCR_RX_START |
MT_ARB_RQCR_RXV_START |
MT_ARB_RQCR_RXV_R_EN |
MT_ARB_RQCR_RXV_T_EN) *
(BIT(0) | BIT(MT_ARB_RQCR_BAND_SHIFT));
if (en) {
mt76_clear(dev, MT_ARB_SCR,
MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE);
mt76_set(dev, MT_ARB_RQCR, rqcr_mask);
} else {
mt76_set(dev, MT_ARB_SCR,
MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE);
mt76_clear(dev, MT_ARB_RQCR, rqcr_mask);
}
}
static void
mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
{
struct mt76_testmode_data *td = &dev->mt76.test;
u8 mask = td->tx_antenna_mask;
int i;
if (!mask)
return;
if (!en)
mask = dev->phy.chainmask;
for (i = 0; i < 4; i++) {
mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i),
MT_WF_PHY_RFINTF3_0_ANT,
td->tx_antenna_mask & BIT(i) ? 0 : 0xa);
}
/* 2.4 GHz band */
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0),
(td->tx_antenna_mask & BIT(0)) ? 0x8 : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2),
(td->tx_antenna_mask & BIT(1)) ? 0xe : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0),
(td->tx_antenna_mask & BIT(2)) ? 0x0 : 0xf);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2),
(td->tx_antenna_mask & BIT(3)) ? 0x6 : 0xf);
/* 5 GHz band */
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1),
(td->tx_antenna_mask & BIT(0)) ? 0xd : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3),
(td->tx_antenna_mask & BIT(1)) ? 0x13 : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1),
(td->tx_antenna_mask & BIT(2)) ? 0x5 : 0xf);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3),
(td->tx_antenna_mask & BIT(3)) ? 0xb : 0xf);
for (i = 0; i < 4; i++) {
u32 val;
val = mt7615_rf_rr(dev, i, 0x48);
val &= ~(0x3ff << 20);
if (td->tx_antenna_mask & BIT(i))
val |= 3 << 20;
else
val |= (2 << 28) | (2 << 26) | (8 << 20);
mt7615_rf_wr(dev, i, 0x48, val);
}
}
static void
mt7615_tm_set_tx_frames(struct mt7615_dev *dev, bool en)
{
struct ieee80211_tx_info *info;
struct sk_buff *skb = dev->mt76.test.tx_skb;
mt7615_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH);
mt7615_tm_set_tx_antenna(dev, en);
mt7615_tm_set_rx_enable(dev, !en);
if (!en || !skb)
return;
info = IEEE80211_SKB_CB(skb);
info->control.vif = dev->phy.monitor_vif;
}
static void
mt7615_tm_update_params(struct mt7615_dev *dev, u32 changed)
{
struct mt76_testmode_data *td = &dev->mt76.test;
bool en = dev->mt76.test.state != MT76_TM_STATE_OFF;
if (changed & BIT(TM_CHANGED_TXPOWER_CTRL))
mt7615_mcu_set_test_param(dev, MCU_ATE_SET_TX_POWER_CONTROL,
en, en && td->tx_power_control);
if (changed & BIT(TM_CHANGED_FREQ_OFFSET))
mt7615_mcu_set_test_param(dev, MCU_ATE_SET_FREQ_OFFSET,
en, en ? td->freq_offset : 0);
if (changed & BIT(TM_CHANGED_TXPOWER))
mt7615_tm_set_tx_power(&dev->phy);
}
static int
mt7615_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt76_testmode_data *td = &mdev->test;
enum mt76_testmode_state prev_state = td->state;
mdev->test.state = state;
if (prev_state == MT76_TM_STATE_TX_FRAMES)
mt7615_tm_set_tx_frames(dev, false);
else if (state == MT76_TM_STATE_TX_FRAMES)
mt7615_tm_set_tx_frames(dev, true);
if (state <= MT76_TM_STATE_IDLE)
mt7615_tm_init(dev);
if ((state == MT76_TM_STATE_IDLE &&
prev_state == MT76_TM_STATE_OFF) ||
(state == MT76_TM_STATE_OFF &&
prev_state == MT76_TM_STATE_IDLE)) {
u32 changed = 0;
int i;
for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
u16 cur = tm_change_map[i];
if (td->param_set[cur / 32] & BIT(cur % 32))
changed |= BIT(i);
}
mt7615_tm_update_params(dev, changed);
}
return 0;
}
static int
mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
enum mt76_testmode_state new_state)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt76_testmode_data *td = &dev->mt76.test;
u32 changed = 0;
int i;
BUILD_BUG_ON(NUM_TM_CHANGED >= 32);
if (new_state == MT76_TM_STATE_OFF ||
td->state == MT76_TM_STATE_OFF)
return 0;
if (td->tx_antenna_mask & ~dev->phy.chainmask)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
if (tb[tm_change_map[i]])
changed |= BIT(i);
}
mt7615_tm_update_params(dev, changed);
return 0;
}
static int
mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
void *rx, *rssi;
int i;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
if (!rx)
return -ENOMEM;
if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset) ||
nla_put_s32(msg, MT76_TM_RX_ATTR_IB_RSSI, dev->test.last_ib_rssi) ||
nla_put_s32(msg, MT76_TM_RX_ATTR_WB_RSSI, dev->test.last_wb_rssi))
return -ENOMEM;
rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI);
if (!rssi)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++)
if (nla_put_u8(msg, i, dev->test.last_rcpi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
nla_nest_end(msg, rx);
return 0;
}
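The stats dump above emits nested netlink attributes: nla_nest_start(), a series of nla_put_*() calls, then nla_nest_end(). A generic sketch of that shape, using placeholder attribute IDs and the canonical nla_nest_cancel() on failure:

#include <net/netlink.h>

enum { EXAMPLE_ATTR_STATS = 1, EXAMPLE_ATTR_VALUE };    /* placeholders */

/* Sketch: emit one nested attribute containing a single s32 value. */
static int example_dump(struct sk_buff *msg, s32 value)
{
        struct nlattr *nest;

        nest = nla_nest_start(msg, EXAMPLE_ATTR_STATS);
        if (!nest)
                return -EMSGSIZE;

        if (nla_put_s32(msg, EXAMPLE_ATTR_VALUE, value)) {
                nla_nest_cancel(msg, nest);
                return -EMSGSIZE;
        }

        nla_nest_end(msg, nest);
        return 0;
}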
const struct mt76_testmode_ops mt7615_testmode_ops = {
.set_state = mt7615_tm_set_state,
.set_params = mt7615_tm_set_params,
.dump_stats = mt7615_tm_dump_stats,
};

View File

@ -15,31 +15,6 @@
#include "mcu.h"
#include "regs.h"
static const u32 mt7663u_reg_map[] = {
[MT_TOP_CFG_BASE] = 0x80020000,
[MT_HW_BASE] = 0x80000000,
[MT_DMA_SHDL_BASE] = 0x5000a000,
[MT_HIF_BASE] = 0x50000000,
[MT_CSR_BASE] = 0x40000000,
[MT_EFUSE_ADDR_BASE] = 0x78011000,
[MT_TOP_MISC_BASE] = 0x81020000,
[MT_PLE_BASE] = 0x82060000,
[MT_PSE_BASE] = 0x82068000,
[MT_PHY_BASE] = 0x82070000,
[MT_WTBL_BASE_ADDR] = 0x820e0000,
[MT_CFG_BASE] = 0x820f0000,
[MT_AGG_BASE] = 0x820f2000,
[MT_ARB_BASE] = 0x820f3000,
[MT_TMAC_BASE] = 0x820f4000,
[MT_RMAC_BASE] = 0x820f5000,
[MT_DMA_BASE] = 0x820f7000,
[MT_PF_BASE] = 0x820f8000,
[MT_WTBL_BASE_ON] = 0x820f9000,
[MT_WTBL_BASE_OFF] = 0x820f9800,
[MT_LPON_BASE] = 0x820fb000,
[MT_MIB_BASE] = 0x820fd000,
};
static const struct usb_device_id mt7615_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
{ },
@ -64,205 +39,19 @@ static void mt7663u_cleanup(struct mt7615_dev *dev)
mt76u_queues_deinit(&dev->mt76);
}
static void
mt7663u_mac_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt76_txq_id qid, struct ieee80211_sta *sta,
struct sk_buff *skb)
static void mt7663u_init_work(struct work_struct *work)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *key = info->control.hw_key;
__le32 *txwi;
int pid;
if (!wcid)
wcid = &dev->mt76.global_wcid;
pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
memset(txwi, 0, MT_USB_TXD_SIZE);
mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
skb_push(skb, MT_USB_TXD_SIZE);
}
static int
__mt7663u_mac_set_rates(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
{
struct mt7615_rate_desc *rate = &wd->rate;
struct mt7615_sta *sta = wd->sta;
u32 w5, w27, addr, val;
lockdep_assert_held(&dev->mt76.mutex);
if (!sta)
return -EINVAL;
if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
return -ETIMEDOUT;
addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx);
w27 = mt76_rr(dev, addr + 27 * 4);
w27 &= ~MT_WTBL_W27_CC_BW_SEL;
w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw);
w5 = mt76_rr(dev, addr + 5 * 4);
w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
MT_WTBL_W5_MPDU_OK_COUNT |
MT_WTBL_W5_MPDU_FAIL_COUNT |
MT_WTBL_W5_RATE_IDX);
w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) |
FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
rate->bw_idx ? rate->bw_idx - 1 : 7);
mt76_wr(dev, MT_WTBL_RIUCR0, w5);
mt76_wr(dev, MT_WTBL_RIUCR1,
FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1]));
mt76_wr(dev, MT_WTBL_RIUCR2,
FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2]));
mt76_wr(dev, MT_WTBL_RIUCR3,
FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3]));
mt76_wr(dev, MT_WTBL_UPDATE,
FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) |
MT_WTBL_UPDATE_RATE_UPDATE |
MT_WTBL_UPDATE_TX_COUNT_CLEAR);
mt76_wr(dev, addr + 27 * 4, w27);
mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
val = mt76_rr(dev, MT_LPON_UTTR0);
sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates;
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
return 0;
}
static int
__mt7663u_mac_set_key(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
{
struct mt7615_key_desc *key = &wd->key;
struct mt7615_sta *sta = wd->sta;
enum mt7615_cipher_type cipher;
struct mt76_wcid *wcid;
int err;
lockdep_assert_held(&dev->mt76.mutex);
if (!sta)
return -EINVAL;
cipher = mt7615_mac_get_cipher(key->cipher);
if (cipher == MT_CIPHER_NONE)
return -EOPNOTSUPP;
wcid = &wd->sta->wcid;
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
cipher, key->cmd);
if (err < 0)
return err;
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
key->cmd);
if (err < 0)
return err;
if (key->cmd == SET_KEY)
wcid->cipher |= BIT(cipher);
else
wcid->cipher &= ~BIT(cipher);
return 0;
}
void mt7663u_wtbl_work(struct work_struct *work)
{
struct mt7615_wtbl_desc *wd, *wd_next;
struct mt7615_dev *dev;
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
wtbl_work);
dev = container_of(work, struct mt7615_dev, mcu_work);
if (mt7663u_mcu_init(dev))
return;
list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) {
spin_lock_bh(&dev->mt76.lock);
list_del(&wd->node);
spin_unlock_bh(&dev->mt76.lock);
mutex_lock(&dev->mt76.mutex);
switch (wd->type) {
case MT7615_WTBL_RATE_DESC:
__mt7663u_mac_set_rates(dev, wd);
break;
case MT7615_WTBL_KEY_DESC:
__mt7663u_mac_set_key(dev, wd);
break;
}
mutex_unlock(&dev->mt76.mutex);
kfree(wd);
}
}
static void
mt7663u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
struct mt76_queue_entry *e)
{
skb_pull(e->skb, MT_USB_HDR_SIZE + MT_USB_TXD_SIZE);
mt76_tx_complete_skb(mdev, e->skb);
}
static int
mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
struct mt7615_sta *msta;
msta = container_of(wcid, struct mt7615_sta, wcid);
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
msta->rates);
msta->rate_probe = true;
spin_unlock_bh(&dev->mt76.lock);
}
mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb);
return mt76u_skb_dma_info(tx_info->skb, tx_info->skb->len);
}
static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mutex_lock(&dev->mt76.mutex);
mt7615_mac_sta_poll(dev);
mutex_unlock(&dev->mt76.mutex);
return 0;
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
mt7615_check_offload_capability(dev);
}
static int mt7663u_probe(struct usb_interface *usb_intf,
@ -271,9 +60,9 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_USB_TXD_SIZE,
.drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
.tx_prepare_skb = mt7663u_tx_prepare_skb,
.tx_complete_skb = mt7663u_tx_complete_skb,
.tx_status_data = mt7663u_tx_status_data,
.tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
@ -303,7 +92,8 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
dev->reg_map = mt7663u_reg_map;
INIT_WORK(&dev->mcu_work, mt7663u_init_work);
dev->reg_map = mt7663_usb_sdio_reg_map;
dev->ops = ops;
ret = mt76u_init(mdev, usb_intf, true);
if (ret < 0)
@ -342,7 +132,7 @@ alloc_queues:
if (ret)
goto error_free_q;
ret = mt7663u_register_device(dev);
ret = mt7663_usb_sdio_register_device(dev);
if (ret)
goto error_free_q;
@ -351,11 +141,10 @@ alloc_queues:
error_free_q:
mt76u_queues_deinit(&dev->mt76);
error:
mt76u_deinit(&dev->mt76);
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
ieee80211_free_hw(mdev->hw);
mt76_free_device(&dev->mt76);
return ret;
}
@ -373,8 +162,7 @@ static void mt7663u_disconnect(struct usb_interface *usb_intf)
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
mt76u_deinit(&dev->mt76);
ieee80211_free_hw(dev->mt76.hw);
mt76_free_device(&dev->mt76);
}
#ifdef CONFIG_PM

View File

@ -1,145 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "mt7615.h"
#include "mac.h"
#include "regs.h"
static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
{
int i;
mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
/* disable refill group 5 - group 15 and raise group 2
* and 3 as high priority.
*/
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
for (i = 0; i < 5; i++)
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
/* group pririority from high to low:
* 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
*/
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c);
mt76_wr(dev, MT_UDMA_WLCFG_1,
FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) |
FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1));
/* setup UDMA Rx Flush */
mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
/* hif reset */
mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N);
mt76_set(dev, MT_UDMA_WLCFG_0,
MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN |
MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN |
MT_WL_TX_TMOUT_FUNC_EN);
mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO,
FIELD_PREP(MT_WL_RX_AGG_LMT, 32) |
FIELD_PREP(MT_WL_RX_AGG_TO, 100));
return 0;
}
static int mt7663u_init_hardware(struct mt7615_dev *dev)
{
int ret, idx;
ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE);
if (ret < 0)
return ret;
ret = mt7663u_dma_sched_init(dev);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx)
return -ENOSPC;
dev->mt76.global_wcid.idx = idx;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
return 0;
}
static void mt7663u_init_work(struct work_struct *work)
{
struct mt7615_dev *dev;
dev = container_of(work, struct mt7615_dev, mcu_work);
if (mt7663u_mcu_init(dev))
return;
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
mt7615_check_offload_capability(dev);
}
int mt7663u_register_device(struct mt7615_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
int err;
INIT_WORK(&dev->wtbl_work, mt7663u_wtbl_work);
INIT_WORK(&dev->mcu_work, mt7663u_init_work);
INIT_LIST_HEAD(&dev->wd_head);
mt7615_init_device(dev);
err = mt7663u_init_hardware(dev);
if (err)
return err;
hw->extra_tx_headroom += MT_USB_HDR_SIZE + MT_USB_TXD_SIZE;
/* check hw sg support in order to enable AMSDU */
hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
err = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (err < 0)
return err;
if (!dev->mt76.usb.sg_en) {
struct ieee80211_sta_vht_cap *vht_cap;
/* decrease max A-MSDU size if SG is not supported */
vht_cap = &dev->mphy.sband_5g.sband.vht_cap;
vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
}
ieee80211_queue_work(hw, &dev->mcu_work);
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}

View File

@ -28,13 +28,13 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
ep = MT_EP_OUT_AC_BE;
ret = mt76u_skb_dma_info(skb, skb->len);
put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
ret = mt76_skb_adjust_pad(skb);
if (ret < 0)
goto out;
ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
1000, ep);
dev_kfree_skb(skb);
if (ret < 0)
goto out;
@ -43,6 +43,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
out:
mutex_unlock(&mdev->mcu.mutex);
dev_kfree_skb(skb);
return ret;
}
@ -60,6 +61,8 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
dev->mt76.mcu_ops = &mt7663u_mcu_ops,
/* usb does not support runtime-pm */
clear_bit(MT76_STATE_PM, &dev->mphy.state);
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {

View File

@ -0,0 +1,394 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include "mt7615.h"
#include "mac.h"
#include "mcu.h"
#include "regs.h"
const u32 mt7663_usb_sdio_reg_map[] = {
[MT_TOP_CFG_BASE] = 0x80020000,
[MT_HW_BASE] = 0x80000000,
[MT_DMA_SHDL_BASE] = 0x5000a000,
[MT_HIF_BASE] = 0x50000000,
[MT_CSR_BASE] = 0x40000000,
[MT_EFUSE_ADDR_BASE] = 0x78011000,
[MT_TOP_MISC_BASE] = 0x81020000,
[MT_PLE_BASE] = 0x82060000,
[MT_PSE_BASE] = 0x82068000,
[MT_PP_BASE] = 0x8206c000,
[MT_WTBL_BASE_ADDR] = 0x820e0000,
[MT_CFG_BASE] = 0x820f0000,
[MT_AGG_BASE] = 0x820f2000,
[MT_ARB_BASE] = 0x820f3000,
[MT_TMAC_BASE] = 0x820f4000,
[MT_RMAC_BASE] = 0x820f5000,
[MT_DMA_BASE] = 0x820f7000,
[MT_PF_BASE] = 0x820f8000,
[MT_WTBL_BASE_ON] = 0x820f9000,
[MT_WTBL_BASE_OFF] = 0x820f9800,
[MT_LPON_BASE] = 0x820fb000,
[MT_MIB_BASE] = 0x820fd000,
};
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
static void
mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt76_txq_id qid, struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *key = info->control.hw_key;
__le32 *txwi;
int pid;
if (!wcid)
wcid = &dev->mt76.global_wcid;
pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
memset(txwi, 0, MT_USB_TXD_SIZE);
mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
skb_push(skb, MT_USB_TXD_SIZE);
}
static int
mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
{
struct mt7615_rate_desc *rate = &wd->rate;
struct mt7615_sta *sta = wd->sta;
u32 w5, w27, addr, val;
lockdep_assert_held(&dev->mt76.mutex);
if (!sta)
return -EINVAL;
if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
return -ETIMEDOUT;
addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx);
w27 = mt76_rr(dev, addr + 27 * 4);
w27 &= ~MT_WTBL_W27_CC_BW_SEL;
w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw);
w5 = mt76_rr(dev, addr + 5 * 4);
w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
MT_WTBL_W5_MPDU_OK_COUNT |
MT_WTBL_W5_MPDU_FAIL_COUNT |
MT_WTBL_W5_RATE_IDX);
w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) |
FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
rate->bw_idx ? rate->bw_idx - 1 : 7);
mt76_wr(dev, MT_WTBL_RIUCR0, w5);
mt76_wr(dev, MT_WTBL_RIUCR1,
FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1]));
mt76_wr(dev, MT_WTBL_RIUCR2,
FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2]));
mt76_wr(dev, MT_WTBL_RIUCR3,
FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3]));
mt76_wr(dev, MT_WTBL_UPDATE,
FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) |
MT_WTBL_UPDATE_RATE_UPDATE |
MT_WTBL_UPDATE_TX_COUNT_CLEAR);
mt76_wr(dev, addr + 27 * 4, w27);
sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
val = mt76_rr(dev, MT_LPON_UTTR0);
sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates;
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
return 0;
}
static int
mt7663_usb_sdio_set_key(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
{
struct mt7615_key_desc *key = &wd->key;
struct mt7615_sta *sta = wd->sta;
enum mt7615_cipher_type cipher;
struct mt76_wcid *wcid;
int err;
lockdep_assert_held(&dev->mt76.mutex);
if (!sta) {
err = -EINVAL;
goto out;
}
cipher = mt7615_mac_get_cipher(key->cipher);
if (cipher == MT_CIPHER_NONE) {
err = -EOPNOTSUPP;
goto out;
}
wcid = &wd->sta->wcid;
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
cipher, key->cmd);
if (err < 0)
goto out;
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
key->cmd);
if (err < 0)
goto out;
if (key->cmd == SET_KEY)
wcid->cipher |= BIT(cipher);
else
wcid->cipher &= ~BIT(cipher);
out:
kfree(key->key);
return err;
}
void mt7663_usb_sdio_wtbl_work(struct work_struct *work)
{
struct mt7615_wtbl_desc *wd, *wd_next;
struct list_head wd_list;
struct mt7615_dev *dev;
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
wtbl_work);
INIT_LIST_HEAD(&wd_list);
spin_lock_bh(&dev->mt76.lock);
list_splice_init(&dev->wd_head, &wd_list);
spin_unlock_bh(&dev->mt76.lock);
list_for_each_entry_safe(wd, wd_next, &wd_list, node) {
list_del(&wd->node);
mt7615_mutex_acquire(dev);
switch (wd->type) {
case MT7615_WTBL_RATE_DESC:
mt7663_usb_sdio_set_rates(dev, wd);
break;
case MT7615_WTBL_KEY_DESC:
mt7663_usb_sdio_set_key(dev, wd);
break;
}
mt7615_mutex_release(dev);
kfree(wd);
}
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_wtbl_work);
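Compared with the old USB-only version, the work handler above no longer takes dev->mt76.lock once per entry: it splices the whole pending list onto a private list under the lock and then drains it with the lock released. That splice-and-drain idiom in isolation (struct item and process_item() are placeholders):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {                           /* placeholder work descriptor */
        struct list_head node;
};

void process_item(struct item *it);     /* placeholder */

/* Sketch: move all pending entries to a local list under the lock,
 * then process and free them without holding it.
 */
static void example_drain(spinlock_t *lock, struct list_head *pending)
{
        struct item *it, *next;
        LIST_HEAD(local);

        spin_lock_bh(lock);
        list_splice_init(pending, &local);
        spin_unlock_bh(lock);

        list_for_each_entry_safe(it, next, &local, node) {
                list_del(&it->node);
                process_item(it);
                kfree(it);
        }
}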
bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mt7615_mutex_acquire(dev);
mt7615_mac_sta_poll(dev);
mt7615_mutex_release(dev);
return 0;
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data);
void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
enum mt76_txq_id qid,
struct mt76_queue_entry *e)
{
unsigned int headroom = MT_USB_TXD_SIZE;
if (mt76_is_usb(mdev))
headroom += MT_USB_HDR_SIZE;
skb_pull(e->skb, headroom);
mt76_tx_complete_skb(mdev, e->skb);
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb);
int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct sk_buff *skb = tx_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
!msta->rate_probe) {
/* request to configure sampling rate */
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
msta->rates);
spin_unlock_bh(&dev->mt76.lock);
}
mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
if (mt76_is_usb(mdev))
put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
return mt76_skb_adjust_pad(skb);
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
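tx_prepare_skb is where the remaining bus difference is handled: only USB frames get a 4-byte little-endian length prefix before the shared mt76_skb_adjust_pad() padding, while SDIO frames go out with the TXWI alone. The prefix step on its own looks roughly like this (sketch; assumes enough skb headroom has been reserved, and captures the length first to avoid any argument-evaluation-order ambiguity):

#include <asm/unaligned.h>
#include <linux/skbuff.h>

/* Sketch: prepend a 4-byte LE length header, as done for the USB path. */
static void example_prepend_len(struct sk_buff *skb)
{
        u32 len = skb->len;

        put_unaligned_le32(len, skb_push(skb, sizeof(len)));
}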
static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
{
int i;
mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
/* disable refill for groups 5-15 and raise groups 2
* and 3 to high priority.
*/
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
for (i = 0; i < 5; i++)
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
/* group priority from high to low:
* 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
*/
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c);
mt76_wr(dev, MT_UDMA_WLCFG_1,
FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) |
FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1));
/* setup UDMA Rx Flush */
mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
/* hif reset */
mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N);
mt76_set(dev, MT_UDMA_WLCFG_0,
MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN |
MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN |
MT_WL_TX_TMOUT_FUNC_EN);
mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO,
FIELD_PREP(MT_WL_RX_AGG_LMT, 32) |
FIELD_PREP(MT_WL_RX_AGG_TO, 100));
return 0;
}
static int mt7663_usb_sdio_init_hardware(struct mt7615_dev *dev)
{
int ret, idx;
ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE);
if (ret < 0)
return ret;
if (mt76_is_usb(&dev->mt76)) {
ret = mt7663u_dma_sched_init(dev);
if (ret)
return ret;
}
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx)
return -ENOSPC;
dev->mt76.global_wcid.idx = idx;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
return 0;
}
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
int err;
INIT_WORK(&dev->wtbl_work, mt7663_usb_sdio_wtbl_work);
INIT_LIST_HEAD(&dev->wd_head);
mt7615_init_device(dev);
err = mt7663_usb_sdio_init_hardware(dev);
if (err)
return err;
/* check hw sg support in order to enable AMSDU */
if (dev->mt76.usb.sg_en || mt76_is_sdio(&dev->mt76))
hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
else
hw->max_tx_fragments = 1;
hw->extra_tx_headroom += MT_USB_TXD_SIZE;
if (mt76_is_usb(&dev->mt76))
hw->extra_tx_headroom += MT_USB_HDR_SIZE;
err = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (err < 0)
return err;
if (!dev->mt76.usb.sg_en) {
struct ieee80211_sta_vht_cap *vht_cap;
/* decrease max A-MSDU size if SG is not supported */
vht_cap = &dev->mphy.sband_5g.sband.vht_cap;
vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
}
ieee80211_queue_work(hw, &dev->mcu_work);
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -277,9 +277,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
err:
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
mt76u_deinit(&dev->mt76);
mt76_free_device(&dev->mt76);
ieee80211_free_hw(mdev->hw);
return ret;
}
@ -297,8 +296,7 @@ static void mt76x0_disconnect(struct usb_interface *usb_intf)
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
mt76u_deinit(&dev->mt76);
ieee80211_free_hw(dev->mt76.hw);
mt76_free_device(&dev->mt76);
}
static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,

View File

@ -80,7 +80,6 @@ struct mt76x02_dev {
struct mutex phy_mutex;
u16 vif_mask;
u16 chainmask;
u8 txdone_seq;

View File

@ -439,7 +439,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
memset(msta, 0, sizeof(*msta));
}
dev->vif_mask = 0;
dev->mphy.vif_mask = 0;
dev->mt76.beacon_mask = 0;
}

View File

@ -56,8 +56,9 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
*/
info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
return mt76u_skb_dma_info(skb, info);
return mt76_skb_adjust_pad(skb);
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,

View File

@ -87,8 +87,10 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
u32 info;
int ret;
if (test_bit(MT76_REMOVED, &dev->phy.state))
return 0;
if (test_bit(MT76_REMOVED, &dev->phy.state)) {
ret = 0;
goto out;
}
if (wait_resp) {
seq = ++dev->mcu.msg_seq & 0xf;
@ -111,6 +113,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
if (wait_resp)
ret = mt76x02u_mcu_wait_resp(dev, seq);
out:
consume_skb(skb);
return ret;

View File

@ -305,7 +305,7 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
unsigned int idx = 0;
/* Allow to change address in HW if we create first interface. */
if (!dev->vif_mask &&
if (!dev->mphy.vif_mask &&
(((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
mt76x02_mac_setaddr(dev, vif->addr);
@ -330,11 +330,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx += 8;
/* vif is already set or idx is 8 for AP/Mesh/... */
if (dev->vif_mask & BIT(idx) ||
if (dev->mphy.vif_mask & BIT(idx) ||
(vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
dev->vif_mask |= BIT(idx);
dev->mphy.vif_mask |= BIT(idx);
mt76x02_vif_init(dev, vif, idx);
return 0;
@ -348,7 +348,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mt76_txq_remove(&dev->mt76, vif->txq);
dev->vif_mask &= ~BIT(mvif->idx);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);

View File

@ -39,6 +39,7 @@ static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
extern const struct ieee80211_ops mt76x2_ops;
int mt76x2_register_device(struct mt76x02_dev *dev);
int mt76x2_resume_device(struct mt76x02_dev *dev);
void mt76x2_phy_power_on(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);

View File

@ -9,7 +9,7 @@
#include "mt76x2.h"
static const struct pci_device_id mt76pci_device_table[] = {
static const struct pci_device_id mt76x2e_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7662) },
{ PCI_DEVICE(0x14c3, 0x7612) },
{ PCI_DEVICE(0x14c3, 0x7602) },
@ -17,7 +17,7 @@ static const struct pci_device_id mt76pci_device_table[] = {
};
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
@ -93,7 +93,7 @@ error:
}
static void
mt76pci_remove(struct pci_dev *pdev)
mt76x2e_remove(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
@ -103,16 +103,72 @@ mt76pci_remove(struct pci_dev *pdev)
mt76_free_device(mdev);
}
MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
static int __maybe_unused
mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
int i, err;
napi_disable(&mdev->tx_napi);
tasklet_kill(&mdev->pre_tbtt_tasklet);
tasklet_kill(&mdev->tx_tasklet);
mt76_for_each_q_rx(mdev, i)
napi_disable(&mdev->napi[i]);
pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
pci_save_state(pdev);
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (err)
goto restore;
return 0;
restore:
mt76_for_each_q_rx(mdev, i)
napi_enable(&mdev->napi[i]);
napi_enable(&mdev->tx_napi);
return err;
}
static int __maybe_unused
mt76x2e_resume(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int i, err;
err = pci_set_power_state(pdev, PCI_D0);
if (err)
return err;
pci_restore_state(pdev);
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
return mt76x2_resume_device(dev);
}
MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
MODULE_FIRMWARE(MT7662_FIRMWARE);
MODULE_FIRMWARE(MT7662_ROM_PATCH);
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt76pci_device_table,
.probe = mt76pci_probe,
.remove = mt76pci_remove,
.id_table = mt76x2e_device_table,
.probe = mt76x2e_probe,
.remove = mt76x2e_remove,
#ifdef CONFIG_PM
.suspend = mt76x2e_suspend,
.resume = mt76x2e_resume,
#endif /* CONFIG_PM */
};
module_pci_driver(mt76pci_driver);
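The new suspend/resume support uses the legacy pci_driver .suspend/.resume callbacks rather than dev_pm_ops; the core PCI sequence is enable-wake, save state and drop to the target power state on suspend, then return to D0 and restore state on resume. Stripped of the driver-specific NAPI/tasklet handling, that skeleton looks roughly like:

#include <linux/pci.h>

/* Sketch of the legacy PCI PM skeleton; device quiesce/restart omitted. */
static int __maybe_unused example_pci_suspend(struct pci_dev *pdev,
                                              pm_message_t state)
{
        pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
        pci_save_state(pdev);

        return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int __maybe_unused example_pci_resume(struct pci_dev *pdev)
{
        int err = pci_set_power_state(pdev, PCI_D0);

        if (err)
                return err;

        pci_restore_state(pdev);

        return 0;
}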

View File

@ -217,6 +217,23 @@ mt76x2_power_on(struct mt76x02_dev *dev)
mt76x2_power_on_rf(dev, 1);
}
int mt76x2_resume_device(struct mt76x02_dev *dev)
{
int err;
mt76x02_dma_disable(dev);
mt76x2_reset_wlan(dev, true);
mt76x2_power_on(dev);
err = mt76x2_mac_reset(dev, true);
if (err)
return err;
mt76x02_mac_start(dev);
return mt76x2_mcu_init(dev);
}
static int mt76x2_init_hardware(struct mt76x02_dev *dev)
{
int ret;

View File

@ -16,6 +16,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
@ -74,8 +75,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
return 0;
err:
ieee80211_free_hw(mt76_hw(dev));
mt76u_deinit(&dev->mt76);
mt76_free_device(&dev->mt76);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
@ -91,9 +91,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
set_bit(MT76_REMOVED, &dev->mphy.state);
ieee80211_unregister_hw(hw);
mt76x2u_cleanup(dev);
mt76u_deinit(&dev->mt76);
ieee80211_free_hw(hw);
mt76_free_device(&dev->mt76);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
}

View File

@ -178,7 +178,14 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n",
FIELD_GET(MT_ETBF_TX_FB_TRI, cnt));
/* Tx SU counters */
/* Tx SU & MU counters */
cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
seq_printf(s, "Tx multi-user Beamforming counts: %ld\n",
FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt));
cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
seq_printf(s, "Tx multi-user MPDU counts: %d\n", cnt);
cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", cnt);
cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt);
@ -384,6 +391,7 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
return 0;
}
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/
/* usage: <tx mode> <ldpc> <stbc> <bw> <gi> <nss> <mcs> */
@ -461,3 +469,4 @@ void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats);
}
#endif

View File

@ -79,26 +79,27 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
static void
mt7915_tx_cleanup(struct mt7915_dev *dev)
{
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false);
mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
}
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
{
static const u8 queue_map[] = {
MT_TXQ_MCU,
MT_TXQ_MCU_WA,
MT_TXQ_BE
};
struct mt7915_dev *dev;
int i;
dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
mt76_queue_tx_cleanup(dev, queue_map[i], false);
mt7915_tx_cleanup(dev);
if (napi_complete_done(napi, 0))
mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
mt76_queue_tx_cleanup(dev, queue_map[i], false);
mt7915_tx_cleanup(dev);
mt7915_mac_sta_poll(dev);

View File

@ -417,11 +417,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
he_cap_elem->mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE;
he_cap_elem->mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1;
he_cap_elem->mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_BSR;
he_cap_elem->mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED;
@ -443,27 +438,27 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
/* TODO: OFDMA */
switch (i) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[0] |=
IEEE80211_HE_MAC_CAP0_TWT_RES;
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;
he_cap_elem->mac_cap_info[4] |=
IEEE80211_HE_MAC_CAP4_BQR;
he_cap_elem->mac_cap_info[5] |=
IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
he_cap_elem->phy_cap_info[3] |=
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
he_cap_elem->phy_cap_info[6] |=
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[0] |=
IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->mac_cap_info[3] |=
IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED;
he_cap_elem->mac_cap_info[1] |=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] |=
@ -473,18 +468,31 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
he_cap_elem->phy_cap_info[1] |=
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A;
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
he_cap_elem->phy_cap_info[3] |=
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
he_cap_elem->phy_cap_info[6] |=
IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
he_cap_elem->phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
break;
#endif
}
he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);


@ -178,14 +178,14 @@ void mt7915_mac_sta_poll(struct mt7915_dev *dev)
static void
mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
struct mt7915_rxv *rxv,
struct ieee80211_radiotap_he *he)
struct ieee80211_radiotap_he *he,
__le32 *rxv)
{
u32 ru_h, ru_l;
u8 ru, offs = 0;
ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv->v[0]));
ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv->v[1]));
ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
ru = (u8)(ru_l | ru_h << 4);
status->bw = RATE_INFO_BW_HE_RU;
@ -228,7 +228,7 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
static void
mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
struct mt76_rx_status *status,
struct mt7915_rxv *rxv)
__le32 *rxv, u32 phy)
{
/* TODO: struct ieee80211_radiotap_he_mu */
static const struct ieee80211_radiotap_he known = {
@ -245,48 +245,45 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
HE_BITS(DATA2_TXOP_KNOWN),
};
struct ieee80211_radiotap_he *he = NULL;
__le32 v2 = rxv->v[2];
__le32 v11 = rxv->v[11];
__le32 v14 = rxv->v[14];
u32 ltf_size = le32_get_bits(v2, MT_CRXV_HE_LTF_SIZE) + 1;
u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
he = skb_push(skb, sizeof(known));
memcpy(he, &known, sizeof(known));
he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, v14) |
HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, v2);
he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, v2) |
he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
le16_encode_bits(ltf_size,
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, v14) |
HE_PREP(DATA6_DOPPLER, DOPPLER, v14);
he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
switch (rxv->phy) {
switch (phy) {
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE_KNOWN);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, v14) |
HE_PREP(DATA3_UL_DL, UPLINK, v2);
he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
break;
case MT_PHY_TYPE_HE_MU:
he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2);
he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
mt7915_mac_decode_he_radiotap_ru(status, rxv, he);
mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
break;
case MT_PHY_TYPE_HE_TB:
he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
@ -295,12 +292,12 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, v11) |
HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, v11) |
HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, v11) |
HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, v11);
he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
mt7915_mac_decode_he_radiotap_ru(status, rxv, he);
mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
break;
default:
break;
@ -314,8 +311,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
struct mt7915_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
struct mt7915_rxv rxv = {};
__le32 *rxd = (__le32 *)skb->data;
__le32 *rxv = NULL;
u32 mode = 0;
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
@ -427,15 +425,14 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
u32 v0, v1, v2;
memcpy(rxv.v, rxd, sizeof(rxv.v));
rxv = rxd;
rxd += 2;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
v0 = le32_to_cpu(rxv.v[0]);
v1 = le32_to_cpu(rxv.v[1]);
v2 = le32_to_cpu(rxv.v[2]);
v0 = le32_to_cpu(rxv[0]);
v1 = le32_to_cpu(rxv[1]);
v2 = le32_to_cpu(rxv[2]);
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
@ -466,9 +463,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
return -EINVAL;
idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
rxv.phy = FIELD_GET(MT_CRXV_TX_MODE, v2);
mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
switch (rxv.phy) {
switch (mode) {
case MT_PHY_TYPE_CCK:
cck = true;
/* fall through */
@ -503,8 +500,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
status->he_gi = gi;
if (idx & MT_PRXV_TX_DCM)
status->he_dcm = true;
status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
break;
default:
return -EINVAL;
@ -515,7 +511,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
case IEEE80211_STA_RX_BW_20:
break;
case IEEE80211_STA_RX_BW_40:
if (rxv.phy & MT_PHY_TYPE_HE_EXT_SU &&
if (mode & MT_PHY_TYPE_HE_EXT_SU &&
(idx & MT_PRXV_TX_ER_SU_106T)) {
status->bw = RATE_INFO_BW_HE_RU;
status->he_ru =
@ -535,7 +531,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
if (rxv.phy < MT_PHY_TYPE_HE_SU && gi)
if (mode < MT_PHY_TYPE_HE_SU && gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
}
}
@ -548,8 +544,8 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
mt76_insert_ccmp_hdr(skb, key_id);
}
if (status->flag & RX_FLAG_RADIOTAP_HE)
mt7915_mac_decode_he_radiotap(skb, status, &rxv);
if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
hdr = mt76_skb_get_hdr(skb);
if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
@ -591,16 +587,16 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
skb_get_queue_mapping(skb);
p_fmt = MT_TX_TYPE_CT;
} else if (beacon) {
q_idx = MT_LMAC_BCN0;
if (beacon) {
p_fmt = MT_TX_TYPE_FW;
} else {
q_idx = MT_LMAC_ALTX0;
q_idx = MT_LMAC_BCN0;
} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
p_fmt = MT_TX_TYPE_CT;
q_idx = MT_LMAC_ALTX0;
} else {
p_fmt = MT_TX_TYPE_CT;
q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
@ -616,6 +612,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD1_TID,
skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
val |= MT_TXD1_TGID;


@ -128,13 +128,6 @@ enum rx_pkt_type {
#define MT_CRXV_HE_BEAM_CHNG BIT(13)
#define MT_CRXV_HE_DOPPLER BIT(16)
struct mt7915_rxv {
u32 phy;
/* P-RXV: bit 0~1, C-RXV: bit 2~19 */
__le32 v[20];
};
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
@ -149,16 +142,6 @@ enum tx_pkt_type {
MT_TX_TYPE_FW,
};
enum tx_pkt_queue_idx {
MT_LMAC_AC00,
MT_LMAC_AC01,
MT_LMAC_AC02,
MT_LMAC_AC03,
MT_LMAC_ALTX0 = 0x10,
MT_LMAC_BMC0 = 0x10,
MT_LMAC_BCN0 = 0x12,
};
enum tx_port_idx {
MT_TX_PORT_IDX_LMAC,
MT_TX_PORT_IDX_MCU


@ -125,7 +125,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mutex_lock(&dev->mt76.mutex);
mvif->idx = ffs(~phy->vif_mask) - 1;
mvif->idx = ffs(~phy->mt76->vif_mask) - 1;
if (mvif->idx >= MT7915_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@ -150,7 +150,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
phy->vif_mask |= BIT(mvif->idx);
phy->mt76->vif_mask |= BIT(mvif->idx);
phy->omac_mask |= BIT(mvif->omac_idx);
idx = MT7915_WTBL_RESERVED - mvif->idx;
@ -194,7 +194,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
phy->vif_mask &= ~BIT(mvif->idx);
phy->mt76->vif_mask &= ~BIT(mvif->idx);
phy->omac_mask &= ~BIT(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
@ -350,13 +350,12 @@ static int
mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
mvif->wmm[queue].cw_min = params->cw_min;
mvif->wmm[queue].cw_max = params->cw_max;
mvif->wmm[queue].aifs = params->aifs;
mvif->wmm[queue].txop = params->txop;
queue = mt7915_lmac_mapping(dev, queue);
mvif->queue_params[queue] = *params;
return 0;
}


@ -312,8 +312,10 @@ mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd,
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
int ret = 0;
if (seq != rxd->seq)
return -EAGAIN;
if (seq != rxd->seq) {
ret = -EAGAIN;
goto out;
}
switch (cmd) {
case -MCU_CMD_PATCH_SEM_CONTROL:
@ -330,6 +332,7 @@ mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd,
default:
break;
}
out:
dev_kfree_skb(skb);
return ret;
@ -505,15 +508,22 @@ static void
mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data;
u16 wcidx = le16_to_cpu(ra->wlan_idx);
struct mt76_wcid *wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
struct mt7915_sta *msta = container_of(wcid, struct mt7915_sta, wcid);
struct mt7915_sta_stats *stats = &msta->stats;
struct mt76_phy *mphy = &dev->mphy;
struct rate_info rate = {}, prob_rate = {};
u16 probe = le16_to_cpu(ra->prob_up_rate);
u16 attempts = le16_to_cpu(ra->attempts);
u16 curr = le16_to_cpu(ra->curr_rate);
u16 probe = le16_to_cpu(ra->prob_up_rate);
u16 wcidx = le16_to_cpu(ra->wlan_idx);
struct mt76_phy *mphy = &dev->mphy;
struct mt7915_sta_stats *stats;
struct mt7915_sta *msta;
struct mt76_wcid *wcid;
if (wcidx >= MT76_N_WCIDS)
return;
wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
msta = container_of(wcid, struct mt7915_sta, wcid);
stats = &msta->stats;
if (msta->wcid.ext_phy && dev->mt76.phy2)
mphy = dev->mt76.phy2;
@ -1166,6 +1176,23 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
int ret;
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
MT7915_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
&skb);
mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
ret = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_STA_REC_UPDATE, true);
if (ret)
return ret;
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
MT7915_STA_UPDATE_MAX_SIZE);
@ -1173,11 +1200,6 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
return PTR_ERR(skb);
mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
&skb);
mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_STA_REC_UPDATE, true);
@ -1466,16 +1488,39 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
}
static int
mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct sk_buff *skb;
int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru);
if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
return 0;
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec muru */
mt7915_mcu_sta_muru_tlv(skb, sta);
return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_STA_REC_UPDATE, true);
}
static void
mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta)
{
struct tlv *tlv;
/* starec ht */
if (sta->ht_cap.ht_supported) {
struct sta_rec_ht *ht;
/* starec ht */
tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
@ -1495,10 +1540,6 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
/* starec he */
if (sta->he_cap.has_he)
mt7915_mcu_sta_he_tlv(skb, sta);
/* starec muru */
if (sta->he_cap.has_he || sta->vht_cap.vht_supported)
mt7915_mcu_sta_muru_tlv(skb, sta);
}
static void
@ -2064,6 +2105,32 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
MCU_EXT_CMD_STA_REC_UPDATE, true);
}
static int
mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
#define MT_STA_BSS_GROUP 1
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct {
__le32 action;
u8 wlan_idx_lo;
u8 status;
u8 wlan_idx_hi;
u8 rsv0[5];
__le32 val;
u8 rsv1[8];
} __packed req = {
.action = cpu_to_le32(MT_STA_BSS_GROUP),
.wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
.wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
.val = cpu_to_le32(mvif->idx),
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL,
&req, sizeof(req), true);
}
int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
@ -2073,10 +2140,18 @@ int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return 0;
/* must keep the order */
ret = mt7915_mcu_add_group(dev, vif, sta);
if (ret)
return ret;
ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
if (ret)
return ret;
ret = mt7915_mcu_add_mu(dev, vif, sta);
if (ret)
return ret;
if (enable)
return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
@ -2823,23 +2898,23 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
int ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
struct edca *e = &req.edca[ac];
e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
e->aifs = mvif->wmm[ac].aifs;
e->txop = cpu_to_le16(mvif->wmm[ac].txop);
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
if (mvif->wmm[ac].cw_min)
e->cw_min = fls(mvif->wmm[ac].cw_max);
if (q->cw_min)
e->cw_min = fls(q->cw_min);
else
e->cw_min = 5;
if (mvif->wmm[ac].cw_max)
e->cw_max = cpu_to_le16(fls(mvif->wmm[ac].cw_max));
if (q->cw_max)
e->cw_max = cpu_to_le16(fls(q->cw_max));
else
e->cw_max = cpu_to_le16(10);
}
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
&req, sizeof(req), true);
}
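
The EDCA hunk above switches mt7915_mcu_set_tx() from the driver's private wmm[] copy to the ieee80211_tx_queue_params saved by mt7915_conf_tx(), and in doing so also fixes the old code that derived cw_min from cw_max. The firmware command takes contention windows as exponents, which fls() yields for the usual 2^n - 1 values. Below is a minimal userspace sketch of that encoding; the local fls() merely stands in for the kernel helper and none of this is driver code.

#include <stdio.h>

/* Stand-in for the kernel's fls(): position of the highest set bit. */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	/* Typical contention windows handed over by mac80211 (2^n - 1). */
	static const unsigned int cw[] = { 15, 31, 1023 };
	unsigned int i;

	for (i = 0; i < sizeof(cw) / sizeof(cw[0]); i++)
		printf("cw=%u -> exponent %d\n", cw[i], fls(cw[i]));
	/* 15 -> 4, 31 -> 5, 1023 -> 10; the last two match the fallback
	 * exponents 5 and 10 used above when no CW is configured. */
	return 0;
}
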


@ -201,6 +201,7 @@ enum {
MCU_EXT_CMD_EDCA_UPDATE = 0x27,
MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
MCU_EXT_CMD_THERMAL_CTRL = 0x2c,
MCU_EXT_CMD_SET_DRR_CTRL = 0x36,
MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
@ -653,7 +654,7 @@ struct sta_rec_muru {
bool ofdma_ul_en;
bool mimo_dl_en;
bool mimo_ul_en;
bool rsv[4];
u8 rsv[4];
} cfg;
struct {
@ -664,7 +665,7 @@ struct sta_rec_muru {
bool lt16_sigb;
bool rx_su_comp_sigb;
bool rx_su_non_comp_sigb;
bool rsv;
u8 rsv;
} ofdma_dl;
struct {
@ -951,7 +952,6 @@ enum {
sizeof(struct sta_rec_ba) + \
sizeof(struct sta_rec_vht) + \
sizeof(struct tlv) + \
sizeof(struct sta_rec_muru) + \
MT7915_WTBL_UPDATE_MAX_SIZE)
#define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \


@ -99,15 +99,10 @@ struct mt7915_vif {
u8 band_idx;
u8 wmm_idx;
struct {
u16 cw_min;
u16 cw_max;
u16 txop;
u8 aifs;
} wmm[IEEE80211_NUM_ACS];
struct mt7915_sta sta;
struct mt7915_dev *dev;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
};
struct mib_stats {
@ -125,7 +120,6 @@ struct mt7915_phy {
struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
u32 rxfilter;
u32 vif_mask;
u32 omac_mask;
u16 noise;
@ -199,6 +193,16 @@ enum {
EXT_BSSID_END
};
enum {
MT_LMAC_AC00,
MT_LMAC_AC01,
MT_LMAC_AC02,
MT_LMAC_AC03,
MT_LMAC_ALTX0 = 0x10,
MT_LMAC_BMC0,
MT_LMAC_BCN0,
};
enum {
MT_RX_SEL0,
MT_RX_SEL1,
@ -254,6 +258,21 @@ mt7915_ext_phy(struct mt7915_dev *dev)
return phy->priv;
}
static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
{
static const u8 lmac_queue_map[] = {
[IEEE80211_AC_BK] = MT_LMAC_AC00,
[IEEE80211_AC_BE] = MT_LMAC_AC01,
[IEEE80211_AC_VI] = MT_LMAC_AC02,
[IEEE80211_AC_VO] = MT_LMAC_AC03,
};
if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
return MT_LMAC_AC01; /* BE */
return lmac_queue_map[ac];
}
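
mt7915_lmac_mapping() exists because mac80211 numbers its access categories VO=0, VI=1, BE=2, BK=3, while the LMAC queues declared above run the other way (AC00..AC03 carrying BK..VO), so skb_get_queue_mapping() can no longer be used directly as a hardware queue index, as the mac.c hunk earlier in this diff shows. A standalone illustration of the translation follows; the values mirror this diff and mac80211's AC ordering, and the code is illustration only, not part of the driver.

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };			/* mac80211 order */
enum { LMAC_AC00, LMAC_AC01, LMAC_AC02, LMAC_AC03 };	/* hardware order */

/* Same shape as the lmac_queue_map[] table above. */
static const unsigned char lmac_queue_map[] = {
	[AC_BK] = LMAC_AC00,
	[AC_BE] = LMAC_AC01,
	[AC_VI] = LMAC_AC02,
	[AC_VO] = LMAC_AC03,
};

int main(void)
{
	static const char * const names[] = { "VO", "VI", "BE", "BK" };
	int ac;

	for (ac = 0; ac < 4; ac++)
		printf("mac80211 AC_%s (%d) -> LMAC AC0%d\n",
		       names[ac], ac, lmac_queue_map[ac]);
	return 0;
}
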
static inline void
mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid,
enum mt7915_ampdu_state state)


@ -103,7 +103,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE,
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,


@ -117,11 +117,16 @@
#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))


@ -3,6 +3,7 @@
* Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include "mt76.h"
#include <linux/pci.h>
void mt76_pci_disable_aspm(struct pci_dev *pdev)


@ -0,0 +1,368 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
*
* This file is based on mt76/usb.c.
*
* Author: Felix Fietkau <nbd@nbd.name>
* Lorenzo Bianconi <lorenzo@kernel.org>
* Sean Wang <sean.wang@mediatek.com>
*/
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include "mt76.h"
static int
mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
struct mt76_queue *q = &dev->q_rx[qid];
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
q->ndesc = MT_NUM_RX_ENTRIES;
q->head = q->tail = 0;
q->queued = 0;
return 0;
}
static int mt76s_alloc_tx(struct mt76_dev *dev)
{
struct mt76_queue *q;
int i;
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
INIT_LIST_HEAD(&dev->q_tx[i].swq);
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
spin_lock_init(&q->lock);
q->hw_idx = i;
dev->q_tx[i].q = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
q->ndesc = MT_NUM_TX_ENTRIES;
}
return 0;
}
void mt76s_stop_txrx(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76s_stop_txrx);
int mt76s_alloc_queues(struct mt76_dev *dev)
{
int err;
err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
if (err < 0)
return err;
return mt76s_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76s_alloc_queues);
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
struct mt76_queue_entry *e = NULL;
spin_lock_bh(&q->lock);
if (q->queued > 0) {
e = &q->entry[q->head];
q->head = (q->head + 1) % q->ndesc;
q->queued--;
}
spin_unlock_bh(&q->lock);
return e;
}
static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
int qid = q - &dev->q_rx[MT_RXQ_MAIN];
int nframes = 0;
while (true) {
struct mt76_queue_entry *e;
if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
break;
e = mt76s_get_next_rx_entry(q);
if (!e || !e->skb)
break;
dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
e->skb = NULL;
nframes++;
}
if (qid == MT_RXQ_MAIN)
mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
return nframes;
}
static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
struct mt76_sw_queue *sq = &dev->q_tx[qid];
u32 n_dequeued = 0, n_sw_dequeued = 0;
struct mt76_queue_entry entry;
struct mt76_queue *q = sq->q;
bool wake;
while (q->queued > n_dequeued) {
if (!q->entry[q->head].done)
break;
if (q->entry[q->head].schedule) {
q->entry[q->head].schedule = false;
n_sw_dequeued++;
}
entry = q->entry[q->head];
q->entry[q->head].done = false;
q->head = (q->head + 1) % q->ndesc;
n_dequeued++;
if (qid == MT_TXQ_MCU)
dev_kfree_skb(entry.skb);
else
dev->drv->tx_complete_skb(dev, qid, &entry);
}
spin_lock_bh(&q->lock);
sq->swq_queued -= n_sw_dequeued;
q->queued -= n_dequeued;
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
spin_unlock_bh(&q->lock);
if (qid == MT_TXQ_MCU)
goto out;
mt76_txq_schedule(&dev->phy, qid);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
wake_up_process(dev->sdio.tx_kthread);
out:
return n_dequeued;
}
static void mt76s_tx_status_data(struct work_struct *work)
{
struct mt76_sdio *sdio;
struct mt76_dev *dev;
u8 update = 1;
u16 count = 0;
sdio = container_of(work, struct mt76_sdio, stat_work);
dev = container_of(sdio, struct mt76_dev, sdio);
while (true) {
if (test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
break;
count++;
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
queue_work(dev->wq, &sdio->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->phy.state);
}
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_tx_info tx_info = {
.skb = skb,
};
int err, len = skb->len;
u16 idx = q->tail;
if (q->queued == q->ndesc)
return -ENOSPC;
skb->prev = skb->next = NULL;
err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
q->entry[q->tail].skb = tx_info.skb;
q->entry[q->tail].buf_sz = len;
q->tail = (q->tail + 1) % q->ndesc;
q->queued++;
return idx;
}
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
struct mt76_queue *q = dev->q_tx[qid].q;
int ret = -ENOSPC, len = skb->len;
spin_lock_bh(&q->lock);
if (q->queued == q->ndesc)
goto out;
ret = mt76_skb_adjust_pad(skb);
if (ret)
goto out;
q->entry[q->tail].buf_sz = len;
q->entry[q->tail].skb = skb;
q->tail = (q->tail + 1) % q->ndesc;
q->queued++;
out:
spin_unlock_bh(&q->lock);
return ret;
}
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
struct mt76_sdio *sdio = &dev->sdio;
wake_up_process(sdio->tx_kthread);
}
static const struct mt76_queue_ops sdio_queue_ops = {
.tx_queue_skb = mt76s_tx_queue_skb,
.kick = mt76s_tx_kick,
.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
static int mt76s_kthread_run(void *data)
{
struct mt76_dev *dev = data;
struct mt76_phy *mphy = &dev->phy;
while (!kthread_should_stop()) {
int i, nframes = 0;
cond_resched();
/* rx processing */
local_bh_disable();
rcu_read_lock();
mt76_for_each_q_rx(dev, i)
nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
rcu_read_unlock();
local_bh_enable();
/* tx processing */
for (i = 0; i < MT_TXQ_MCU_WA; i++)
nframes += mt76s_process_tx_queue(dev, i);
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &mphy->state))
queue_work(dev->wq, &dev->sdio.stat_work);
if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
}
return 0;
}
void mt76s_deinit(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
int i;
kthread_stop(sdio->kthread);
kthread_stop(sdio->tx_kthread);
mt76s_stop_txrx(dev);
sdio_claim_host(sdio->func);
sdio_release_irq(sdio->func);
sdio_release_host(sdio->func);
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
int j;
for (j = 0; j < q->ndesc; j++) {
struct mt76_queue_entry *e = &q->entry[j];
if (!e->skb)
continue;
dev_kfree_skb(e->skb);
e->skb = NULL;
}
}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s");
if (IS_ERR(sdio->kthread))
return PTR_ERR(sdio->kthread);
INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
mutex_init(&sdio->sched.lock);
dev->queue_ops = &sdio_queue_ops;
dev->bus = bus_ops;
dev->sdio.func = func;
return 0;
}
EXPORT_SYMBOL_GPL(mt76s_init);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");


@ -0,0 +1,497 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include "mt76.h"
static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
[MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
[MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
[MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED },
[MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
};
void mt76_testmode_tx_pending(struct mt76_dev *dev)
{
struct mt76_testmode_data *td = &dev->test;
struct mt76_wcid *wcid = &dev->global_wcid;
struct sk_buff *skb = td->tx_skb;
struct mt76_queue *q;
int qid;
if (!skb || !td->tx_pending)
return;
qid = skb_get_queue_mapping(skb);
q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
while (td->tx_pending > 0 && q->queued < q->ndesc / 2) {
int ret;
ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL);
if (ret < 0)
break;
td->tx_pending--;
td->tx_queued++;
}
dev->queue_ops->kick(dev, q);
spin_unlock_bh(&q->lock);
}
static int
mt76_testmode_tx_init(struct mt76_dev *dev)
{
struct mt76_testmode_data *td = &dev->test;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_FROMDS;
struct ieee80211_tx_rate *rate;
u8 max_nss = hweight8(dev->phy.antenna_mask);
if (td->tx_antenna_mask)
max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
dev_kfree_skb(td->tx_skb);
td->tx_skb = skb;
hdr = __skb_put_zero(skb, td->tx_msdu_len);
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, dev->macaddr, sizeof(dev->macaddr));
memcpy(hdr->addr2, dev->macaddr, sizeof(dev->macaddr));
memcpy(hdr->addr3, dev->macaddr, sizeof(dev->macaddr));
info = IEEE80211_SKB_CB(skb);
info->flags = IEEE80211_TX_CTL_INJECTED |
IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_NO_PS_BUFFER;
rate = &info->control.rates[0];
rate->count = 1;
rate->idx = td->tx_rate_idx;
switch (td->tx_rate_mode) {
case MT76_TM_TX_MODE_CCK:
if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ)
return -EINVAL;
if (rate->idx > 4)
return -EINVAL;
break;
case MT76_TM_TX_MODE_OFDM:
if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ)
break;
if (rate->idx > 8)
return -EINVAL;
rate->idx += 4;
break;
case MT76_TM_TX_MODE_HT:
if (rate->idx > 8 * max_nss &&
!(rate->idx == 32 &&
dev->phy.chandef.width >= NL80211_CHAN_WIDTH_40))
return -EINVAL;
rate->flags |= IEEE80211_TX_RC_MCS;
break;
case MT76_TM_TX_MODE_VHT:
if (rate->idx > 9)
return -EINVAL;
if (td->tx_rate_nss > max_nss)
return -EINVAL;
ieee80211_rate_set_vht(rate, td->tx_rate_idx, td->tx_rate_nss);
rate->flags |= IEEE80211_TX_RC_VHT_MCS;
break;
default:
break;
}
if (td->tx_rate_sgi)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
if (td->tx_rate_ldpc)
info->flags |= IEEE80211_TX_CTL_LDPC;
if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) {
switch (dev->phy.chandef.width) {
case NL80211_CHAN_WIDTH_40:
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
break;
case NL80211_CHAN_WIDTH_80:
rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
break;
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
break;
default:
break;
}
}
skb_set_queue_mapping(skb, IEEE80211_AC_BE);
return 0;
}
static void
mt76_testmode_tx_start(struct mt76_dev *dev)
{
struct mt76_testmode_data *td = &dev->test;
td->tx_queued = 0;
td->tx_done = 0;
td->tx_pending = td->tx_count;
tasklet_schedule(&dev->tx_tasklet);
}
static void
mt76_testmode_tx_stop(struct mt76_dev *dev)
{
struct mt76_testmode_data *td = &dev->test;
tasklet_disable(&dev->tx_tasklet);
td->tx_pending = 0;
tasklet_enable(&dev->tx_tasklet);
wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ);
dev_kfree_skb(td->tx_skb);
td->tx_skb = NULL;
}
static inline void
mt76_testmode_param_set(struct mt76_testmode_data *td, u16 idx)
{
td->param_set[idx / 32] |= BIT(idx % 32);
}
static inline bool
mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx)
{
return td->param_set[idx / 32] & BIT(idx % 32);
}
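
The two helpers above record which testmode attributes userspace actually supplied, so that mt76_testmode_dump() later only reports parameters that were set: attribute index idx lands in bit (idx % 32) of word (idx / 32) of td->param_set. A toy userspace version of the same bookkeeping is sketched below; BIT() is redefined locally, and the value 15 for MT76_TM_ATTR_FREQ_OFFSET is taken from the enum added later in this diff.

#include <stdio.h>

#define BIT(n)	(1u << (n))

static unsigned int param_set[4];

static void param_mark(unsigned int idx)
{
	param_set[idx / 32] |= BIT(idx % 32);
}

static int param_present(unsigned int idx)
{
	return !!(param_set[idx / 32] & BIT(idx % 32));
}

int main(void)
{
	param_mark(15);	/* MT76_TM_ATTR_FREQ_OFFSET */
	printf("word0=0x%08x present(15)=%d present(5)=%d\n",
	       param_set[0], param_present(15), param_present(5));
	return 0;
}
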
static void
mt76_testmode_init_defaults(struct mt76_dev *dev)
{
struct mt76_testmode_data *td = &dev->test;
if (td->tx_msdu_len > 0)
return;
td->tx_msdu_len = 1024;
td->tx_count = 1;
td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
td->tx_rate_nss = 1;
}
static int
__mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
{
enum mt76_testmode_state prev_state = dev->test.state;
int err;
if (prev_state == MT76_TM_STATE_TX_FRAMES)
mt76_testmode_tx_stop(dev);
if (state == MT76_TM_STATE_TX_FRAMES) {
err = mt76_testmode_tx_init(dev);
if (err)
return err;
}
err = dev->test_ops->set_state(dev, state);
if (err) {
if (state == MT76_TM_STATE_TX_FRAMES)
mt76_testmode_tx_stop(dev);
return err;
}
if (state == MT76_TM_STATE_TX_FRAMES)
mt76_testmode_tx_start(dev);
else if (state == MT76_TM_STATE_RX_FRAMES) {
memset(&dev->test.rx_stats, 0, sizeof(dev->test.rx_stats));
}
dev->test.state = state;
return 0;
}
int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
{
struct mt76_testmode_data *td = &dev->test;
struct ieee80211_hw *hw = dev->phy.hw;
if (state == td->state && state == MT76_TM_STATE_OFF)
return 0;
if (state > MT76_TM_STATE_OFF &&
(!test_bit(MT76_STATE_RUNNING, &dev->phy.state) ||
!(hw->conf.flags & IEEE80211_CONF_MONITOR)))
return -ENOTCONN;
if (state != MT76_TM_STATE_IDLE &&
td->state != MT76_TM_STATE_IDLE) {
int ret;
ret = __mt76_testmode_set_state(dev, MT76_TM_STATE_IDLE);
if (ret)
return ret;
}
return __mt76_testmode_set_state(dev, state);
}
EXPORT_SYMBOL(mt76_testmode_set_state);
static int
mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
{
u8 val;
if (!attr)
return 0;
val = nla_get_u8(attr);
if (val < min || val > max)
return -EINVAL;
*dest = val;
return 0;
}
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct mt76_testmode_data *td = &dev->test;
struct nlattr *tb[NUM_MT76_TM_ATTRS];
u32 state;
int err;
int i;
if (!dev->test_ops)
return -EOPNOTSUPP;
err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
mt76_tm_policy, NULL);
if (err)
return err;
err = -EINVAL;
mutex_lock(&dev->mutex);
if (tb[MT76_TM_ATTR_RESET]) {
mt76_testmode_set_state(dev, MT76_TM_STATE_OFF);
memset(td, 0, sizeof(*td));
}
mt76_testmode_init_defaults(dev);
if (tb[MT76_TM_ATTR_TX_COUNT])
td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);
if (tb[MT76_TM_ATTR_TX_LENGTH]) {
u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);
if (val > IEEE80211_MAX_FRAME_LEN ||
val < sizeof(struct ieee80211_hdr))
goto out;
td->tx_msdu_len = val;
}
if (tb[MT76_TM_ATTR_TX_RATE_IDX])
td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]);
if (mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_MODE], &td->tx_rate_mode,
0, MT76_TM_TX_MODE_MAX) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss,
1, hweight8(phy->antenna_mask)) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask, 1,
phy->antenna_mask) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
&td->tx_power_control, 0, 1))
goto out;
if (tb[MT76_TM_ATTR_FREQ_OFFSET])
td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]);
if (tb[MT76_TM_ATTR_STATE]) {
state = nla_get_u32(tb[MT76_TM_ATTR_STATE]);
if (state > MT76_TM_STATE_MAX)
goto out;
} else {
state = td->state;
}
if (tb[MT76_TM_ATTR_TX_POWER]) {
struct nlattr *cur;
int idx = 0;
int rem;
nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
if (nla_len(cur) != 1 ||
idx >= ARRAY_SIZE(td->tx_power))
goto out;
td->tx_power[idx++] = nla_get_u8(cur);
}
}
if (dev->test_ops->set_params) {
err = dev->test_ops->set_params(dev, tb, state);
if (err)
goto out;
}
for (i = MT76_TM_ATTR_STATE; i < ARRAY_SIZE(tb); i++)
if (tb[i])
mt76_testmode_param_set(td, i);
err = 0;
if (tb[MT76_TM_ATTR_STATE])
err = mt76_testmode_set_state(dev, state);
out:
mutex_unlock(&dev->mutex);
return err;
}
EXPORT_SYMBOL(mt76_testmode_cmd);
static int
mt76_testmode_dump_stats(struct mt76_dev *dev, struct sk_buff *msg)
{
struct mt76_testmode_data *td = &dev->test;
u64 rx_packets = 0;
u64 rx_fcs_error = 0;
int i;
for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
rx_packets += td->rx_stats.packets[i];
rx_fcs_error += td->rx_stats.fcs_error[i];
}
if (nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_PENDING, td->tx_pending) ||
nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_QUEUED, td->tx_queued) ||
nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_DONE, td->tx_done) ||
nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_PACKETS, rx_packets,
MT76_TM_STATS_ATTR_PAD) ||
nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_FCS_ERROR, rx_fcs_error,
MT76_TM_STATS_ATTR_PAD))
return -EMSGSIZE;
if (dev->test_ops->dump_stats)
return dev->test_ops->dump_stats(dev, msg);
return 0;
}
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len)
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct mt76_testmode_data *td = &dev->test;
struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
int err = 0;
void *a;
int i;
if (!dev->test_ops)
return -EOPNOTSUPP;
if (cb->args[2]++ > 0)
return -ENOENT;
if (data) {
err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
mt76_tm_policy, NULL);
if (err)
return err;
}
mutex_lock(&dev->mutex);
if (tb[MT76_TM_ATTR_STATS]) {
a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
err = mt76_testmode_dump_stats(dev, msg);
nla_nest_end(msg, a);
goto out;
}
mt76_testmode_init_defaults(dev);
err = -EMSGSIZE;
if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state))
goto out;
if (td->mtd_name &&
(nla_put_string(msg, MT76_TM_ATTR_MTD_PART, td->mtd_name) ||
nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, td->mtd_offset)))
goto out;
if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) &&
nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) &&
nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) &&
nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
goto out;
if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
if (!a)
goto out;
for (i = 0; i < ARRAY_SIZE(td->tx_power); i++)
if (nla_put_u8(msg, i, td->tx_power[i]))
goto out;
nla_nest_end(msg, a);
}
err = 0;
out:
mutex_unlock(&dev->mutex);
return err;
}
EXPORT_SYMBOL(mt76_testmode_dump);


@ -0,0 +1,156 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
*/
#ifndef __MT76_TESTMODE_H
#define __MT76_TESTMODE_H
/**
* enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
*
* @MT76_TM_ATTR_UNSPEC: (invalid attribute)
*
* @MT76_TM_ATTR_RESET: reset parameters to default (flag)
* @MT76_TM_ATTR_STATE: test state (u32), see &enum mt76_testmode_state
*
* @MT76_TM_ATTR_MTD_PART: mtd partition used for eeprom data (string)
* @MT76_TM_ATTR_MTD_OFFSET: offset of eeprom data within the partition (u32)
*
* @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting
* state to MT76_TM_STATE_TX_FRAMES (u32)
* @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32)
* @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32)
* @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode)
* @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8)
* @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8)
* @MT76_TM_ATTR_TX_RATE_SGI: packet tx use short guard interval (u8)
* @MT76_TM_ATTR_TX_RATE_LDPC: packet tx enable LDPC (u8)
*
* @MT76_TM_ATTR_TX_ANTENNA: tx antenna mask (u8)
* @MT76_TM_ATTR_TX_POWER_CONTROL: enable tx power control (u8)
* @MT76_TM_ATTR_TX_POWER: per-antenna tx power array (nested, u8 attrs)
*
* @MT76_TM_ATTR_FREQ_OFFSET: RF frequency offset (u32)
*
* @MT76_TM_ATTR_STATS: statistics (nested, see &enum mt76_testmode_stats_attr)
*/
enum mt76_testmode_attr {
MT76_TM_ATTR_UNSPEC,
MT76_TM_ATTR_RESET,
MT76_TM_ATTR_STATE,
MT76_TM_ATTR_MTD_PART,
MT76_TM_ATTR_MTD_OFFSET,
MT76_TM_ATTR_TX_COUNT,
MT76_TM_ATTR_TX_LENGTH,
MT76_TM_ATTR_TX_RATE_MODE,
MT76_TM_ATTR_TX_RATE_NSS,
MT76_TM_ATTR_TX_RATE_IDX,
MT76_TM_ATTR_TX_RATE_SGI,
MT76_TM_ATTR_TX_RATE_LDPC,
MT76_TM_ATTR_TX_ANTENNA,
MT76_TM_ATTR_TX_POWER_CONTROL,
MT76_TM_ATTR_TX_POWER,
MT76_TM_ATTR_FREQ_OFFSET,
MT76_TM_ATTR_STATS,
/* keep last */
NUM_MT76_TM_ATTRS,
MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1,
};
/**
* enum mt76_testmode_stats_attr - statistics attributes
*
* @MT76_TM_STATS_ATTR_TX_PENDING: pending tx frames (u32)
* @MT76_TM_STATS_ATTR_TX_QUEUED: queued tx frames (u32)
* @MT76_TM_STATS_ATTR_TX_DONE: completed tx frames (u32)
*
* @MT76_TM_STATS_ATTR_RX_PACKETS: number of rx packets (u64)
* @MT76_TM_STATS_ATTR_RX_FCS_ERROR: number of rx packets with FCS error (u64)
* @MT76_TM_STATS_ATTR_LAST_RX: information about the last received packet
* see &enum mt76_testmode_rx_attr
*/
enum mt76_testmode_stats_attr {
MT76_TM_STATS_ATTR_UNSPEC,
MT76_TM_STATS_ATTR_PAD,
MT76_TM_STATS_ATTR_TX_PENDING,
MT76_TM_STATS_ATTR_TX_QUEUED,
MT76_TM_STATS_ATTR_TX_DONE,
MT76_TM_STATS_ATTR_RX_PACKETS,
MT76_TM_STATS_ATTR_RX_FCS_ERROR,
MT76_TM_STATS_ATTR_LAST_RX,
/* keep last */
NUM_MT76_TM_STATS_ATTRS,
MT76_TM_STATS_ATTR_MAX = NUM_MT76_TM_STATS_ATTRS - 1,
};
/**
* enum mt76_testmode_rx_attr - packet rx information
*
* @MT76_TM_RX_ATTR_FREQ_OFFSET: frequency offset (s32)
* @MT76_TM_RX_ATTR_RCPI: received channel power indicator (array, u8)
* @MT76_TM_RX_ATTR_IB_RSSI: internal inband RSSI (s8)
* @MT76_TM_RX_ATTR_WB_RSSI: internal wideband RSSI (s8)
*/
enum mt76_testmode_rx_attr {
MT76_TM_RX_ATTR_UNSPEC,
MT76_TM_RX_ATTR_FREQ_OFFSET,
MT76_TM_RX_ATTR_RCPI,
MT76_TM_RX_ATTR_IB_RSSI,
MT76_TM_RX_ATTR_WB_RSSI,
/* keep last */
NUM_MT76_TM_RX_ATTRS,
MT76_TM_RX_ATTR_MAX = NUM_MT76_TM_RX_ATTRS - 1,
};
/**
* enum mt76_testmode_state - phy test state
*
* @MT76_TM_STATE_OFF: test mode disabled (normal operation)
* @MT76_TM_STATE_IDLE: test mode enabled, but idle
* @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames
* @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics
*/
enum mt76_testmode_state {
MT76_TM_STATE_OFF,
MT76_TM_STATE_IDLE,
MT76_TM_STATE_TX_FRAMES,
MT76_TM_STATE_RX_FRAMES,
/* keep last */
NUM_MT76_TM_STATES,
MT76_TM_STATE_MAX = NUM_MT76_TM_STATES - 1,
};
/**
* enum mt76_testmode_tx_mode - packet tx phy mode
*
* @MT76_TM_TX_MODE_CCK: legacy CCK mode
* @MT76_TM_TX_MODE_OFDM: legacy OFDM mode
* @MT76_TM_TX_MODE_HT: 802.11n MCS
* @MT76_TM_TX_MODE_VHT: 802.11ac MCS
*/
enum mt76_testmode_tx_mode {
MT76_TM_TX_MODE_CCK,
MT76_TM_TX_MODE_OFDM,
MT76_TM_TX_MODE_HT,
MT76_TM_TX_MODE_VHT,
/* keep last */
NUM_MT76_TM_TX_MODES,
MT76_TM_TX_MODE_MAX = NUM_MT76_TM_TX_MODES - 1,
};
#endif


@ -236,6 +236,14 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
struct ieee80211_hw *hw;
struct sk_buff_head list;
#ifdef CONFIG_NL80211_TESTMODE
if (skb == dev->test.tx_skb) {
dev->test.tx_done++;
if (dev->test.tx_queued == dev->test.tx_done)
wake_up(&dev->tx_wait);
}
#endif
if (!skb->prev) {
hw = mt76_tx_status_get_hw(dev, skb);
ieee80211_free_txskb(hw, skb);
@ -259,6 +267,11 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
int qid = skb_get_queue_mapping(skb);
bool ext_phy = phy != &dev->phy;
if (mt76_testmode_enabled(dev)) {
ieee80211_free_txskb(phy->hw, skb);
return;
}
if (WARN_ON(qid >= MT_TXQ_PSD)) {
qid = MT_TXQ_BE;
skb_set_queue_mapping(skb, qid);
@ -579,6 +592,11 @@ void mt76_tx_tasklet(unsigned long data)
mt76_txq_schedule_all(&dev->phy);
if (dev->phy2)
mt76_txq_schedule_all(dev->phy2);
#ifdef CONFIG_NL80211_TESTMODE
if (dev->test.tx_pending)
mt76_testmode_tx_pending(dev);
#endif
}
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
@ -659,3 +677,32 @@ u8 mt76_ac_to_hwq(u8 ac)
return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
int mt76_skb_adjust_pad(struct sk_buff *skb)
{
struct sk_buff *iter, *last = skb;
u32 pad;
/* Add zero pad of 4 - 7 bytes */
pad = round_up(skb->len, 4) + 4 - skb->len;
/* The first packet of an A-MSDU burst keeps track of the whole burst
* length, so both its length and that of the last packet need updating.
*/
skb_walk_frags(skb, iter) {
last = iter;
if (!iter->next) {
skb->data_len += pad;
skb->len += pad;
break;
}
}
if (skb_pad(last, pad))
return -ENOMEM;
__skb_put(last, pad);
return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
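
mt76_skb_adjust_pad() looks like the bus-neutral counterpart of the padding previously done in the USB-only mt76u_skb_dma_info() (removed further down in this diff), so the new SDIO path can share it. The formula pad = round_up(len, 4) + 4 - len always yields 4..7 bytes: the frame ends 4-byte aligned and always gains at least one full zero word. A quick standalone check of that arithmetic, not driver code:

#include <stdio.h>

/* Userspace stand-in for the kernel's round_up(x, 4). */
static unsigned int round_up4(unsigned int x)
{
	return (x + 3) & ~3u;
}

int main(void)
{
	unsigned int len;

	for (len = 60; len <= 67; len++) {
		unsigned int pad = round_up4(len) + 4 - len;

		printf("len=%u pad=%u padded=%u\n", len, pad, len + pad);
	}
	return 0;
}
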


@ -672,17 +672,11 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
static void mt76u_rx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue *q;
int i;
rcu_read_lock();
for (i = 0; i < __MT_RXQ_MAX; i++) {
q = &dev->q_rx[i];
if (!q->ndesc)
continue;
mt76u_process_rx_queue(dev, q);
}
mt76_for_each_q_rx(dev, i)
mt76u_process_rx_queue(dev, &dev->q_rx[i]);
rcu_read_unlock();
}
@ -756,27 +750,19 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
static void mt76u_free_rx(struct mt76_dev *dev)
{
struct mt76_queue *q;
int i;
for (i = 0; i < __MT_RXQ_MAX; i++) {
q = &dev->q_rx[i];
if (!q->ndesc)
continue;
mt76u_free_rx_queue(dev, q);
}
mt76_for_each_q_rx(dev, i)
mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}
void mt76u_stop_rx(struct mt76_dev *dev)
{
struct mt76_queue *q;
int i, j;
int i;
for (i = 0; i < __MT_RXQ_MAX; i++) {
q = &dev->q_rx[i];
if (!q->ndesc)
continue;
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
int j;
for (j = 0; j < q->ndesc; j++)
usb_poison_urb(q->entry[j].urb);
@ -788,14 +774,11 @@ EXPORT_SYMBOL_GPL(mt76u_stop_rx);
int mt76u_resume_rx(struct mt76_dev *dev)
{
struct mt76_queue *q;
int i, j, err;
int i;
for (i = 0; i < __MT_RXQ_MAX; i++) {
q = &dev->q_rx[i];
if (!q->ndesc)
continue;
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
int err, j;
for (j = 0; j < q->ndesc; j++)
usb_unpoison_urb(q->entry[j].urb);
@ -859,7 +842,7 @@ static void mt76u_tx_tasklet(unsigned long data)
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->usb.wq, &dev->usb.stat_work);
queue_work(dev->wq, &dev->usb.stat_work);
if (wake)
ieee80211_wake_queue(dev->hw, i);
}
@ -885,7 +868,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
queue_work(usb->wq, &usb->stat_work);
queue_work(dev->wq, &usb->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->phy.state);
}
@ -921,35 +904,6 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
return urb->num_sgs;
}
int mt76u_skb_dma_info(struct sk_buff *skb, u32 info)
{
struct sk_buff *iter, *last = skb;
u32 pad;
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
/* Add zero pad of 4 - 7 bytes */
pad = round_up(skb->len, 4) + 4 - skb->len;
/* First packet of a A-MSDU burst keeps track of the whole burst
* length, need to update length of it and the last packet.
*/
skb_walk_frags(skb, iter) {
last = iter;
if (!iter->next) {
skb->data_len += pad;
skb->len += pad;
break;
}
}
if (skb_pad(last, pad))
return -ENOMEM;
__skb_put(last, pad);
return 0;
}
EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
@ -1161,15 +1115,6 @@ static const struct mt76_queue_ops usb_queue_ops = {
.kick = mt76u_tx_kick,
};
void mt76u_deinit(struct mt76_dev *dev)
{
if (dev->usb.wq) {
destroy_workqueue(dev->usb.wq);
dev->usb.wq = NULL;
}
}
EXPORT_SYMBOL_GPL(mt76u_deinit);
int mt76u_init(struct mt76_dev *dev,
struct usb_interface *intf, bool ext)
{
@ -1192,10 +1137,6 @@ int mt76u_init(struct mt76_dev *dev,
tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
if (!usb->wq)
return -ENOMEM;
usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
if (usb->data_len < 32)
usb->data_len = 32;
@ -1219,7 +1160,8 @@ int mt76u_init(struct mt76_dev *dev,
return 0;
error:
mt76u_deinit(dev);
destroy_workqueue(dev->wq);
return err;
}
EXPORT_SYMBOL_GPL(mt76u_init);


@ -13,7 +13,7 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
timeout /= 10;
do {
cur = dev->bus->rr(dev, offset) & mask;
cur = __mt76_rr(dev, offset) & mask;
if (cur == val)
return true;
@ -31,7 +31,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
timeout /= 10;
do {
cur = dev->bus->rr(dev, offset) & mask;
cur = __mt76_rr(dev, offset) & mask;
if (cur == val)
return true;


@ -116,8 +116,10 @@ mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
int sent, ret;
u8 seq = 0;
if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
consume_skb(skb);
return 0;
}
mutex_lock(&dev->mcu.mutex);


@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
#include <linux/of_irq.h>
@ -15,11 +16,8 @@
#define SDIO_MODALIAS "wilc1000_sdio"
#define SDIO_VENDOR_ID_WILC 0x0296
#define SDIO_DEVICE_ID_WILC 0x5347
static const struct sdio_device_id wilc_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) },
{ SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) },
{ },
};


@ -446,8 +446,11 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
}
wiphy = qtnf_wiphy_allocate(bus, pdev);
if (!wiphy)
if (!wiphy) {
if (pdev)
platform_device_unregister(pdev);
return ERR_PTR(-ENOMEM);
}
mac = wiphy_priv(wiphy);


@ -1834,8 +1834,7 @@ static struct pci_driver rt2400pci_driver = {
.id_table = rt2400pci_device_table,
.probe = rt2400pci_probe,
.remove = rt2x00pci_remove,
.suspend = rt2x00pci_suspend,
.resume = rt2x00pci_resume,
.driver.pm = &rt2x00pci_pm_ops,
};
module_pci_driver(rt2400pci_driver);

Some files were not shown because too many files have changed in this diff.