2012-12-20 21:13:19 +00:00
|
|
|
/*
|
2014-08-06 07:31:51 +00:00
|
|
|
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
|
2012-12-20 21:13:19 +00:00
|
|
|
*
|
|
|
|
* Permission to use, copy, modify, and/or distribute this software for any
|
|
|
|
* purpose with or without fee is hereby granted, provided that the above
|
|
|
|
* copyright notice and this permission notice appear in all copies.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
|
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
|
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
|
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
|
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <net/ieee80211_radiotap.h>
|
|
|
|
#include <linux/if_arp.h>
|
|
|
|
#include <linux/moduleparam.h>
|
2013-07-21 08:34:37 +00:00
|
|
|
#include <linux/ip.h>
|
|
|
|
#include <linux/ipv6.h>
|
|
|
|
#include <net/ipv6.h>
|
2014-01-14 18:31:26 +00:00
|
|
|
#include <linux/prefetch.h>
|
2012-12-20 21:13:19 +00:00
|
|
|
|
|
|
|
#include "wil6210.h"
|
|
|
|
#include "wmi.h"
|
|
|
|
#include "txrx.h"
|
2013-05-12 11:43:35 +00:00
|
|
|
#include "trace.h"
|
2012-12-20 21:13:19 +00:00
|
|
|
|
|
|
|
/* Sniffer mode option: when set, append the raw Rx descriptor and PHY
 * info to the radiotap header (see wil_rx_add_radiotap_header).
 * Read-only via sysfs (S_IRUGO); must be set at module load time.
 */
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");
|
|
|
|
|
2015-03-15 14:00:19 +00:00
|
|
|
/* When set, Rx buffers are laid out so the IP header lands 4*n+2 aligned;
 * wil_rx_snaplen() then reserves 6 extra bytes (SNAP) per Rx buffer.
 * Non-static: referenced from other files in this driver.
 */
bool rx_align_2;
module_param(rx_align_2, bool, S_IRUGO);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
|
|
|
|
|
|
|
|
static inline uint wil_rx_snaplen(void)
|
|
|
|
{
|
|
|
|
return rx_align_2 ? 6 : 0;
|
|
|
|
}
|
|
|
|
|
2012-12-20 21:13:19 +00:00
|
|
|
static inline int wil_vring_is_empty(struct vring *vring)
|
|
|
|
{
|
|
|
|
return vring->swhead == vring->swtail;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline u32 wil_vring_next_tail(struct vring *vring)
|
|
|
|
{
|
|
|
|
return (vring->swtail + 1) % vring->size;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void wil_vring_advance_head(struct vring *vring, int n)
|
|
|
|
{
|
|
|
|
vring->swhead = (vring->swhead + n) % vring->size;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int wil_vring_is_full(struct vring *vring)
|
|
|
|
{
|
|
|
|
return wil_vring_next_tail(vring) == vring->swhead;
|
|
|
|
}
|
2014-09-10 13:34:34 +00:00
|
|
|
|
2015-02-15 12:02:34 +00:00
|
|
|
/* Used space in Tx Vring */
|
|
|
|
static inline int wil_vring_used_tx(struct vring *vring)
|
2012-12-20 21:13:19 +00:00
|
|
|
{
|
|
|
|
u32 swhead = vring->swhead;
|
|
|
|
u32 swtail = vring->swtail;
|
2015-02-15 12:02:34 +00:00
|
|
|
return (vring->size + swhead - swtail) % vring->size;
|
|
|
|
}
|
2012-12-20 21:13:19 +00:00
|
|
|
|
2015-02-15 12:02:34 +00:00
|
|
|
/* Available space in Tx Vring */
|
|
|
|
static inline int wil_vring_avail_tx(struct vring *vring)
|
|
|
|
{
|
|
|
|
return vring->size - wil_vring_used_tx(vring) - 1;
|
2012-12-20 21:13:19 +00:00
|
|
|
}
|
|
|
|
|
2015-02-15 12:02:34 +00:00
|
|
|
/* wil_vring_wmark_low - low watermark for available descriptor space */
|
2014-05-27 11:45:46 +00:00
|
|
|
static inline int wil_vring_wmark_low(struct vring *vring)
|
|
|
|
{
|
|
|
|
return vring->size/8;
|
|
|
|
}
|
|
|
|
|
2015-02-15 12:02:34 +00:00
|
|
|
/* wil_vring_wmark_high - high watermark for available descriptor space */
|
2014-05-27 11:45:46 +00:00
|
|
|
static inline int wil_vring_wmark_high(struct vring *vring)
|
|
|
|
{
|
|
|
|
return vring->size/4;
|
|
|
|
}
|
|
|
|
|
2015-02-15 12:02:34 +00:00
|
|
|
/* wil_val_in_range - check if value is in the half-open range [min, max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return (min <= val) && (val < max);
}
|
|
|
|
|
2012-12-20 21:13:19 +00:00
|
|
|
/* wil_vring_alloc - allocate DMA-coherent descriptor ring and the parallel
 * SW context array for @vring; @vring->size must be set by the caller.
 *
 * On success all descriptors are marked SW-owned and SW head/tail are reset.
 * Returns 0 or -ENOMEM; on failure nothing remains allocated.
 */
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "%s()\n", __func__);

	/* HW descriptor must be exactly 32 bytes */
	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	/* per-descriptor SW context (skb pointer, mapping type) */
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
|
|
|
|
|
2014-03-17 13:34:09 +00:00
|
|
|
/* wil_txdesc_unmap - undo the DMA mapping of a single Tx descriptor.
 * @ctx->mapped_as records how the buffer was mapped (single vs. page);
 * descriptors with any other mapping state are silently skipped.
 */
static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}
|
|
|
|
|
2012-12-20 21:13:19 +00:00
|
|
|
/* wil_vring_free - release every buffer still on the ring, then free the
 * DMA-coherent descriptor area and SW context array.
 * @tx: non-zero for a Tx vring (drain from swtail, unmap per mapping type),
 *      zero for the Rx vring (drain from swhead, single-mapped skbs).
 */
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			/* copy volatile HW descriptor to the stack before use */
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			/* not every Tx descriptor carries an skb
			 * (multi-fragment frames)
			 */
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
|
|
|
|
|
|
|
|
/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 *
 * Allocates an skb sized for the max MTU (+ optional SNAP room), maps it
 * for DMA and writes the HW-owned descriptor at ring slot @i.
 * @headroom: extra bytes reserved in front (radiotap header in sniffer mode).
 * Returns 0 or -ENOMEM (allocation or DMA mapping failure).
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	/* publish descriptor to HW only after it is fully built */
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 *
 * Pushes a radiotap header in front of the frame for monitor mode.
 * When the rtap_include_phy_info module param is set, a vendor namespace
 * carrying the raw Rx descriptor (and PHY info, when present past the skb
 * tail) is appended. Expands skb headroom if needed; on expansion failure
 * the frame is left unmodified.
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	/* NOTE(review): static buffer - assumes single-threaded Rx path */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	/* make room for the header in front of the frame */
	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	/* 58320 MHz: fallback when no channel is set (60 GHz band) */
	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}
|
|
|
|
|
|
|
|
/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 *
 * Returns the received skb, or NULL when the ring is empty, the next
 * descriptor is still HW-owned, or the frame is dropped (oversized,
 * non-data, or too short). Advances swhead when a descriptor is consumed.
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = mtu_max + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i = (int)vring->swhead;
	struct wil_net_stats *stats;

	/* the Rx descriptor is stashed in skb->cb - it must fit */
	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		return NULL;
	}
	/* snapshot the volatile HW descriptor into skb->cb */
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		kfree_skb(skb);
		return NULL;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;
	stats->last_mcs_rx = wil_rxdesc_mcs(d);

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/*
	 * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW don't understand Microsoft IP stack that
		 * mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}
|
|
|
|
|
|
|
|
/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 *
 * Stops early when the ring becomes full or an skb allocation fails;
 * in either case the HW tail register is still updated with whatever
 * was posted. Returns 0 or the first allocation error.
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	/* sniffer mode needs headroom for the radiotap header */
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	/* swtail is advanced only after the slot was successfully filled;
	 * count-- is short-circuited, evaluated only while ring not full
	 */
	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	/* publish new tail to HW */
	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));

	return rc;
}
|
|
|
|
|
|
|
|
/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 *
 * In AP mode (unless ap_isolate), intra-BSS frames are additionally/instead
 * re-transmitted to the wireless medium: multicast is both bridged and
 * delivered locally; unicast to an associated station is bridged only.
 * Consumes @skb in all cases.
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED] = "GRO_MERGED",
		[GRO_MERGED_FREE] = "GRO_MERGED_FREE",
		[GRO_HELD] = "GRO_HELD",
		[GRO_NORMAL] = "GRO_NORMAL",
		[GRO_DROP] = "GRO_DROP",
	};

	skb_orphan(skb);

	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */

		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}
|
|
|
|
|
|
|
|
/**
 * Proceed all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 *
 * Reaps up to *quota frames; monitor-mode frames are delivered directly,
 * others go through the BACK reorder buffer. Finishes by refilling the
 * Rx ring. Decrements *quota per frame so the NAPI poll can budget.
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			/* sniffer mode: hand the raw 802.11 frame up as-is */
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			/* normal path: via BACK reorder buffer */
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}
|
|
|
|
|
2014-12-01 13:35:02 +00:00
|
|
|
/* wil_rx_init - allocate the Rx vring of @size descriptors, attach it to
 * the WMI Rx chain and fill it with buffers.
 * Returns 0 on success; on any failure after allocation the ring is freed.
 */
int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *v = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (v->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	v->size = size;
	rc = wil_vring_alloc(wil, v);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, v);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, v->size);
	if (rc)
		goto err_free;

	return 0;

err_free:
	wil_vring_free(wil, v, 0);

	return rc;
}
|
|
|
|
|
|
|
|
void wil_rx_fini(struct wil6210_priv *wil)
|
|
|
|
{
|
|
|
|
struct vring *vring = &wil->vring_rx;
|
|
|
|
|
2014-09-10 13:34:36 +00:00
|
|
|
wil_dbg_misc(wil, "%s()\n", __func__);
|
|
|
|
|
2013-01-28 16:31:08 +00:00
|
|
|
if (vring->va)
|
2012-12-20 21:13:19 +00:00
|
|
|
wil_vring_free(wil, vring, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* wil_vring_init_tx - allocate Tx vring @id for the (cid, tid) flow and
 * configure it in firmware via WMI_VRING_CFG_CMDID.
 *
 * On success records the cid/tid mapping, stores the HW tail pointer and
 * enables the ring; may also kick off a BACK (ADDBA) request when the
 * data port is open. Returns 0 or negative error; the ring is freed on
 * any failure after allocation.
 */
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	memset(txdata, 0, sizeof(*txdata));
	spin_lock_init(&txdata->lock);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;
	/* try to establish block-ack session right away, if allowed */
	if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}
|
|
|
|
|
/* Allocate and configure the dedicated broadcast Tx vring.
 *
 * Allocates the host-side ring of @size descriptors, registers it with
 * firmware via WMI_BCAST_VRING_CFG_CMDID, and enables Tx on it.
 * The ring is marked as broadcast by setting its CID slot to
 * WIL6210_MAX_CID (an out-of-range CID).
 *
 * Returns 0 on success, -EINVAL if the ring is already allocated or
 * firmware rejected the configuration, or the error from ring
 * allocation / the WMI call.
 */
int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
{
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	/* reset per-ring software state before (re)use */
	memset(txdata, 0, sizeof(*txdata));
	spin_lock_init(&txdata->lock);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	/* out-of-range CID marks this ring as the broadcast one */
	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	/* hand the ring over to firmware; 100 ms timeout for the reply */
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	/* only now may the Tx path use this ring */
	txdata->enabled = 1;

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}
|
|
|
|
|
/* Tear down a Tx vring: disable it, wait out in-flight NAPI polling,
 * then free the ring memory and reset its software state.
 *
 * Must be called with wil->mutex held (WARNed otherwise).
 * Safe to call on a ring that was never allocated (early return).
 */
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	WARN_ON(!mutex_is_locked(&wil->mutex));

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);

	/* disable under the ring lock so no transmit can race the free */
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
	memset(txdata, 0, sizeof(*txdata));
}
|
|
|
|
|
2015-03-15 14:00:23 +00:00
|
|
|
static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
|
2012-12-20 21:13:19 +00:00
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
2014-02-27 14:20:43 +00:00
|
|
|
int i;
|
|
|
|
struct ethhdr *eth = (void *)skb->data;
|
|
|
|
int cid = wil_find_cid(wil, eth->h_dest);
|
|
|
|
|
|
|
|
if (cid < 0)
|
|
|
|
return NULL;
|
2012-12-20 21:13:19 +00:00
|
|
|
|
2014-03-17 13:34:06 +00:00
|
|
|
if (!wil->sta[cid].data_port_open &&
|
|
|
|
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
|
|
|
|
return NULL;
|
|
|
|
|
2014-02-27 14:20:43 +00:00
|
|
|
/* TODO: fix for multiple TID */
|
|
|
|
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
|
|
|
|
if (wil->vring2cid_tid[i][0] == cid) {
|
|
|
|
struct vring *v = &wil->vring_tx[i];
|
2014-09-10 13:34:34 +00:00
|
|
|
|
2014-02-27 14:20:43 +00:00
|
|
|
wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
|
|
|
|
__func__, eth->h_dest, i);
|
|
|
|
if (v->va) {
|
|
|
|
return v;
|
|
|
|
} else {
|
|
|
|
wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-12-20 21:13:19 +00:00
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-02-27 14:20:46 +00:00
|
|
|
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|
|
|
struct sk_buff *skb);
|
2014-12-23 07:47:15 +00:00
|
|
|
|
|
|
|
/* Pick the Tx vring in STA (ESS) mode.
 *
 * Returns the first allocated unicast ring if it is eligible for this
 * frame, otherwise NULL.
 */
static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	u8 cid;

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring and see whether it is eligible for data
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		/* port closed and frame is not EAPOL: break (not continue) —
		 * only one data vring is expected in STA mode, so no other
		 * ring can be eligible either
		 */
		if (!wil->sta[cid].data_port_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			break;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}
|
|
|
|
|
2015-03-15 14:00:24 +00:00
|
|
|
/* Use one of 2 strategies:
|
|
|
|
*
|
|
|
|
* 1. New (real broadcast):
|
|
|
|
* use dedicated broadcast vring
|
|
|
|
* 2. Old (pseudo-DMS):
|
|
|
|
* Find 1-st vring and return it;
|
|
|
|
* duplicate skb and send it to other active vrings;
|
|
|
|
* in all cases override dest address to unicast peer's address
|
|
|
|
* Use old strategy when new is not supported yet:
|
|
|
|
* - for PBSS
|
|
|
|
* - for secure link
|
|
|
|
*/
|
|
|
|
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
|
|
|
|
struct sk_buff *skb)
|
2014-02-27 14:20:46 +00:00
|
|
|
{
|
2015-03-15 14:00:23 +00:00
|
|
|
struct vring *v;
|
|
|
|
int i = wil->bcast_vring;
|
2014-03-17 13:34:06 +00:00
|
|
|
|
2015-03-15 14:00:23 +00:00
|
|
|
if (i < 0)
|
|
|
|
return NULL;
|
|
|
|
v = &wil->vring_tx[i];
|
|
|
|
if (!v->va)
|
|
|
|
return NULL;
|
2014-02-27 14:20:46 +00:00
|
|
|
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2015-03-15 14:00:24 +00:00
|
|
|
static void wil_set_da_for_vring(struct wil6210_priv *wil,
|
|
|
|
struct sk_buff *skb, int vring_index)
|
|
|
|
{
|
|
|
|
struct ethhdr *eth = (void *)skb->data;
|
|
|
|
int cid = wil->vring2cid_tid[vring_index][0];
|
|
|
|
|
|
|
|
ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Broadcast via pseudo-DMS (old strategy): pick the first eligible
 * unicast ring for the frame, rewrite its DA to that peer, and send a
 * private copy of the skb (with rewritten DA) to every other eligible
 * peer ring.
 *
 * Returns the first eligible vring (the caller transmits the original
 * skb on it), or NULL if no ring qualifies. Frames are never sent back
 * to the ring whose peer MAC equals the frame's source address.
 */
static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->sta[cid].data_port_open)
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->sta[cid].data_port_open)
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		/* each extra peer gets its own copy; GFP_ATOMIC since the
		 * Tx path may run in softirq context
		 */
		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}
|
|
|
|
|
|
|
|
static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct wireless_dev *wdev = wil->wdev;
|
|
|
|
|
|
|
|
if (wdev->iftype != NL80211_IFTYPE_AP)
|
|
|
|
return wil_find_tx_bcast_2(wil, skb);
|
|
|
|
|
|
|
|
if (wil->privacy)
|
|
|
|
return wil_find_tx_bcast_2(wil, skb);
|
|
|
|
|
|
|
|
return wil_find_tx_bcast_1(wil, skb);
|
|
|
|
}
|
|
|
|
|
/* Initialize a Tx descriptor (host-side shadow copy) for one DMA
 * segment: buffer address, length, queue id, and the fixed MAC config
 * (802.3 L2 translation with SNAP header insertion).
 *
 * Always returns 0.
 */
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
|
|
|
|
|
2014-03-17 13:34:08 +00:00
|
|
|
static inline
|
|
|
|
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
|
|
|
|
{
|
|
|
|
d->mac.d[2] |= ((nr_frags + 1) <<
|
|
|
|
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
|
|
|
|
}
|
|
|
|
|
/* Program hardware TCP/UDP checksum offload fields into a Tx
 * descriptor, based on the skb's L3/L4 protocols.
 *
 * No-op (returns 0) unless skb->ip_summed == CHECKSUM_PARTIAL.
 * Returns -EINVAL for an L3 protocol other than IPv4/IPv6, or an L4
 * protocol other than TCP/UDP — the caller drops the packet then.
 */
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
					 struct vring_tx_desc *d,
					 struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
|
|
|
|
|
/* Core Tx: map an skb (head + page fragments) onto consecutive ring
 * descriptors and hand them to hardware.
 *
 * Must be called with the ring's txdata->lock held (see wil_tx_vring).
 * Descriptors are built in a local shadow copy and written to the
 * ring's DMA memory only when complete; the doorbell (iowrite32 of the
 * new swhead) is rung last.
 *
 * Returns 0 on success, -EINVAL on disabled ring / DMA mapping or
 * checksum-offload failure (all mapped segments are unwound),
 * -ENOMEM when the ring lacks room (caller may retry/backpressure).
 */
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "%s()\n", __func__);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* need one descriptor per fragment plus one for the skb head */
	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
			/* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
			/* packet mode 2 */
			d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
				       (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
		}
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags);

	/* middle segments: commit the previous descriptor to the ring,
	 * then map and build the next one in the shadow copy
	 */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_cksum_set(wil, d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		/* ring just crossed the idle threshold: account idle time */
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
	/* ring hardware doorbell with the new head */
	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));

	return 0;
 dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		/* mark descriptor done so HW won't process it */
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}
|
|
|
|
|
2015-02-01 08:55:13 +00:00
|
|
|
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int vring_index = vring - wil->vring_tx;
|
|
|
|
struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
spin_lock(&txdata->lock);
|
|
|
|
rc = __wil_tx_vring(wil, vring, skb);
|
|
|
|
spin_unlock(&txdata->lock);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
/* Network stack Tx entry point (ndo_start_xmit).
 *
 * Validates firmware/interface state, selects a Tx vring by interface
 * type and destination (unicast vs multicast), queues the frame, and
 * applies flow control: all netdev queues are stopped when the ring
 * drops below its low watermark (they are woken in wil_tx_complete).
 *
 * Returns NETDEV_TX_OK (frame consumed), NETDEV_TX_BUSY (ring full,
 * stack will retry), or NET_XMIT_DROP (frame dropped and freed).
 */
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	/* log "FW not ready" only once per not-ready episode */
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_err(wil, "FW not connected\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
		/* in STA mode (ESS), all to same VRING */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else { /* direct communication, find matching VRING */
		vring = bcast ? wil_find_tx_bcast(wil, skb) :
				wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	/* do we still have enough room in the vring? */
	if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
		netif_tx_stop_all_queues(wil_to_ndev(wil));
		wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
	}

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
|
|
|
|
|
2015-01-25 08:52:49 +00:00
|
|
|
static inline bool wil_need_txstat(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct ethhdr *eth = (void *)skb->data;
|
|
|
|
|
|
|
|
return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
|
|
|
|
(skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release a completed Tx skb, reporting ACK status to the owning
 * socket when it asked for wifi Tx status.
 */
static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb))) {
		skb_complete_wifi_ack(skb, acked);
		return;
	}

	if (acked)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
}
|
|
|
|
|
/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	used_before_complete = wil_vring_used_tx(vring);

	/* broadcast ring has no peer (cid == WIL6210_MAX_CID) — no
	 * per-station statistics for it
	 */
	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	/* outer loop: one iteration per completed frame */
	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		/* inner loop: reclaim every descriptor of that frame */
		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			/* snapshot the descriptor out of DMA memory */
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			/* only the descriptor holding the skb reference
			 * updates statistics and frees the skb
			 */
			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		/* ring just crossed the idle threshold downward */
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* re-enable the stack's queues once enough room is available */
	if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
		wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
	}

	return done;
}
|