/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Daniel Martensson
 *		Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
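
/*
 * Worked example (illustrative): with pow == 4, PAD_POW2(3, 4) == 1,
 * PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0, i.e. the macro yields the
 * number of bytes needed to reach the next multiple of the power-of-two
 * alignment, or zero if x is already aligned.
 */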

static const struct cfhsi_config hsi_default_config = {

	/* Inactivity timeout on HSI, ms */
	.inactivity_timeout = HZ,

	/* Aggregation timeout (ms) of zero means no aggregation is done */
	.aggregation_timeout = 1,

	/*
	 * HSI link layer flow-control thresholds.
	 * Threshold values for the HSI packet queue. Flow-control will be
	 * asserted when the number of packets exceeds q_high_mark. It will
	 * not be de-asserted before the number of packets drops below
	 * q_low_mark.
	 * Warning: A high threshold value might increase throughput but it
	 * will at the same time prevent channel prioritization and increase
	 * the risk of flooding the modem. The high threshold should be above
	 * the low.
	 */
	.q_high_mark = 100,
	.q_low_mark = 50,

	/*
	 * HSI padding options.
	 * Warning: must be a power of 2 (the & operation is used) and
	 * cannot be zero!
	 */
	.head_align = 4,
	.tail_align = 4,
};
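
/*
 * Note: these are only defaults. Each field of hsi_default_config can be
 * overridden per device through the __IFLA_CAIF_HSI_* netlink attributes
 * handled in cfhsi_netlink_parms() below.
 */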

#define ON 1
#define OFF 0

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					const struct sk_buff *skb,
					int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}
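
/*
 * Note (illustrative): hpad includes one extra byte because each CAIF
 * frame is prefixed with a single byte holding its own head-padding
 * length (written as "hpad - 1" in cfhsi_tx_frm() below), so the
 * aggregated length tracks the exact on-wire footprint of the frame.
 */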

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	if (cfhsi->cfg.aggregation_timeout == 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}
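
/*
 * In other words (illustrative summary): only best-effort/bulk (BEBK)
 * traffic is ever held back for aggregation; any pending frame in a
 * higher-priority queue, a filled BEBK queue or a zero aggregation
 * timeout forces transmission right away.
 */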

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}
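
/*
 * Illustrative note on the handshake above: CFHSI_FLUSH_FIFO marks that
 * the next RX completion belongs to the flush. cfhsi_rx_done_cb() below
 * clears the bit and wakes flush_fifo_wait instead of running the normal
 * RX path, which is what terminates each wait_event iteration here.
 */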

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
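
/*
 * Resulting TX frame layout (illustrative sketch, not taken from the spec):
 *
 *   +------------------+-------------------+------------------------+
 *   | descriptor:      | embedded frame    | payload CAIF frames,   |
 *   | header, offset,  | (optional, fits   | each: pad-len byte,    |
 *   | cffrm_len[]      | in emb_frm area)  | head pad, frame, tail  |
 *   +------------------+-------------------+------------------------+
 *
 * The returned length is CFHSI_DESC_SZ plus the summed padded payload
 * frame lengths stored in desc->cffrm_len[].
 */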

static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->cfg.aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */
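		/*
		 * Example (illustrative): pfrm bytes 0x2A 0x01 decode to a
		 * CAIF frame length of 0x012A (298), and the two FCS bytes
		 * added above give len == 300.
		 */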

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		skb_put_data(skb, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are in a callback handler and
		 * unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
				"%s: Invalid payload len: %d, ignored.\n",
				__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		skb_put_data(skb, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called in callback from HSI
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
			/* Mark no embedded frame after extracting it */
			piggy_desc->offset = 0;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* It happens when a wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to check if modem has sent something. */
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
				__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Mark the link as awake and clear the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgement. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics  */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->inactivity_timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}
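
/*
 * Summary of the transmit paths above (illustrative): if the link is
 * already awake (the inactivity timer was armed), the frame is sent
 * directly from this context; otherwise the wake-up worker is scheduled
 * and the queued frame is picked up once the peer has acknowledged the
 * wake line.
 */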

static const struct net_device_ops cfhsi_netdevops;

static void cfhsi_setup(struct net_device *dev)
{
	int i;
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->needs_free_netdev = true;
	dev->netdev_ops = &cfhsi_netdevops;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		skb_queue_head_init(&cfhsi->qhead[i]);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
	cfhsi->cfg = hsi_default_config;
}

static int cfhsi_open(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	int res;

	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;

	/*
	 * Allocate a TX buffer with the size of a HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_flip_buf) {
		res = -ENODEV;
		goto err_alloc_rx_flip;
	}

	/* Initialize aggregation timeout */
	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
	if (!cfhsi->wq) {
		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->inactivity_timer);
	cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
	cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
	/* Setup the aggregation timer. */
	init_timer(&cfhsi->aggregation_timer);
	cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
	cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;

	/* Activate HSI interface. */
	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
	if (res) {
		netdev_err(cfhsi->ndev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}
	return res;

err_net_reg:
	cfhsi->ops->cfhsi_down(cfhsi->ops);
err_activate:
	destroy_workqueue(cfhsi->wq);
err_create_wq:
	kfree(cfhsi->rx_flip_buf);
err_alloc_rx_flip:
	kfree(cfhsi->rx_buf);
err_alloc_rx:
	kfree(cfhsi->tx_buf);
err_alloc_tx:
	return res;
}

static int cfhsi_close(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	u8 *tx_buf, *rx_buf, *flip_buf;

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->inactivity_timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);
	del_timer_sync(&cfhsi->aggregation_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;
	flip_buf = cfhsi->rx_flip_buf;
	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->ops->cfhsi_down(cfhsi->ops);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
	kfree(flip_buf);
	return 0;
}

static void cfhsi_uninit(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	ASSERT_RTNL();
	/* Release the reference taken with symbol_get() in newlink. */
	symbol_put(cfhsi_get_ops);
	list_del(&cfhsi->list);
}

static const struct net_device_ops cfhsi_netdevops = {
	.ndo_uninit = cfhsi_uninit,
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
	int i;

	if (!data) {
		pr_debug("no params data found\n");
		return;
	}

	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
	/*
	 * Inactivity timeout in millisecs. Lowest possible value is 1,
	 * and highest possible is NEXT_TIMER_MAX_DELTA.
	 */
	if (data[i]) {
		u32 inactivity_timeout = nla_get_u32(data[i]);
		/* Pre-calculate inactivity timeout. */
		cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
		if (cfhsi->cfg.inactivity_timeout == 0)
			cfhsi->cfg.inactivity_timeout = 1;
		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
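	/*
	 * Example (illustrative): with HZ == 100, a requested timeout of
	 * 250 ms becomes 250 * 100 / 1000 == 25 jiffies; a request of 0 ms
	 * is rounded up to the minimum of 1 jiffy.
	 */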

	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
	if (data[i])
		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
	if (data[i])
		cfhsi->cfg.head_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
	if (data[i])
		cfhsi->cfg.tail_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
}

static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[])
{
	cfhsi_netlink_parms(data, netdev_priv(dev));
	netdev_state_change(dev);
	return 0;
}

static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};

static size_t caif_hsi_get_size(const struct net_device *dev)
{
	int i;
	size_t s = 0;
	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
		s += nla_total_size(caif_hsi_policy[i].len);
	return s;
}

static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
			cfhsi->cfg.inactivity_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
			cfhsi->cfg.aggregation_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
			cfhsi->cfg.head_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
			cfhsi->cfg.tail_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
			cfhsi->cfg.q_high_mark) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
			cfhsi->cfg.q_low_mark))
		return -EMSGSIZE;

	return 0;
}

static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_ops *(*get_ops)(void);

	ASSERT_RTNL();

	cfhsi = netdev_priv(dev);
	cfhsi_netlink_parms(data, cfhsi);

	get_ops = symbol_get(cfhsi_get_ops);
	if (!get_ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		return -ENODEV;
	}

	/* Assign the HSI device. */
	cfhsi->ops = (*get_ops)();
	if (!cfhsi->ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		goto err;
	}

	/* Assign the driver to this HSI device. */
	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
	if (register_netdevice(dev)) {
		pr_warn("%s: caif_hsi device registration failed\n", __func__);
		goto err;
	}
	/* Add CAIF HSI device to list. */
	list_add_tail(&cfhsi->list, &cfhsi_list);

	return 0;
err:
	symbol_put(cfhsi_get_ops);
	return -ENODEV;
}

static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
	.kind		= "cfhsi",
	.priv_size	= sizeof(struct cfhsi),
	.setup		= cfhsi_setup,
	.maxtype	= __IFLA_CAIF_HSI_MAX,
	.policy		= caif_hsi_policy,
	.newlink	= caif_hsi_newlink,
	.changelink	= caif_hsi_changelink,
	.get_size	= caif_hsi_get_size,
	.fill_info	= caif_hsi_fill_info,
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi;

	rtnl_link_unregister(&caif_hsi_link_ops);

	rtnl_lock();
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* RTNL is held: use the lockless variant of unregister. */
		unregister_netdevice(cfhsi->ndev);
	}
	rtnl_unlock();
}

static int __init cfhsi_init_module(void)
{
	return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);