// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */
|
|
|
|
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/if_arp.h>
|
|
|
|
#include <linux/crc-ccitt.h>
|
2014-10-27 16:13:28 +00:00
|
|
|
#include <asm/unaligned.h>
|
2012-05-15 20:50:22 +00:00
|
|
|
|
2014-10-26 08:37:09 +00:00
|
|
|
#include <net/rtnetlink.h>
|
2013-04-03 04:00:56 +00:00
|
|
|
#include <net/ieee802154_netdev.h>
|
2012-05-15 20:50:22 +00:00
|
|
|
#include <net/mac802154.h>
|
2014-10-25 07:41:02 +00:00
|
|
|
#include <net/cfg802154.h>
|
2012-05-15 20:50:22 +00:00
|
|
|
|
2014-10-25 07:41:00 +00:00
|
|
|
#include "ieee802154_i.h"
|
2014-10-28 17:21:21 +00:00
|
|
|
#include "driver-ops.h"
|
2012-05-15 20:50:22 +00:00
|
|
|
|
2022-05-19 15:05:06 +00:00
|
|
|
/* Synchronous transmit worker, scheduled on local->workqueue by
 * ieee802154_tx() when the driver only provides the sync xmit callback.
 * Sends local->tx_skb through drv_xmit_sync() and updates the netdev
 * stats on success.
 */
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, sync_tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Success path: hand the skb back to the core for completion. */
	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_release_queue(local);
	/* Drop our ongoing-tx reference; wake any waiter sleeping in
	 * ieee802154_sync_queue() once no transmissions remain.
	 */
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}
|
|
|
|
|
2014-10-26 08:37:04 +00:00
|
|
|
static netdev_tx_t
|
2014-10-26 08:37:13 +00:00
|
|
|
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
|
2012-05-15 20:50:22 +00:00
|
|
|
{
|
2014-10-26 08:37:12 +00:00
|
|
|
struct net_device *dev = skb->dev;
|
2014-10-26 08:37:08 +00:00
|
|
|
int ret;
|
2012-05-15 20:50:22 +00:00
|
|
|
|
2014-10-29 20:34:34 +00:00
|
|
|
if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
|
2018-07-02 20:32:03 +00:00
|
|
|
struct sk_buff *nskb;
|
|
|
|
u16 crc;
|
2014-07-02 03:31:09 +00:00
|
|
|
|
2018-07-02 20:32:03 +00:00
|
|
|
if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
|
|
|
|
nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (likely(nskb)) {
|
|
|
|
consume_skb(skb);
|
|
|
|
skb = nskb;
|
|
|
|
} else {
|
2022-05-19 15:05:08 +00:00
|
|
|
goto err_free_skb;
|
2018-07-02 20:32:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
crc = crc_ccitt(0, skb->data, skb->len);
|
2014-10-27 16:13:28 +00:00
|
|
|
put_unaligned_le16(crc, skb_put(skb, 2));
|
2012-05-15 20:50:22 +00:00
|
|
|
}
|
|
|
|
|
2013-04-03 04:00:56 +00:00
|
|
|
/* Stop the netif queue on each sub_if_data object. */
|
2022-05-19 15:05:10 +00:00
|
|
|
ieee802154_hold_queue(local);
|
2022-05-19 15:05:09 +00:00
|
|
|
atomic_inc(&local->phy->ongoing_txs);
|
2013-04-03 04:00:56 +00:00
|
|
|
|
2022-05-19 15:05:07 +00:00
|
|
|
/* Drivers should preferably implement the async callback. In some rare
|
|
|
|
* cases they only provide a sync callback which we will use as a
|
|
|
|
* fallback.
|
|
|
|
*/
|
2014-10-26 08:37:08 +00:00
|
|
|
if (local->ops->xmit_async) {
|
2020-09-08 10:40:25 +00:00
|
|
|
unsigned int len = skb->len;
|
|
|
|
|
2014-10-28 17:21:21 +00:00
|
|
|
ret = drv_xmit_async(local, skb);
|
2022-05-19 15:05:08 +00:00
|
|
|
if (ret)
|
|
|
|
goto err_wake_netif_queue;
|
2014-10-26 08:37:12 +00:00
|
|
|
|
|
|
|
dev->stats.tx_packets++;
|
2020-09-08 10:40:25 +00:00
|
|
|
dev->stats.tx_bytes += len;
|
2014-10-26 08:37:08 +00:00
|
|
|
} else {
|
2015-07-21 14:44:47 +00:00
|
|
|
local->tx_skb = skb;
|
2022-05-19 15:05:07 +00:00
|
|
|
queue_work(local->workqueue, &local->sync_tx_work);
|
2014-10-26 08:37:08 +00:00
|
|
|
}
|
2012-05-15 20:50:22 +00:00
|
|
|
|
|
|
|
return NETDEV_TX_OK;
|
2014-08-11 11:25:10 +00:00
|
|
|
|
2022-05-19 15:05:08 +00:00
|
|
|
err_wake_netif_queue:
|
2022-05-19 15:05:10 +00:00
|
|
|
ieee802154_release_queue(local);
|
2022-06-13 04:37:35 +00:00
|
|
|
if (atomic_dec_and_test(&local->phy->ongoing_txs))
|
2022-05-19 15:05:13 +00:00
|
|
|
wake_up(&local->phy->sync_txq);
|
2022-05-19 15:05:08 +00:00
|
|
|
err_free_skb:
|
2014-08-11 11:25:10 +00:00
|
|
|
kfree_skb(skb);
|
|
|
|
return NETDEV_TX_OK;
|
2012-05-15 20:50:22 +00:00
|
|
|
}
|
2014-10-26 08:37:01 +00:00
|
|
|
|
2022-05-19 15:05:13 +00:00
|
|
|
/* Flush all pending transmissions.
 *
 * Stops and disables the netif queues, then sleeps on phy->sync_txq
 * until every ongoing transmission has completed (phy->ongoing_txs
 * drops to zero).  Returns the last transmission result recorded in
 * local->tx_result and re-enables the queues before returning.
 */
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ieee802154_disable_queue(local);
	wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
	ret = local->tx_result;
	ieee802154_release_queue(local);

	return ret;
}
|
|
|
|
|
|
|
|
/* Flush ongoing transmissions and keep the queue stopped afterwards.
 *
 * The extra hold taken here balances the release performed inside
 * ieee802154_sync_queue(), so the queue stays held on return.  The
 * QUEUE_STOPPED flag records this state (see
 * ieee802154_queue_is_stopped()).  The caller is responsible for
 * releasing the queue again, e.g. via ieee802154_mlme_op_post().
 */
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ret = ieee802154_sync_queue(local);
	set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

	return ret;
}
|
|
|
|
|
2022-05-19 15:05:16 +00:00
|
|
|
static bool ieee802154_netif_is_down(struct ieee802154_local *local)
|
|
|
|
{
|
|
|
|
struct ieee802154_sub_if_data *sdata;
|
|
|
|
bool is_down = true;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
|
|
|
|
if (!sdata->dev)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
is_down = !netif_running(sdata->dev);
|
|
|
|
if (is_down)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
return is_down;
|
|
|
|
}
|
|
|
|
|
2022-05-19 15:05:14 +00:00
|
|
|
/* Prepare for an MLME operation: flush pending TX and hold the queue.
 * Must be paired with ieee802154_mlme_op_post().
 */
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
	return ieee802154_sync_and_hold_queue(local);
}
|
|
|
|
|
|
|
|
int ieee802154_mlme_tx(struct ieee802154_local *local, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
|
|
|
|
* MLME transmissions.
|
|
|
|
*/
|
|
|
|
rtnl_lock();
|
|
|
|
|
|
|
|
/* Ensure the device was not stopped, otherwise error out */
|
|
|
|
if (!local->open_count) {
|
|
|
|
rtnl_unlock();
|
|
|
|
return -ENETDOWN;
|
|
|
|
}
|
|
|
|
|
2022-05-19 15:05:16 +00:00
|
|
|
/* Warn if the ieee802154 core thinks MLME frames can be sent while the
|
|
|
|
* net interface expects this cannot happen.
|
|
|
|
*/
|
|
|
|
if (WARN_ON_ONCE(ieee802154_netif_is_down(local))) {
|
|
|
|
rtnl_unlock();
|
|
|
|
return -ENETDOWN;
|
|
|
|
}
|
|
|
|
|
2022-05-19 15:05:14 +00:00
|
|
|
ieee802154_tx(local, skb);
|
|
|
|
ret = ieee802154_sync_queue(local);
|
|
|
|
|
|
|
|
rtnl_unlock();
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Finish an MLME operation: release the queue hold taken by
 * ieee802154_mlme_op_pre() / ieee802154_sync_and_hold_queue().
 */
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
	ieee802154_release_queue(local);
}
|
|
|
|
|
|
|
|
/* Convenience helper: run a full pre/tx/post MLME cycle for one frame.
 *
 * NOTE(review): the return value of ieee802154_mlme_op_pre() is
 * ignored, so a failure while syncing the queue before the
 * transmission is not reported -- confirm this is intentional.
 */
int ieee802154_mlme_tx_one(struct ieee802154_local *local, struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx(local, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}
|
|
|
|
|
2022-05-19 15:05:15 +00:00
|
|
|
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
|
|
|
|
{
|
|
|
|
return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
|
|
|
|
}
|
|
|
|
|
2022-05-19 15:05:11 +00:00
|
|
|
/* Hot-path TX entry used by both .ndo_start_xmit handlers below. */
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	/* Warn if the net interface tries to transmit frames while the
	 * ieee802154 core assumes the queue is stopped.
	 */
	WARN_ON_ONCE(ieee802154_queue_is_stopped(local));

	return ieee802154_tx(local, skb);
}
|
|
|
|
|
2014-10-26 08:37:13 +00:00
|
|
|
netdev_tx_t
|
|
|
|
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
2014-10-26 08:37:01 +00:00
|
|
|
{
|
|
|
|
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
|
|
|
|
|
|
|
|
skb->skb_iif = dev->ifindex;
|
|
|
|
|
2022-05-19 15:05:11 +00:00
|
|
|
return ieee802154_hot_tx(sdata->local, skb);
|
2014-10-26 08:37:01 +00:00
|
|
|
}
|
|
|
|
|
2014-10-26 08:37:13 +00:00
|
|
|
/* .ndo_start_xmit for data (WPAN) interfaces: apply link-layer security
 * before handing the frame to the common hot TX path.  Always returns
 * NETDEV_TX_OK; on encryption failure the skb is dropped and the error
 * only logged.
 */
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO we should move it to wpan_dev_hard_header and dev_hard_header
	 * functions. The reason is wireshark will show a mac header which is
	 * with security fields but the payload is not encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}
|