// SPDX-License-Identifier: GPL-2.0-only
/*
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/mac80211/util.c
 */

#include "ieee802154_i.h"
#include "driver-ops.h"

/* privid for wpan_phys to determine whether they belong to us or not */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
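
/* A minimal sketch, not part of this file, of the ownership test this privid
 * enables: a wpan_phy belongs to mac802154 iff its privid pointer matches
 * ours. The helper name is hypothetical.
 *
 *	static bool mac802154_phy_is_ours(struct wpan_phy *wpan_phy)
 *	{
 *		return wpan_phy->privid == mac802154_wpan_phy_privid;
 *	}
 */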
/**
 * ieee802154_wake_queue - wake ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we have to stop the queue to
 * avoid new skbs coming in during the transmission. The queue then needs to be
 * woken up after the operation.
 */
static void ieee802154_wake_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!sdata->dev)
			continue;

		netif_wake_queue(sdata->dev);
	}
	rcu_read_unlock();
}
/**
 * ieee802154_stop_queue - stop ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we need to tell upper layers to
 * stop giving us new skbs while we are busy with the transmitted one. The queue
 * must then be stopped before transmitting.
 */
static void ieee802154_stop_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!sdata->dev)
			continue;

		netif_stop_queue(sdata->dev);
	}
	rcu_read_unlock();
}
/**
 * ieee802154_hold_queue - hold ieee802154 queue
 * @local: main mac object
 *
 * Hold a queue by incrementing an atomic counter and requesting the queue to
 * be stopped. The queue cannot be woken up while the counter is still not
 * null.
 */
void ieee802154_hold_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (!atomic_fetch_inc(&local->phy->hold_txs))
		ieee802154_stop_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}

/**
 * ieee802154_release_queue - release ieee802154 queue
 * @local: main mac object
 *
 * Release a held queue by decrementing the atomic counter, and wake it up
 * only if the counter reaches 0.
 */
void ieee802154_release_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (atomic_dec_and_test(&local->phy->hold_txs))
		ieee802154_wake_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
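
/* A minimal usage sketch, not part of this file: the hold/release pair is
 * refcounted, so nested holders compose and only the outermost pair actually
 * stops and wakes the netdev queues. The function name is hypothetical.
 *
 *	static void mac802154_quiesce_tx(struct ieee802154_local *local)
 *	{
 *		ieee802154_hold_queue(local);	 // first holder stops the queue
 *		// ... reconfigure the transceiver, no new skbs arrive here ...
 *		ieee802154_release_queue(local); // last releaser wakes it
 *	}
 */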
/**
 * ieee802154_disable_queue - disable ieee802154 queue
 * @local: main mac object
 *
 * When trying to sync the Tx queue, we cannot just stop the queue (which is
 * basically a bit being set without proper lock handling) because it would
 * be racy. We actually need to call netif_tx_disable() instead to ensure
 * that no Tx will occur anymore after this call.
 */
void ieee802154_disable_queue(struct ieee802154_local *local)
{
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!sdata->dev)
			continue;

		netif_tx_disable(sdata->dev);
	}
	rcu_read_unlock();
}
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
	struct ieee802154_local *local =
		container_of(timer, struct ieee802154_local, ifs_timer);

	ieee802154_release_queue(local);

	return HRTIMER_NORESTART;
}
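
/* Sketch of how this one-shot callback is wired up (mac802154 initializes
 * the timer when the hw is allocated, roughly along these lines):
 *
 *	hrtimer_init(&local->ifs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	local->ifs_timer.function = ieee802154_xmit_ifs_timer;
 *
 * The timer is then armed from ieee802154_xmit_complete() below with either
 * the SIFS or the LIFS period, depending on the frame size.
 */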
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
			      bool ifs_handling)
{
	struct ieee802154_local *local = hw_to_local(hw);

	local->tx_result = IEEE802154_SUCCESS;

	if (ifs_handling) {
		u8 max_sifs_size;

		/* If the transceiver sets the CRC on its own, the FCS is not
		 * part of skb->len, so the LIFS threshold is 16 instead
		 * of 18.
		 */
		if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
			max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
					IEEE802154_FCS_LEN;
		else
			max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;

		if (skb->len > max_sifs_size)
			hrtimer_start(&local->ifs_timer,
				      hw->phy->lifs_period * NSEC_PER_USEC,
				      HRTIMER_MODE_REL);
		else
			hrtimer_start(&local->ifs_timer,
				      hw->phy->sifs_period * NSEC_PER_USEC,
				      HRTIMER_MODE_REL);
	} else {
		ieee802154_release_queue(local);
	}

	dev_consume_skb_any(skb);
	if (atomic_dec_and_test(&hw->phy->ongoing_txs))
		wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
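
/* A minimal driver-side sketch, not part of this file: a hypothetical "foo"
 * driver completing a transmission from its TX-done interrupt handler.
 * Passing ifs_handling=true lets mac802154 enforce the interframe spacing
 * above before the queue is woken again. priv->hw and priv->tx_skb are
 * hypothetical driver fields.
 *
 *	static void foo_tx_done(struct foo_priv *priv)
 *	{
 *		ieee802154_xmit_complete(priv->hw, priv->tx_skb, true);
 *	}
 */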
void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
			   int reason)
{
	struct ieee802154_local *local = hw_to_local(hw);

	local->tx_result = reason;
	ieee802154_release_queue(local);
	dev_kfree_skb_any(skb);
	if (atomic_dec_and_test(&hw->phy->ongoing_txs))
		wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);
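
/* Counterpart sketch for the failure path (same hypothetical "foo" driver):
 * the driver reports an MLME status code and must not free the skb itself,
 * since ieee802154_xmit_error() does that. The status constants (for
 * example IEEE802154_CHANNEL_ACCESS_FAILURE) live in linux/ieee802154.h.
 *
 *	static void foo_tx_failed(struct foo_priv *priv)
 *	{
 *		ieee802154_xmit_error(priv->hw, priv->tx_skb,
 *				      IEEE802154_CHANNEL_ACCESS_FAILURE);
 *	}
 */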
/* Convenience wrapper for fatal hardware errors: complete the failed
 * transmission with a generic system error status.
 */
void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR);
}
EXPORT_SYMBOL(ieee802154_xmit_hw_error);
/* Flush pending work, stop the interframe-spacing timer, then shut the
 * hardware down via the driver's stop callback.
 */
void ieee802154_stop_device(struct ieee802154_local *local)
{
	flush_workqueue(local->workqueue);
	hrtimer_cancel(&local->ifs_timer);
	drv_stop(local);
}