Updated to support the ieee80211 is_queue_full callback for 802.11e support.

Signed-off-by: James Ketrenos <jketreno@linux.intel.com>
Author: James Ketrenos
Date:   2005-07-28 16:25:55 -05:00
Parent: d2021cb4e2
Commit: 227d2dc1f1

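For context, this change makes the driver's transmit path return NETDEV_TX_BUSY/NETDEV_TX_OK and registers a new is_queue_full handler with the ieee80211 layer (see the diff below). The following is only a rough sketch of the calling side, assuming the bundled ieee80211_device structure exposes the is_queue_full and hard_start_xmit function pointers seen in this commit; the wrapper function name and surrounding flow are hypothetical and not taken from the ieee80211 code.

/* Illustrative sketch only: a TX path consulting the new is_queue_full
 * callback before dispatching a frame for a given 802.11e priority.
 * example_tx_check() is a made-up name; the real ieee80211 TX path
 * may be structured differently. */
static int example_tx_check(struct ieee80211_device *ieee,
                            struct net_device *dev, int pri)
{
        /* Ask the driver (ipw_net_is_queue_full here) whether the TX
         * queue backing priority 'pri' still has room. */
        if (ieee->is_queue_full && ieee->is_queue_full(dev, pri))
                return NETDEV_TX_BUSY;  /* let the stack requeue the skb */

        /* ...otherwise build the txb and call ieee->hard_start_xmit()... */
        return NETDEV_TX_OK;
}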

@@ -9654,7 +9654,7 @@ modify to send one tfd per fragment instead of using chunking. otherwise
    we need to heavily modify the ieee80211_skb_to_txb.
 */
-static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
+static inline int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
                              int pri)
 {
         struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
@@ -9672,6 +9672,11 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
         u16 remaining_bytes;
         int fc;
 
+        /* If there isn't room in the queue, we return busy and let the
+         * network stack requeue the packet for us */
+        if (ipw_queue_space(q) < q->high_mark)
+                return NETDEV_TX_BUSY;
+
         switch (priv->ieee->iw_mode) {
         case IW_MODE_ADHOC:
                 hdr_len = IEEE80211_3ADDR_LEN;
@@ -9837,14 +9842,28 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
         q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
         ipw_write32(priv, q->reg_w, q->first_empty);
 
-        if (ipw_queue_space(q) < q->high_mark)
-                netif_stop_queue(priv->net_dev);
-
-        return;
+        return NETDEV_TX_OK;
 
  drop:
         IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
         ieee80211_txb_free(txb);
+        return NETDEV_TX_OK;
+}
+
+static int ipw_net_is_queue_full(struct net_device *dev, int pri)
+{
+        struct ipw_priv *priv = ieee80211_priv(dev);
+#ifdef CONFIG_IPW_QOS
+        int tx_id = ipw_get_tx_queue_number(priv, pri);
+        struct clx2_tx_queue *txq = &priv->txq[tx_id];
+#else
+        struct clx2_tx_queue *txq = &priv->txq[0];
+#endif                          /* CONFIG_IPW_QOS */
+
+        if (ipw_queue_space(&txq->q) < txq->q.high_mark)
+                return 1;
+
+        return 0;
 }
 
 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
@@ -9852,6 +9871,7 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
 {
         struct ipw_priv *priv = ieee80211_priv(dev);
         unsigned long flags;
+        int ret;
 
         IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
         spin_lock_irqsave(&priv->lock, flags);
@@ -9863,11 +9883,12 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
                 goto fail_unlock;
         }
 
-        ipw_tx_skb(priv, txb, pri);
-        __ipw_led_activity_on(priv);
+        ret = ipw_tx_skb(priv, txb, pri);
+        if (ret == NETDEV_TX_OK)
+                __ipw_led_activity_on(priv);
         spin_unlock_irqrestore(&priv->lock, flags);
 
-        return 0;
+        return ret;
 
  fail_unlock:
         spin_unlock_irqrestore(&priv->lock, flags);
@@ -10706,6 +10727,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
         priv->ieee->set_security = shim__set_security;
+        priv->ieee->is_queue_full = ipw_net_is_queue_full;
 
 #ifdef CONFIG_IPW_QOS
         priv->ieee->handle_management_frame = ipw_handle_management_frame;