bnxt: use new queue try_stop/try_wake macros
Convert bnxt to use the new macros rather than open-coding the logic. Two differences: (1) bnxt_tx_int() will now only issue a memory barrier if it sees enough space on the ring to wake the queue. This should be fine; the mb() sits between the writes to the ring pointers and the queue-state check. (2) On a race we now start the queue instead of waking it, which is safe inside the xmit handler.

Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 08a096780d
parent 9ded5bc77f
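For reference, the sketch below shows how the stop side of this pattern is typically used from a driver's xmit handler, as the conversion in bnxt_start_xmit() does. It is a minimal, hypothetical example: the ring structure, my_tx_avail() helper, and threshold handling are invented for illustration; only the netif_txq_try_stop() call from <net/netdev_queues.h> mirrors what this commit uses.

/* Hypothetical driver, for illustration only -- the ring layout and
 * helpers below are assumptions, not bnxt code.  Only the
 * netif_txq_try_stop() call reflects the pattern adopted here.
 */
#include <linux/netdevice.h>
#include <net/netdev_queues.h>

struct my_tx_ring {
	u16 prod;
	u16 cons;
	u16 size;			/* number of descriptors */
};

static u16 my_tx_avail(const struct my_tx_ring *ring)
{
	return ring->size - (u16)(ring->prod - ring->cons);
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev,
				 struct my_tx_ring *ring, u16 start_thrs)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (unlikely(my_tx_avail(ring) < MAX_SKB_FRAGS + 2)) {
		/* Stops the queue, issues a barrier, then re-checks the
		 * ring; if the completion path freed descriptors in the
		 * meantime the queue is started again (not woken) and a
		 * non-zero value is returned, so BUSY is reported only
		 * when the queue really stays stopped.
		 */
		if (!netif_txq_try_stop(txq, my_tx_avail(ring), start_thrs))
			return NETDEV_TX_BUSY;
	}

	/* ... place the skb on the ring and kick the doorbell ... */
	return NETDEV_TX_OK;
}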
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -56,6 +56,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <net/page_pool.h>
 #include <linux/align.h>
+#include <net/netdev_queues.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -331,26 +332,6 @@ static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 	txr->kick_pending = 0;
 }
 
-static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
-					  struct bnxt_tx_ring_info *txr,
-					  struct netdev_queue *txq)
-{
-	netif_tx_stop_queue(txq);
-
-	/* netif_tx_stop_queue() must be done before checking
-	 * tx index in bnxt_tx_avail() below, because in
-	 * bnxt_tx_int(), we update tx index before checking for
-	 * netif_tx_queue_stopped().
-	 */
-	smp_mb();
-	if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
-		netif_tx_wake_queue(txq);
-		return false;
-	}
-
-	return true;
-}
-
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
@@ -384,7 +365,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (net_ratelimit() && txr->kick_pending)
 			netif_warn(bp, tx_err, dev,
 				   "bnxt: ring busy w/ flush pending!\n");
-		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+					bp->tx_wake_thresh))
 			return NETDEV_TX_BUSY;
 	}
 
@@ -614,7 +596,8 @@ tx_done:
 		if (netdev_xmit_more() && !tx_buf->is_push)
 			bnxt_txr_db_kick(bp, txr, prod);
 
-		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
+		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+				   bp->tx_wake_thresh);
 	}
 	return NETDEV_TX_OK;
 
@@ -708,17 +691,8 @@ next_tx_int:
 	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
 	txr->tx_cons = cons;
 
-	/* Need to make the tx_cons update visible to bnxt_start_xmit()
-	 * before checking for netif_tx_queue_stopped().  Without the
-	 * memory barrier, there is a small possibility that bnxt_start_xmit()
-	 * will miss it and cause the queue to be stopped forever.
-	 */
-	smp_mb();
-
-	if (unlikely(netif_tx_queue_stopped(txq)) &&
-	    bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
-	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
-		netif_tx_wake_queue(txq);
+	__netif_txq_maybe_wake(txq, bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
+			       READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING);
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
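On the completion side, __netif_txq_maybe_wake() folds in the barrier-and-wake logic that the removed comment in bnxt_tx_int() described, issuing the memory barrier only when it sees enough free descriptors to consider waking the queue (difference (1) in the commit message). Below is a minimal, hypothetical completion handler using the same call; the ring fields and "closing" flag are assumptions standing in for bnxt's dev_state check.

/* Hypothetical completion path, for illustration only.  Only the
 * __netif_txq_maybe_wake() call mirrors the pattern bnxt_tx_int()
 * adopts in this commit.
 */
#include <linux/netdevice.h>
#include <net/netdev_queues.h>

struct my_tx_ring {
	u16 prod;
	u16 cons;
	u16 size;
	bool closing;			/* set while the device is going down */
};

static u16 my_tx_avail(const struct my_tx_ring *ring)
{
	return ring->size - (u16)(ring->prod - ring->cons);
}

static void my_tx_complete(struct net_device *dev, struct my_tx_ring *ring,
			   unsigned int pkts, unsigned int bytes,
			   u16 new_cons, u16 start_thrs)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	netdev_tx_completed_queue(txq, pkts, bytes);
	WRITE_ONCE(ring->cons, new_cons);

	/* Wakes a stopped queue only when enough descriptors are free
	 * and the device is not going down; the barrier that makes the
	 * consumer update visible to the xmit path is issued inside the
	 * helper, and only when there is enough space to wake.
	 */
	__netif_txq_maybe_wake(txq, my_tx_avail(ring), start_thrs,
			       READ_ONCE(ring->closing));
}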