r8169: remove nr_frags argument from rtl_tx_slots_avail
The only time when nr_frags isn't MAX_SKB_FRAGS is when entering
rtl8169_start_xmit(). However, we can use MAX_SKB_FRAGS here too, because
when the queue isn't stopped there should always be room for
MAX_SKB_FRAGS + 1 descriptors.

Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Link: https://lore.kernel.org/r/3d1f2ad7-31d5-2cac-4f4a-394f8a3cab63@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 83c317d7b3
parent b618c32702
committed by Jakub Kicinski
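The threshold in the diff below follows from the reasoning in the commit
message: a packet with nr_frags fragments needs nr_frags + 1 descriptors, so
the queue may keep running only while strictly more than MAX_SKB_FRAGS slots
are free. The following is a minimal userspace sketch of that arithmetic, not
driver code: the ring size of 64 and the MAX_SKB_FRAGS value of 17 are
illustrative assumptions, and tx_slots_avail()/struct ring are made-up
stand-ins for rtl_tx_slots_avail() and the tp->cur_tx/tp->dirty_tx counters.

#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_DESC   64   /* assumed ring size */
#define MAX_SKB_FRAGS 17   /* illustrative worst-case fragments per packet */

struct ring {
	unsigned int cur_tx;   /* producer: next descriptor to use */
	unsigned int dirty_tx; /* consumer: next descriptor to reclaim */
};

/* Same arithmetic as rtl_tx_slots_avail(): free slots are the ring size
 * minus the in-flight descriptors (cur_tx - dirty_tx). Unsigned wraparound
 * keeps the subtraction correct even after the counters overflow.
 */
static bool tx_slots_avail(const struct ring *r)
{
	unsigned int slots_avail = r->dirty_tx + NUM_TX_DESC - r->cur_tx;

	/* A packet with nr_frags fragments needs nr_frags + 1 descriptors,
	 * so require strictly more than MAX_SKB_FRAGS free slots.
	 */
	return slots_avail > MAX_SKB_FRAGS;
}

int main(void)
{
	struct ring r = { .cur_tx = 0, .dirty_tx = 0 };

	/* Queue worst-case packets until the check says stop. */
	while (tx_slots_avail(&r))
		r.cur_tx += MAX_SKB_FRAGS + 1;

	printf("stopped with %u descriptors in flight\n",
	       r.cur_tx - r.dirty_tx);
	return 0;
}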
@@ -4141,14 +4141,13 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
 	return true;
 }
 
-static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
-			       unsigned int nr_frags)
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
 {
 	unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC
 					- READ_ONCE(tp->cur_tx);
 
 	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-	return slots_avail > nr_frags;
+	return slots_avail > MAX_SKB_FRAGS;
 }
 
 /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
@@ -4183,7 +4182,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	txd_first = tp->TxDescArray + entry;
 
-	if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
+	if (unlikely(!rtl_tx_slots_avail(tp))) {
 		if (net_ratelimit())
 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
@@ -4228,7 +4227,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
 
-	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+	stop_queue = !rtl_tx_slots_avail(tp);
 	if (unlikely(stop_queue)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
@@ -4243,7 +4242,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 		 * can't.
 		 */
 		smp_mb__after_atomic();
-		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
+		if (rtl_tx_slots_avail(tp))
 			netif_start_queue(dev);
 		door_bell = true;
 	}
@@ -4394,10 +4393,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
 		 * ring status.
 		 */
 		smp_store_mb(tp->dirty_tx, dirty_tx);
-		if (netif_queue_stopped(dev) &&
-		    rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+		if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
 			netif_wake_queue(dev);
-		}
 		/*
 		 * 8168 hack: TxPoll requests are lost when the Tx packets are
 		 * too close. Let's kick an extra TxPoll request when a burst
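The comments kept in the start_xmit and rtl_tx hunks above describe a
stop/wake race: the transmit path may stop the queue just as the completion
path frees descriptors, so each side re-checks availability after a full
barrier. Below is a compact userspace sketch of that ordering, under stated
assumptions only: C11 atomics and a seq_cst fence stand in for the kernel's
smp_mb__after_atomic()/smp_store_mb(), queue_stopped stands in for the netif
queue state, and the ring parameters and slot check are the same illustrative
values as in the sketch above.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_DESC   64   /* assumed ring size */
#define MAX_SKB_FRAGS 17   /* illustrative worst-case fragments per packet */

static atomic_uint cur_tx, dirty_tx;   /* stand-ins for tp->cur_tx/dirty_tx */
static atomic_bool queue_stopped;      /* stand-in for the netif queue state */

static bool tx_slots_avail(void)
{
	unsigned int slots = atomic_load(&dirty_tx) + NUM_TX_DESC -
			     atomic_load(&cur_tx);

	return slots > MAX_SKB_FRAGS;
}

/* Transmit tail: publish the new cur_tx, stop the queue if a worst-case
 * packet no longer fits, then re-check after a full barrier so a completion
 * that advanced dirty_tx in the meantime is not missed
 * (cf. smp_mb__after_atomic() + netif_start_queue()).
 */
static void xmit_tail(unsigned int frags)
{
	atomic_fetch_add(&cur_tx, frags + 1);

	if (!tx_slots_avail()) {
		atomic_store(&queue_stopped, true);
		atomic_thread_fence(memory_order_seq_cst);
		if (tx_slots_avail())
			atomic_store(&queue_stopped, false);
	}
}

/* Completion: publish the new dirty_tx with a full barrier, then wake the
 * queue if it was stopped and space is available again
 * (cf. smp_store_mb() + netif_wake_queue()).
 */
static void tx_complete(unsigned int reclaimed)
{
	atomic_fetch_add(&dirty_tx, reclaimed);
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&queue_stopped) && tx_slots_avail())
		atomic_store(&queue_stopped, false);
}

int main(void)
{
	/* The core never calls the xmit path while the queue is stopped;
	 * model that by sending worst-case packets until it stops.
	 */
	while (!atomic_load(&queue_stopped))
		xmit_tail(MAX_SKB_FRAGS);

	printf("in flight at stop: %u\n",
	       atomic_load(&cur_tx) - atomic_load(&dirty_tx));

	/* Completion reclaims everything and wakes the queue again. */
	tx_complete(atomic_load(&cur_tx) - atomic_load(&dirty_tx));
	printf("stopped after completion: %d\n",
	       (int)atomic_load(&queue_stopped));
	return 0;
}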