net: mhi: Get rid of local rx queue count
Use the new mhi_get_free_desc_count helper to track queue usage instead of relying on the locally maintained rx_queued count.

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 6e10785ee1
parent e6ec3ccd4e
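In short, the patch drops the driver-local rx_queued counter and instead asks the MHI core how many inbound descriptors are free. Below is a condensed sketch (not the driver's exact code) of the reworked RX completion path; it assumes the driver's existing private struct mhi_net_dev with its rx_refill and rx_queue_sz members, and it omits the error handling, statistics, and netif_rx() delivery that the real callback keeps.

#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

/* Condensed illustration: on every completed inbound transfer, query the
 * MHI core for the number of free RX descriptors and kick the refill
 * worker once at least half of the ring is available again.
 */
static void mhi_net_dl_callback_sketch(struct mhi_device *mhi_dev,
				       struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	/* Refill if the RX buffer queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

Note that the threshold reads inverted relative to the old code: the old check fired when the count of queued buffers dropped to half the ring or fewer, while the new one fires when at least half of the descriptors are free, so both target the same midpoint of the ring.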
@@ -25,7 +25,6 @@ struct mhi_net_stats {
 	u64_stats_t tx_bytes;
 	u64_stats_t tx_errors;
 	u64_stats_t tx_dropped;
-	atomic_t rx_queued;
 	struct u64_stats_sync tx_syncp;
 	struct u64_stats_sync rx_syncp;
 };
@@ -138,9 +137,9 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 {
 	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
 	struct sk_buff *skb = mhi_res->buf_addr;
-	int remaining;
+	int free_desc_count;

-	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

 	if (unlikely(mhi_res->transaction_status)) {
 		dev_kfree_skb_any(skb);
@@ -175,7 +174,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 	}

 	/* Refill if RX buffers queue becomes low */
-	if (remaining <= mhi_netdev->rx_queue_sz / 2)
+	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
 		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
 }

@@ -222,7 +221,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
 	struct sk_buff *skb;
 	int err;

-	while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
+	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
 		skb = netdev_alloc_skb(ndev, size);
 		if (unlikely(!skb))
 			break;
@@ -235,8 +234,6 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
 			break;
 		}

-		atomic_inc(&mhi_netdev->stats.rx_queued);
-
 		/* Do not hog the CPU if rx buffers are consumed faster than
 		 * queued (unlikely).
 		 */
@@ -244,7 +241,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
 	}

 	/* If we're still starved of rx buffers, reschedule later */
-	if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
 		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
 }

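On the refill side, both the loop condition and the "still starved" recheck now come from the MHI core as well. The following condensed sketch shows the reworked worker under the same assumptions as above; the ndev and mdev member names, the mhi_queue_skb() submission call with the MHI_EOT flag, and the MTU-based buffer size are illustrative simplifications rather than the driver's exact code, and the CPU-yield throttling from the hunks above is left out.

/* Queue RX skbs until the inbound descriptor ring is full; if nothing could
 * be queued at all (every descriptor still free), retry half a second later.
 */
static void mhi_net_rx_refill_work_sketch(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	size_t size = READ_ONCE(ndev->mtu);	/* simplified buffer sizing */
	struct sk_buff *skb;
	int err;

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		/* Hand the buffer to the MHI core (transfer flag illustrative) */
		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			dev_kfree_skb_any(skb);
			break;
		}
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

The final check replaces the old !atomic_read(&stats.rx_queued) test: a completely free ring means no buffer was queued at all, so the delayed retry only runs when the refill made no progress.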