mirror of
https://github.com/torvalds/linux.git
synced 2024-11-27 22:51:35 +00:00
net: hns: Replace netif_tx_lock with ring spin lock
netif_tx_lock is a global spin lock; it takes effect on all rings in the netdevice. In the tx_poll_one process, only the current ring needs to be locked, so in this case we define a spin lock in the hnae_ring struct for it. Signed-off-by: lipeng <lipeng321@huawei.com> Reviewed-by: Yisen Zhuang <yisen.zhuang@huawei.com> Signed-off-by: Salil Mehta <salil.mehta@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
b29bd41259
commit
f2aaed557e
@ -196,6 +196,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
|
||||
|
||||
ring->q = q;
|
||||
ring->flags = flags;
|
||||
spin_lock_init(&ring->lock);
|
||||
assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
|
||||
|
||||
/* not matter for tx or rx ring, the ntc and ntc start from 0 */
|
||||
|
@ -275,6 +275,9 @@ struct hnae_ring {
|
||||
/* statistic */
|
||||
struct ring_stats stats;
|
||||
|
||||
/* ring lock for poll one */
|
||||
spinlock_t lock;
|
||||
|
||||
dma_addr_t desc_dma_addr;
|
||||
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
|
||||
u16 desc_num; /* total number of desc */
|
||||
|
@ -922,12 +922,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
|
||||
|
||||
/* netif_tx_lock will turn down the performance, set only when necessary */
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
|
||||
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
|
||||
#define NETIF_TX_LOCK(ring) spin_lock(&ring->lock)
|
||||
#define NETIF_TX_UNLOCK(ring) spin_unlock(&ring->lock)
|
||||
#else
|
||||
#define NETIF_TX_LOCK(ndev)
|
||||
#define NETIF_TX_UNLOCK(ndev)
|
||||
#define NETIF_TX_LOCK(ring)
|
||||
#define NETIF_TX_UNLOCK(ring)
|
||||
#endif
|
||||
|
||||
/* reclaim all desc in one budget
|
||||
* return error or number of desc left
|
||||
*/
|
||||
@ -941,13 +942,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
|
||||
int head;
|
||||
int bytes, pkts;
|
||||
|
||||
NETIF_TX_LOCK(ndev);
|
||||
NETIF_TX_LOCK(ring);
|
||||
|
||||
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
|
||||
rmb(); /* make sure head is ready before touch any data */
|
||||
|
||||
if (is_ring_empty(ring) || head == ring->next_to_clean) {
|
||||
NETIF_TX_UNLOCK(ndev);
|
||||
NETIF_TX_UNLOCK(ring);
|
||||
return 0; /* no data to poll */
|
||||
}
|
||||
|
||||
@ -955,7 +956,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
|
||||
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
|
||||
ring->next_to_use, ring->next_to_clean);
|
||||
ring->stats.io_err_cnt++;
|
||||
NETIF_TX_UNLOCK(ndev);
|
||||
NETIF_TX_UNLOCK(ring);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -967,7 +968,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
|
||||
prefetch(&ring->desc_cb[ring->next_to_clean]);
|
||||
}
|
||||
|
||||
NETIF_TX_UNLOCK(ndev);
|
||||
NETIF_TX_UNLOCK(ring);
|
||||
|
||||
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
|
||||
netdev_tx_completed_queue(dev_queue, pkts, bytes);
|
||||
@ -1028,7 +1029,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
|
||||
int head;
|
||||
int bytes, pkts;
|
||||
|
||||
NETIF_TX_LOCK(ndev);
|
||||
NETIF_TX_LOCK(ring);
|
||||
|
||||
head = ring->next_to_use; /* ntu :soft setted ring position*/
|
||||
bytes = 0;
|
||||
@ -1036,7 +1037,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
|
||||
while (head != ring->next_to_clean)
|
||||
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
|
||||
|
||||
NETIF_TX_UNLOCK(ndev);
|
||||
NETIF_TX_UNLOCK(ring);
|
||||
|
||||
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
|
||||
netdev_tx_reset_queue(dev_queue);
|
||||
|
Loading…
Reference in New Issue
Block a user