mirror of https://github.com/torvalds/linux.git, synced 2024-11-27 14:41:39 +00:00
commit 2543a6000e
After the cited commit, gro_cells_destroy() became damn slow
on hosts with a lot of cores.

This is because we now have one additional synchronize_net() per cpu, as
stated in the changelog.

gro_cells_init() sets NAPI_STATE_NO_BUSY_POLL, and this used to be enough
to avoid one synchronize_net() call per netif_napi_del().

We can factorize all the synchronize_net() calls into a single one,
right before freeing the per-cpu memory.
Fixes: 5198d545db ("net: remove napi_hash_del() from driver-facing API")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20201124203822.1360107-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
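
In effect, the patch turns one RCU grace period per possible CPU into a single
one. A minimal sketch of the change, assuming (per the cited commit's
changelog) that netif_napi_del() is equivalent to __netif_napi_del() followed
by synchronize_net():

/* before: one synchronize_net() per possible CPU */
for_each_possible_cpu(i) {
	struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

	napi_disable(&cell->napi);
	netif_napi_del(&cell->napi);	/* implies one synchronize_net() */
	__skb_queue_purge(&cell->napi_skbs);
}

/* after: a single synchronize_net() for the whole teardown */
for_each_possible_cpu(i) {
	struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

	napi_disable(&cell->napi);
	__netif_napi_del(&cell->napi);	/* no grace period here */
	__skb_queue_purge(&cell->napi_skbs);
}
synchronize_net();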
114 lines · 2.3 KiB · C
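net/core/gro_cells.c — per-cpu skb queues and NAPI contexts that let virtual
devices (e.g. tunnels) feed packets through GRO: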
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>

/* One cell per possible CPU: a private queue of skbs feeding a
 * dedicated NAPI instance.
 */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	/* Fall back to the regular receive path if the cells are not
	 * allocated, the skb is cloned, or the device elides GRO.
	 */
	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
drop:
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	__skb_queue_tail(&cell->napi_skbs, skb);
	/* Schedule the NAPI only on the empty -> non-empty transition;
	 * otherwise it is already scheduled or being polled.
	 */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);

/* called under BH context */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Per the NAPI contract, complete only when the budget was not
	 * exhausted; otherwise we will be polled again.
	 */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

		/* These cells are not meant to be busy-polled. */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);

void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		/* __netif_napi_del() skips the synchronize_net() that
		 * netif_napi_del() would perform for each cell.
		 */
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* This barrier is needed because netpoll could access dev->napi_list
	 * under rcu protection.
	 */
	synchronize_net();

	free_percpu(gcells->cells);
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);
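
For context, a minimal sketch of how a virtual device might use this API, in
the style of existing tunnel drivers; the device name, private struct and
function names ("my_tunnel*") are hypothetical:

#include <linux/netdevice.h>
#include <net/gro_cells.h>

/* Hypothetical private state for a tunnel-like device. */
struct my_tunnel {
	struct gro_cells gro_cells;
};

static int my_tunnel_init(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* Allocates the per-cpu cells and enables their NAPIs. */
	return gro_cells_init(&t->gro_cells, dev);
}

static void my_tunnel_uninit(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* After this patch: one synchronize_net() total. */
	gro_cells_destroy(&t->gro_cells);
}

/* Called from the decapsulated receive path instead of netif_rx(). */
static int my_tunnel_rx(struct my_tunnel *t, struct sk_buff *skb)
{
	return gro_cells_receive(&t->gro_cells, skb);
}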