mirror of https://github.com/torvalds/linux.git
synced 2024-11-30 08:01:59 +00:00
625788b584
Before adding yet another possibly contended atomic_long_t, it is time to add per-cpu storage for existing ones: dev->tx_dropped, dev->rx_dropped, and dev->rx_nohandler.

Because many devices do not have to increment such counters, allocate the per-cpu storage on demand, so that dev_get_stats() does not have to spend considerable time folding zero counters.

Note that some drivers have abused these counters, which were supposed to be used only by the core networking stack.

v4: use per_cpu_ptr() in dev_get_stats() (Jakub)
v3: added a READ_ONCE() in netdev_core_stats_alloc() (Paolo)
v2: add a missing include (reported by kernel test robot <lkp@intel.com>)
    Change in netdev_core_stats_alloc() (Jakub)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: jeffreyji <jeffreyji@google.com>
Reviewed-by: Brian Vazquez <brianvv@google.com>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20220311051420.2608812-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
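For context, this is roughly the shape of the on-demand per-cpu counter scheme the changelog describes, used by dev_core_stats_rx_dropped_inc() in the file below. This is a simplified sketch, not the exact upstream code; the allocation helper and error paths are condensed:

struct net_device_core_stats {
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	rx_nohandler;
};

/* Sketch: the per-cpu block is allocated the first time any counter
 * fires, so devices that never drop packets pay nothing and
 * dev_get_stats() has no zero-filled per-cpu storage to fold.
 */
static inline void dev_core_stats_rx_dropped_inc(struct net_device *dev)
{
	struct net_device_core_stats __percpu *p;

	/* READ_ONCE() pairs with the store publishing dev->core_stats
	 * in netdev_core_stats_alloc() (the v3 note above).
	 */
	p = READ_ONCE(dev->core_stats);
	if (unlikely(!p))
		p = netdev_core_stats_alloc(dev);	/* may fail under memory pressure */
	if (p)
		this_cpu_inc(p->rx_dropped);
}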
140 lines
3.1 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>

struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	/* No cells (init failed or never ran), cloned skb, or GRO elided
	 * on this device: fall back to the regular receive path.
	 */
	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
drop:
		/* Device down, or this CPU's queue is over the backlog limit. */
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	__skb_queue_tail(&cell->napi_skbs, skb);
	/* 0 -> 1 transition: the poller is not scheduled yet, arm it. */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);
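A typical caller is a tunnel driver's receive path: instead of handing each decapsulated skb to netif_rx() directly, it queues the skb on the current CPU's cell so gro_cell_poll() can run it through GRO in NAPI context. A hypothetical sketch (my_tunnel and its fields are illustrative, not from this file):

/* Hypothetical caller; names are illustrative. */
static int my_tunnel_rcv(struct my_tunnel *t, struct sk_buff *skb)
{
	skb->dev = t->dev;	/* gro_cells_receive() reads skb->dev */
	return gro_cells_receive(&t->gro_cells, skb);
}

Note that the 0 -> 1 queue-length transition above is what arms the NAPI: napi_schedule() is only needed when the queue was empty, since a non-empty queue means the poller is already scheduled.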
/* called under BH context */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Queue drained before the budget ran out: per the NAPI contract
	 * we may complete; otherwise we will be polled again.
	 */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

		/* These NAPIs have no backing device IRQ: exclude them
		 * from busy polling.
		 */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);
struct percpu_free_defer {
	struct rcu_head	rcu;
	void __percpu	*ptr;
};

static void percpu_free_defer_callback(struct rcu_head *head)
{
	struct percpu_free_defer *defer;

	defer = container_of(head, struct percpu_free_defer, rcu);
	free_percpu(defer->ptr);
	kfree(defer);
}

void gro_cells_destroy(struct gro_cells *gcells)
{
	struct percpu_free_defer *defer;
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* We need to observe an rcu grace period before freeing ->cells,
	 * because netpoll could access dev->napi_list under rcu protection.
	 * Try hard using call_rcu() instead of synchronize_rcu(),
	 * because we might be called from cleanup_net(), and we
	 * definitely do not want to block this critical task.
	 */
	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
	if (likely(defer)) {
		defer->ptr = gcells->cells;
		call_rcu(&defer->rcu, percpu_free_defer_callback);
	} else {
		/* We do not hold RTNL at this point, synchronize_net()
		 * would not be able to expedite this sync.
		 */
		synchronize_rcu_expedited();
		free_percpu(gcells->cells);
	}
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);
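Putting the three exported entry points together, a device would typically wire them into its lifecycle as below. This is a hypothetical sketch (my_tunnel is illustrative; the ndo hook names are the standard net_device_ops ones):

static int my_tunnel_dev_init(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* Allocates one gro_cell per possible CPU and enables its NAPI. */
	return gro_cells_init(&t->gro_cells, dev);
}

static void my_tunnel_dev_uninit(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* Disables the NAPIs, purges queued skbs, and defers the per-cpu
	 * free past an RCU grace period, as shown above.
	 */
	gro_cells_destroy(&t->gro_cells);
}

Calling gro_cells_init() from ndo_init lets an allocation failure propagate out of register_netdevice(), which is why it returns -ENOMEM rather than falling back silently.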