net: Use static_key for XPS maps
Use static_key for XPS maps to reduce the cost of extra map checks,
similar to how it is used for RPS and RFS. This includes a static_key
'xps_needed' for XPS and another, 'xps_rxqs_needed', for XPS using the
Rx queues map.

Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 80d19669ec
commit 04157469b7
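The hunks below gate the XPS map lookups behind jump labels: static_key_false() compiles to a straight-line no-op branch until static_key_slow_inc() patches it in at runtime. As context, here is a minimal sketch of that pattern under the assumption of a kernel build environment; the jump-label calls are the real kernel API used by this commit, but the example_* names are hypothetical and not part of the change.

/*
 * Illustrative sketch only (not from this commit): the jump-label API
 * calls are real kernel interfaces, the example_* names are made up.
 */
#include <linux/jump_label.h>
#include <linux/types.h>

struct static_key example_needed __read_mostly;

/* Stand-in for an expensive per-packet lookup such as walking XPS maps. */
static int example_expensive_lookup(void)
{
	return 0;
}

/*
 * Hot path: while the key is disabled, static_key_false() behaves as a
 * constant-false branch (a patched no-op), so the lookup is skipped at
 * essentially zero cost.
 */
static int example_fast_path(void)
{
	if (!static_key_false(&example_needed))
		return -1;

	return example_expensive_lookup();
}

/* Configuration path: enabling or disabling live-patches the branch. */
static void example_set_enabled(bool on)
{
	if (on)
		static_key_slow_inc(&example_needed);
	else
		static_key_slow_dec(&example_needed);
}

This is why the reset and transmit paths in the diff can test xps_needed and xps_rxqs_needed cheaply before touching any maps.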
@@ -2081,6 +2081,10 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
 EXPORT_SYMBOL(netdev_txq_to_tc);
 
 #ifdef CONFIG_XPS
+struct static_key xps_needed __read_mostly;
+EXPORT_SYMBOL(xps_needed);
+struct static_key xps_rxqs_needed __read_mostly;
+EXPORT_SYMBOL(xps_rxqs_needed);
 static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)		\
 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
@@ -2168,14 +2172,18 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 	struct xps_dev_maps *dev_maps;
 	unsigned int nr_ids;
 
+	if (!static_key_false(&xps_needed))
+		return;
+
 	mutex_lock(&xps_map_mutex);
 
-	dev_maps = xmap_dereference(dev->xps_rxqs_map);
-	if (dev_maps) {
-		nr_ids = dev->num_rx_queues;
-		clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset,
-			       count, true);
-
+	if (static_key_false(&xps_rxqs_needed)) {
+		dev_maps = xmap_dereference(dev->xps_rxqs_map);
+		if (dev_maps) {
+			nr_ids = dev->num_rx_queues;
+			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
+				       offset, count, true);
+		}
 	}
 
 	dev_maps = xmap_dereference(dev->xps_cpus_map);
@@ -2189,6 +2197,10 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 		       false);
 
 out_no_maps:
+	if (static_key_enabled(&xps_rxqs_needed))
+		static_key_slow_dec(&xps_rxqs_needed);
+
+	static_key_slow_dec(&xps_needed);
 	mutex_unlock(&xps_map_mutex);
 }
 
@@ -2297,6 +2309,10 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	if (!new_dev_maps)
 		goto out_no_new_maps;
 
+	static_key_slow_inc(&xps_needed);
+	if (is_rxqs_map)
+		static_key_slow_inc(&xps_rxqs_needed);
+
 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
 	     j < nr_ids;) {
 		/* copy maps belonging to foreign traffic classes */
@@ -3450,6 +3466,9 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 	struct xps_map *map;
 	int queue_index = -1;
 
+	if (!static_key_false(&xps_needed))
+		return -1;
+
 	rcu_read_lock();
 	dev_maps = rcu_dereference(dev->xps_cpus_map);
 	if (dev_maps) {