net: Refactor removal of queues from XPS map and apply on num_tc changes

This patch updates the code for removing queues from the XPS map so that it
can be applied any time we change either the number of traffic classes or
the mapping of a given block of queues.  This way we avoid having queues
pull traffic from a foreign traffic class.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6234f87407
parent 8d059b0f6f
Author:    Alexander Duyck <alexander.h.duyck@intel.com>
Date:      2016-10-28 11:46:49 -04:00
Committer: David S. Miller <davem@davemloft.net>
--- a/net/core/dev.c
+++ b/net/core/dev.c

@@ -1970,32 +1970,50 @@ static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P) \
         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
-static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
-                                        int cpu, u16 index)
+static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
+                             int tci, u16 index)
 {
         struct xps_map *map = NULL;
         int pos;
 
         if (dev_maps)
-                map = xmap_dereference(dev_maps->cpu_map[cpu]);
+                map = xmap_dereference(dev_maps->cpu_map[tci]);
+        if (!map)
+                return false;
 
-        for (pos = 0; map && pos < map->len; pos++) {
-                if (map->queues[pos] == index) {
-                        if (map->len > 1) {
-                                map->queues[pos] = map->queues[--map->len];
-                        } else {
-                                RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
-                                kfree_rcu(map, rcu);
-                                map = NULL;
-                        }
+        for (pos = map->len; pos--;) {
+                if (map->queues[pos] != index)
+                        continue;
+
+                if (map->len > 1) {
+                        map->queues[pos] = map->queues[--map->len];
                         break;
                 }
+
+                RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
+                kfree_rcu(map, rcu);
+                return false;
         }
 
-        return map;
+        return true;
 }
 
-static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
+static bool remove_xps_queue_cpu(struct net_device *dev,
+                                 struct xps_dev_maps *dev_maps,
+                                 int cpu, u16 offset, u16 count)
+{
+        int i, j;
+
+        for (i = count, j = offset; i--; j++) {
+                if (!remove_xps_queue(dev_maps, cpu, j))
+                        break;
+        }
+
+        return i < 0;
+}
+
+static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
+                                   u16 count)
 {
         struct xps_dev_maps *dev_maps;
         int cpu, i;
@@ -2007,21 +2025,16 @@ static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
         if (!dev_maps)
                 goto out_no_maps;
 
-        for_each_possible_cpu(cpu) {
-                for (i = index; i < dev->num_tx_queues; i++) {
-                        if (!remove_xps_queue(dev_maps, cpu, i))
-                                break;
-                }
-                if (i == dev->num_tx_queues)
-                        active = true;
-        }
+        for_each_possible_cpu(cpu)
+                active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
+                                               offset, count);
 
         if (!active) {
                 RCU_INIT_POINTER(dev->xps_maps, NULL);
                 kfree_rcu(dev_maps, rcu);
         }
 
-        for (i = index; i < dev->num_tx_queues; i++)
+        for (i = offset + (count - 1); count--; i--)
                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
                                              NUMA_NO_NODE);
 
@@ -2029,6 +2042,11 @@ out_no_maps:
         mutex_unlock(&xps_map_mutex);
 }
 
+static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
+{
+        netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
+}
+
 static struct xps_map *expand_xps_map(struct xps_map *map,
                                       int cpu, u16 index)
 {
@@ -2192,6 +2210,9 @@ EXPORT_SYMBOL(netif_set_xps_queue);
 #endif
 void netdev_reset_tc(struct net_device *dev)
 {
+#ifdef CONFIG_XPS
+        netif_reset_xps_queues_gt(dev, 0);
+#endif
         dev->num_tc = 0;
         memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
         memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
@@ -2203,6 +2224,9 @@ int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
         if (tc >= dev->num_tc)
                 return -EINVAL;
 
+#ifdef CONFIG_XPS
+        netif_reset_xps_queues(dev, offset, count);
+#endif
         dev->tc_to_txq[tc].count = count;
         dev->tc_to_txq[tc].offset = offset;
         return 0;
@@ -2214,6 +2238,9 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
         if (num_tc > TC_MAX_QUEUE)
                 return -EINVAL;
 
+#ifdef CONFIG_XPS
+        netif_reset_xps_queues_gt(dev, 0);
+#endif
         dev->num_tc = num_tc;
         return 0;
 }