/*
 * net/core/netprio_cgroup.c	Priority Control Group
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Neil Horman <nhorman@tuxdriver.com>
 */
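
/*
 * Overview (descriptive comment added for clarity): the net_prio cgroup
 * subsystem lets an administrator assign a per-network-interface priority
 * to each cgroup through the net_prio.ifpriomap file, while
 * net_prio.prioidx exposes the cgroup id used as the index into each
 * device's priomap.  Sockets of tasks in the cgroup carry that index in
 * sk->sk_cgrp_prioidx, and the transmit path (see skb_update_prio() in
 * net/core/dev.c) uses dev->priomap to translate it into skb->priority.
 */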

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/netprio_cgroup.h>

#include <linux/fdtable.h>

#define PRIOMAP_MIN_SZ		128

/*
 * Extend @dev->priomap so that it's large enough to accommodate
 * @target_idx.  @dev->priomap.priomap_len > @target_idx after successful
 * return.  Must be called under rtnl lock.
 */
static int extend_netdev_table(struct net_device *dev, u32 target_idx)
{
	struct netprio_map *old, *new;
	size_t new_sz, new_len;

	/* is the existing priomap large enough? */
	old = rtnl_dereference(dev->priomap);
	if (old && old->priomap_len > target_idx)
		return 0;

	/*
	 * Determine the new size.  Let's keep it power-of-two.  We start
	 * from PRIOMAP_MIN_SZ and double it until it's large enough to
	 * accommodate @target_idx.
	 */
	new_sz = PRIOMAP_MIN_SZ;
	while (true) {
		new_len = (new_sz - offsetof(struct netprio_map, priomap)) /
			sizeof(new->priomap[0]);
		if (new_len > target_idx)
			break;
		new_sz *= 2;
		/* overflowed? */
		if (WARN_ON(new_sz < PRIOMAP_MIN_SZ))
			return -ENOSPC;
	}

	/* allocate & copy */
	new = kzalloc(new_sz, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (old)
		memcpy(new->priomap, old->priomap,
		       old->priomap_len * sizeof(old->priomap[0]));

	new->priomap_len = new_len;

	/* install the new priomap */
	rcu_assign_pointer(dev->priomap, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
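
/*
 * Worked example of the sizing loop above, assuming a 64-bit build where
 * offsetof(struct netprio_map, priomap) is 20 bytes: the initial 128-byte
 * table holds (128 - 20) / 4 = 27 entries, so a @target_idx of e.g. 40
 * doubles new_sz to 256, giving new_len = (256 - 20) / 4 = 59 > 40.
 */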

/**
 * netprio_prio - return the effective netprio of a cgroup-net_device pair
 * @css: css part of the target pair
 * @dev: net_device part of the target pair
 *
 * Should be called under RCU read or rtnl lock.
 */
static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev)
{
	struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
	int id = css->cgroup->id;

	if (map && id < map->priomap_len)
		return map->priomap[id];
	return 0;
}

/**
 * netprio_set_prio - set netprio on a cgroup-net_device pair
 * @css: css part of the target pair
 * @dev: net_device part of the target pair
 * @prio: prio to set
 *
 * Set netprio to @prio on @css-@dev pair.  Should be called under rtnl
 * lock and may fail under memory pressure for non-zero @prio.
 */
static int netprio_set_prio(struct cgroup_subsys_state *css,
			    struct net_device *dev, u32 prio)
{
	struct netprio_map *map;
	int id = css->cgroup->id;
	int ret;

	/* avoid extending priomap for zero writes */
	map = rtnl_dereference(dev->priomap);
	if (!prio && (!map || map->priomap_len <= id))
		return 0;

	ret = extend_netdev_table(dev, id);
	if (ret)
		return ret;

	map = rtnl_dereference(dev->priomap);
	map->priomap[id] = prio;
	return 0;
}

static struct cgroup_subsys_state *
cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static int cgrp_css_online(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *parent_css = css_parent(css);
	struct net_device *dev;
	int ret = 0;

	if (!parent_css)
		return 0;

	rtnl_lock();
	/*
	 * Inherit prios from the parent.  As all prios are set during
	 * onlining, there is no need to clear them on offline.
	 */
	for_each_netdev(&init_net, dev) {
		u32 prio = netprio_prio(parent_css, dev);

		ret = netprio_set_prio(css, dev, prio);
		if (ret)
			break;
	}
	rtnl_unlock();
	return ret;
}

static void cgrp_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return css->cgroup->id;
}

static int read_priomap(struct seq_file *sf, void *v)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev)
		seq_printf(sf, "%s %u\n", dev->name,
			   netprio_prio(seq_css(sf), dev));
	rcu_read_unlock();
	return 0;
}
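
/*
 * Reading net_prio.ifpriomap therefore yields one "<ifname> <prio>" line
 * per device in init_net, for example (illustrative output only):
 *
 *	lo 0
 *	eth0 5
 */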

static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
			 const char *buffer)
{
	char devname[IFNAMSIZ + 1];
	struct net_device *dev;
	u32 prio;
	int ret;

	if (sscanf(buffer, "%"__stringify(IFNAMSIZ)"s %u", devname, &prio) != 2)
		return -EINVAL;

	dev = dev_get_by_name(&init_net, devname);
	if (!dev)
		return -ENODEV;

	rtnl_lock();

	ret = netprio_set_prio(css, dev, prio);

	rtnl_unlock();
	dev_put(dev);
	return ret;
}
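
/*
 * Userspace sketch (not part of this module) of how a priority is written,
 * assuming the net_prio hierarchy is mounted at /sys/fs/cgroup/net_prio and
 * a child group "web" exists; the string must match the "<ifname> <prio>"
 * format parsed by write_priomap() above:
 *
 *	int fd = open("/sys/fs/cgroup/net_prio/web/net_prio.ifpriomap",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		if (write(fd, "eth0 5", 6) < 0)
 *			perror("write");
 *		close(fd);
 *	}
 */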

static int update_netprio(const void *v, struct file *file, unsigned n)
{
	int err;
	struct socket *sock = sock_from_file(file, &err);

	if (sock)
		sock->sk->sk_cgrp_prioidx = (u32)(unsigned long)v;
	return 0;
}

static void net_prio_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
{
	struct task_struct *p;
	void *v = (void *)(unsigned long)css->cgroup->id;

	cgroup_taskset_for_each(p, css, tset) {
		task_lock(p);
		iterate_fd(p->files, 0, update_netprio, v);
		task_unlock(p);
	}
}
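
/*
 * Note: the attach handler above only re-tags sockets already open as file
 * descriptors of the migrating tasks; sockets created after the move are
 * tagged with the cgroup's id at creation time (see sock_update_netprioidx()
 * in net/core/sock.c), so both cases end up with the new sk_cgrp_prioidx.
 */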

static struct cftype ss_files[] = {
	{
		.name = "prioidx",
		.read_u64 = read_prioidx,
	},
	{
		.name = "ifpriomap",
		.seq_show = read_priomap,
		.write_string = write_priomap,
	},
	{ }	/* terminate */
};

struct cgroup_subsys net_prio_subsys = {
	.name		= "net_prio",
	.css_alloc	= cgrp_css_alloc,
	.css_online	= cgrp_css_online,
	.css_free	= cgrp_css_free,
	.attach		= net_prio_attach,
	.subsys_id	= net_prio_subsys_id,
	.base_cftypes	= ss_files,
	.module		= THIS_MODULE,
};

static int netprio_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netprio_map *old;

	/*
	 * Note this is called with rtnl_lock held so we have update side
	 * protection on our rcu assignments
	 */

	switch (event) {
	case NETDEV_UNREGISTER:
		old = rtnl_dereference(dev->priomap);
		RCU_INIT_POINTER(dev->priomap, NULL);
		if (old)
			kfree_rcu(old, rcu);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block netprio_device_notifier = {
	.notifier_call = netprio_device_event
};

static int __init init_cgroup_netprio(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_prio_subsys);
	if (ret)
		goto out;

	register_netdevice_notifier(&netprio_device_notifier);

out:
	return ret;
}

static void __exit exit_cgroup_netprio(void)
{
	struct netprio_map *old;
	struct net_device *dev;

	unregister_netdevice_notifier(&netprio_device_notifier);

	cgroup_unload_subsys(&net_prio_subsys);

	rtnl_lock();
	for_each_netdev(&init_net, dev) {
		old = rtnl_dereference(dev->priomap);
		RCU_INIT_POINTER(dev->priomap, NULL);
		if (old)
			kfree_rcu(old, rcu);
	}
	rtnl_unlock();
}

module_init(init_cgroup_netprio);
module_exit(exit_cgroup_netprio);
MODULE_LICENSE("GPL v2");