Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply fixes on top of newer things in tree (efi-stub).

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
net/core/datagram.c
@@ -577,7 +577,7 @@ EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
 /**
  * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
  * @skb: buffer to copy
- * @from: io vector to copy to
+ * @from: io vector to copy from
  * @offset: offset in the io vector to start copying from
  * @count: amount of vectors to copy to buffer from
  *

net/core/dev.c (622 lines changed): diff suppressed because it is too large.

net/core/dev_addr_lists.c
@@ -752,7 +752,7 @@ int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
 EXPORT_SYMBOL(dev_mc_del_global);
 
 /**
- * dev_mc_sync - Synchronize device's unicast list to another device
+ * dev_mc_sync - Synchronize device's multicast list to another device
  * @to: destination device
  * @from: source device
 *
@@ -780,7 +780,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
 EXPORT_SYMBOL(dev_mc_sync);
 
 /**
- * dev_mc_sync_multiple - Synchronize device's unicast list to another
+ * dev_mc_sync_multiple - Synchronize device's multicast list to another
  * device, but allow for multiple calls to sync to multiple devices.
  * @to: destination device
  * @from: source device

net/core/drop_monitor.c
@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
     .hdrsize = 0,
     .name = "NET_DM",
     .version = 2,
-    .maxattr = NET_DM_CMD_MAX,
 };
 
 static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
@@ -106,6 +105,10 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
     return skb;
 }
 
+static struct genl_multicast_group dropmon_mcgrps[] = {
+    { .name = "events", },
+};
+
 static void send_dm_alert(struct work_struct *work)
 {
     struct sk_buff *skb;
@@ -116,7 +119,8 @@ static void send_dm_alert(struct work_struct *work)
     skb = reset_per_cpu_data(data);
 
     if (skb)
-        genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+        genlmsg_multicast(&net_drop_monitor_family, skb, 0,
+                  0, GFP_KERNEL);
 }
 
 /*
@@ -333,7 +337,7 @@ out:
     return NOTIFY_DONE;
 }
 
-static struct genl_ops dropmon_ops[] = {
+static const struct genl_ops dropmon_ops[] = {
     {
         .cmd = NET_DM_CMD_CONFIG,
         .doit = net_dm_cmd_config,
@@ -364,13 +368,13 @@ static int __init init_net_drop_monitor(void)
         return -ENOSPC;
     }
 
-    rc = genl_register_family_with_ops(&net_drop_monitor_family,
-                       dropmon_ops,
-                       ARRAY_SIZE(dropmon_ops));
+    rc = genl_register_family_with_ops_groups(&net_drop_monitor_family,
+                          dropmon_ops, dropmon_mcgrps);
     if (rc) {
         pr_err("Could not create drop monitor netlink family\n");
         return rc;
     }
+    WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
 
     rc = register_netdevice_notifier(&dropmon_net_notifier);
     if (rc < 0) {
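
For orientation, a minimal sketch of the registration pattern the drop_monitor hunks migrate to. The names (my_family, my_ops, my_grps) are illustrative, not from this commit; ops and multicast groups now register together, and a group's numeric ID becomes family.mcgrp_offset plus its index in the array, which is what the WARN_ON above checks:

    static struct genl_family my_family = {
        .id      = GENL_ID_GENERATE,
        .name    = "MY_FAMILY",
        .version = 1,
    };

    static const struct genl_ops my_ops[] = {
        /* .cmd / .doit entries, as in dropmon_ops above */
    };

    static struct genl_multicast_group my_grps[] = {
        { .name = "events", },
    };

    static int __init my_init(void)
    {
        /* one call registers both; group ID = my_family.mcgrp_offset + 0 */
        return genl_register_family_with_ops_groups(&my_family, my_ops,
                                                    my_grps);
    }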

net/core/ethtool.c
@@ -81,6 +81,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
     [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
     [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
     [NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
+    [NETIF_F_GSO_IPIP_BIT] =         "tx-ipip-segmentation",
+    [NETIF_F_GSO_SIT_BIT] =          "tx-sit-segmentation",
     [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
     [NETIF_F_GSO_MPLS_BIT] =         "tx-mpls-segmentation",
 
@@ -94,6 +96,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
     [NETIF_F_LOOPBACK_BIT] =         "loopback",
     [NETIF_F_RXFCS_BIT] =            "rx-fcs",
     [NETIF_F_RXALL_BIT] =            "rx-all",
+    [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 };
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)

net/core/fib_rules.c
@@ -460,7 +460,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
         if (frh->action && (frh->action != rule->action))
             continue;
 
-        if (frh->table && (frh_get_table(frh, tb) != rule->table))
+        if (frh_get_table(frh, tb) &&
+            (frh_get_table(frh, tb) != rule->table))
             continue;
 
         if (tb[FRA_PRIORITY] &&

net/core/filter.c
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
     struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
     bpf_jit_free(fp);
-    kfree(fp);
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
 
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
     if (fprog->filter == NULL)
         return -EINVAL;
 
-    fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+    fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
     if (!fp)
         return -ENOMEM;
     memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
     struct sk_filter *fp, *old_fp;
     unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+    unsigned int sk_fsize = sk_filter_size(fprog->len);
     int err;
 
     if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
     if (fprog->filter == NULL)
         return -EINVAL;
 
-    fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+    fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
     if (!fp)
         return -ENOMEM;
     if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-        sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+        sock_kfree_s(sk, fp, sk_fsize);
         return -EFAULT;
     }
 
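
The old code sized the allocation as fsize + sizeof(*fp), ignoring that the instruction array lives inside struct sk_filter. Assuming sk_filter keeps its insns[] flexible array at the end, the sk_filter_size() helper used above presumably reduces to an offsetof computation; an illustrative, hypothetical equivalent:

    /* Illustrative only: bytes needed for a struct sk_filter carrying
     * proglen classic BPF instructions in its trailing insns[] array. */
    static inline unsigned int my_filter_size(unsigned int proglen)
    {
        return offsetof(struct sk_filter, insns[proglen]);
    }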

net/core/flow_dissector.c
@@ -25,9 +25,35 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i
     memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
 }
 
+/**
+ * skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: buffer to extract the ports from
+ * @thoff: transport header offset
+ * @ip_proto: protocol for which to get port offset
+ *
+ * The function will try to retrieve the ports at offset thoff + poff where poff
+ * is the protocol port offset returned from proto_ports_offset
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+    int poff = proto_ports_offset(ip_proto);
+
+    if (poff >= 0) {
+        __be32 *ports, _ports;
+
+        ports = skb_header_pointer(skb, thoff + poff,
+                       sizeof(_ports), &_ports);
+        if (ports)
+            return *ports;
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL(skb_flow_get_ports);
+
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 {
-    int poff, nhoff = skb_network_offset(skb);
+    int nhoff = skb_network_offset(skb);
     u8 ip_proto;
     __be16 proto = skb->protocol;
 
@@ -40,15 +66,15 @@ again:
         struct iphdr _iph;
 ip:
         iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-        if (!iph)
+        if (!iph || iph->ihl < 5)
             return false;
+        nhoff += iph->ihl * 4;
 
+        ip_proto = iph->protocol;
         if (ip_is_fragment(iph))
             ip_proto = 0;
-        else
-            ip_proto = iph->protocol;
+
         iph_to_flow_copy_addrs(flow, iph);
-        nhoff += iph->ihl * 4;
         break;
     }
     case __constant_htons(ETH_P_IPV6): {
@@ -150,16 +176,7 @@ ipv6:
     }
 
     flow->ip_proto = ip_proto;
-    poff = proto_ports_offset(ip_proto);
-    if (poff >= 0) {
-        __be32 *ports, _ports;
-
-        nhoff += poff;
-        ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
-        if (ports)
-            flow->ports = *ports;
-    }
-
+    flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
     flow->thoff = (u16) nhoff;
 
     return true;
@@ -167,6 +184,22 @@ ipv6:
 EXPORT_SYMBOL(skb_flow_dissect);
 
 static u32 hashrnd __read_mostly;
+static __always_inline void __flow_hash_secret_init(void)
+{
+    net_get_random_once(&hashrnd, sizeof(hashrnd));
+}
+
+static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+{
+    __flow_hash_secret_init();
+    return jhash_3words(a, b, c, hashrnd);
+}
+
+static __always_inline u32 __flow_hash_1word(u32 a)
+{
+    __flow_hash_secret_init();
+    return jhash_1word(a, hashrnd);
+}
 
 /*
  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
@@ -193,9 +226,9 @@ void __skb_get_rxhash(struct sk_buff *skb)
         swap(keys.port16[0], keys.port16[1]);
     }
 
-    hash = jhash_3words((__force u32)keys.dst,
-                (__force u32)keys.src,
-                (__force u32)keys.ports, hashrnd);
+    hash = __flow_hash_3words((__force u32)keys.dst,
+                  (__force u32)keys.src,
+                  (__force u32)keys.ports);
     if (!hash)
         hash = 1;
 
@@ -231,7 +264,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
             hash = skb->sk->sk_hash;
         else
             hash = (__force u16) skb->protocol;
-        hash = jhash_1word(hash, hashrnd);
+        hash = __flow_hash_1word(hash);
 
     return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
@@ -323,7 +356,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
         else
             hash = (__force u16) skb->protocol ^
                 skb->rxhash;
-        hash = jhash_1word(hash, hashrnd);
+        hash = __flow_hash_1word(hash);
         queue_index = map->queues[
             ((u64)hash * map->len) >> 32];
     }
@@ -352,7 +385,7 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 
         if (queue_index != new_index && sk &&
             rcu_access_pointer(sk->sk_dst_cache))
-            sk_tx_queue_set(sk, queue_index);
+            sk_tx_queue_set(sk, new_index);
 
         queue_index = new_index;
     }
@@ -378,11 +411,3 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
     skb_set_queue_mapping(skb, queue_index);
     return netdev_get_tx_queue(dev, queue_index);
 }
-
-static int __init initialize_hashrnd(void)
-{
-    get_random_bytes(&hashrnd, sizeof(hashrnd));
-    return 0;
-}
-
-late_initcall_sync(initialize_hashrnd);
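
The factored-out helper makes port extraction reusable outside the full dissector. A minimal caller sketch (hypothetical function name; the keys come from a prior skb_flow_dissect(), which records thoff and ip_proto as shown above):

    static __be32 example_flow_ports(const struct sk_buff *skb)
    {
        struct flow_keys keys;

        if (!skb_flow_dissect(skb, &keys))
            return 0;
        /* ports are the 32-bit source/destination pair found at the
         * transport header recorded by the dissector, if any */
        return skb_flow_get_ports(skb, keys.thoff, keys.ip_proto);
    }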

net/core/iovec.c
@@ -48,7 +48,8 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
             if (err < 0)
                 return err;
         }
-        m->msg_name = address;
+        if (m->msg_name)
+            m->msg_name = address;
     } else {
         m->msg_name = NULL;
     }
@@ -100,7 +101,7 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
 EXPORT_SYMBOL(memcpy_toiovecend);
 
 /*
- *    Copy iovec from kernel. Returns -EFAULT on error.
+ *    Copy iovec to kernel. Returns -EFAULT on error.
  */
 
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,

net/core/neighbour.c
@@ -867,7 +867,7 @@ static void neigh_invalidate(struct neighbour *neigh)
 static void neigh_probe(struct neighbour *neigh)
     __releases(neigh->lock)
 {
-    struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+    struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
     /* keep skb alive even if arp_queue overflows */
     if (skb)
         skb = skb_copy(skb, GFP_ATOMIC);

net/core/net-sysfs.c
@@ -1196,6 +1196,13 @@ static void remove_queue_kobjects(struct net_device *net)
 #endif
 }
 
+static bool net_current_may_mount(void)
+{
+    struct net *net = current->nsproxy->net_ns;
+
+    return ns_capable(net->user_ns, CAP_SYS_ADMIN);
+}
+
 static void *net_grab_current_ns(void)
 {
     struct net *ns = current->nsproxy->net_ns;
@@ -1218,6 +1225,7 @@ static const void *net_netlink_ns(struct sock *sk)
 
 struct kobj_ns_type_operations net_ns_type_operations = {
     .type = KOBJ_NS_TYPE_NET,
+    .current_may_mount = net_current_may_mount,
     .grab_current_ns = net_grab_current_ns,
     .netlink_ns = net_netlink_ns,
     .initial_ns = net_initial_ns,
@@ -1255,7 +1263,7 @@ static void netdev_release(struct device *d)
     BUG_ON(dev->reg_state != NETREG_RELEASED);
 
     kfree(dev->ifalias);
-    kfree((char *)dev - dev->padded);
+    netdev_freemem(dev);
 }
 
 static const void *net_namespace(struct device *d)
@@ -1336,17 +1344,19 @@ int netdev_register_kobject(struct net_device *net)
     return error;
 }
 
-int netdev_class_create_file(struct class_attribute *class_attr)
+int netdev_class_create_file_ns(struct class_attribute *class_attr,
+                const void *ns)
 {
-    return class_create_file(&net_class, class_attr);
+    return class_create_file_ns(&net_class, class_attr, ns);
 }
-EXPORT_SYMBOL(netdev_class_create_file);
+EXPORT_SYMBOL(netdev_class_create_file_ns);
 
-void netdev_class_remove_file(struct class_attribute *class_attr)
+void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+                 const void *ns)
 {
-    class_remove_file(&net_class, class_attr);
+    class_remove_file_ns(&net_class, class_attr, ns);
 }
-EXPORT_SYMBOL(netdev_class_remove_file);
+EXPORT_SYMBOL(netdev_class_remove_file_ns);
 
 int netdev_kobject_init(void)
 {
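
Callers that do not care about namespace tagging presumably keep the old one-argument form as a thin wrapper over the new *_ns variants; a sketch (not taken from this commit):

    static inline int my_class_create_file(struct class_attribute *attr)
    {
        /* NULL means "no namespace tag", matching the old behaviour */
        return netdev_class_create_file_ns(attr, NULL);
    }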

net/core/net_namespace.c
@@ -651,7 +651,7 @@ static int netns_install(struct nsproxy *nsproxy, void *ns)
     struct net *net = ns;
 
     if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
-        !nsown_capable(CAP_SYS_ADMIN))
+        !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
         return -EPERM;
 
     put_net(nsproxy->net_ns);

net/core/netpoll.c
@@ -550,7 +550,7 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
         return;
 
     proto = ntohs(eth_hdr(skb)->h_proto);
-    if (proto == ETH_P_IP) {
+    if (proto == ETH_P_ARP) {
         struct arphdr *arp;
         unsigned char *arp_ptr;
         /* No arp on this interface */
@@ -636,8 +636,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
 
         netpoll_send_skb(np, send_skb);
 
-        /* If there are several rx_hooks for the same address,
-           we're fine by sending a single reply */
+        /* If there are several rx_skb_hooks for the same
+         * address we're fine by sending a single reply
+         */
         break;
     }
     spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -719,8 +720,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
 
         netpoll_send_skb(np, send_skb);
 
-        /* If there are several rx_hooks for the same address,
-           we're fine by sending a single reply */
+        /* If there are several rx_skb_hooks for the same
+         * address, we're fine by sending a single reply
+         */
         break;
     }
     spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -756,11 +758,12 @@ static bool pkt_is_ns(struct sk_buff *skb)
 
 int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-    int proto, len, ulen;
-    int hits = 0;
+    int proto, len, ulen, data_len;
+    int hits = 0, offset;
     const struct iphdr *iph;
     struct udphdr *uh;
     struct netpoll *np, *tmp;
+    uint16_t source;
 
     if (list_empty(&npinfo->rx_np))
         goto out;
@@ -820,7 +823,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 
     len -= iph->ihl*4;
     uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
+    offset = (unsigned char *)(uh + 1) - skb->data;
     ulen = ntohs(uh->len);
+    data_len = skb->len - offset;
+    source = ntohs(uh->source);
 
     if (ulen != len)
         goto out;
@@ -834,9 +840,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
         if (np->local_port && np->local_port != ntohs(uh->dest))
             continue;
 
-        np->rx_hook(np, ntohs(uh->source),
-                (char *)(uh+1),
-                ulen - sizeof(struct udphdr));
+        np->rx_skb_hook(np, source, skb, offset, data_len);
         hits++;
     }
     } else {
@@ -859,7 +863,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
         if (!pskb_may_pull(skb, sizeof(struct udphdr)))
             goto out;
         uh = udp_hdr(skb);
+        offset = (unsigned char *)(uh + 1) - skb->data;
         ulen = ntohs(uh->len);
+        data_len = skb->len - offset;
+        source = ntohs(uh->source);
         if (ulen != skb->len)
             goto out;
         if (udp6_csum_init(skb, uh, IPPROTO_UDP))
@@ -872,9 +879,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
             if (np->local_port && np->local_port != ntohs(uh->dest))
                 continue;
 
-            np->rx_hook(np, ntohs(uh->source),
-                    (char *)(uh+1),
-                    ulen - sizeof(struct udphdr));
+            np->rx_skb_hook(np, source, skb, offset, data_len);
             hits++;
         }
 #endif
@@ -1062,7 +1067,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
     npinfo->netpoll = np;
 
-    if (np->rx_hook) {
+    if (np->rx_skb_hook) {
         spin_lock_irqsave(&npinfo->rx_lock, flags);
         npinfo->rx_flags |= NETPOLL_RX_ENABLED;
         list_add_tail(&np->rx, &npinfo->rx_np);
@@ -1284,15 +1289,14 @@ EXPORT_SYMBOL_GPL(__netpoll_free_async);
 
 void netpoll_cleanup(struct netpoll *np)
 {
-    if (!np->dev)
-        return;
-
     rtnl_lock();
+    if (!np->dev)
+        goto out;
     __netpoll_cleanup(np);
-    rtnl_unlock();
-
     dev_put(np->dev);
     np->dev = NULL;
+out:
+    rtnl_unlock();
 }
 EXPORT_SYMBOL(netpoll_cleanup);
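
Under the new rx_skb_hook contract the UDP payload is no longer copied into a temporary buffer; the hook receives the skb plus an offset/length pair. A sketch of a conforming handler (hypothetical, with the signature implied by the call sites above):

    static void my_rx_skb_hook(struct netpoll *np, int source,
                               struct sk_buff *skb, int offset, int len)
    {
        /* offset was computed from skb->data above, so the payload
         * can be read in place instead of from a copied buffer */
        const unsigned char *payload = skb->data + offset;

        pr_debug("netpoll rx: %d bytes from source port %d (first byte %02x)\n",
                 len, source, len ? payload[0] : 0);
    }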

net/core/netprio_cgroup.c
@@ -222,11 +222,10 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
                 struct cgroup_taskset *tset)
 {
     struct task_struct *p;
-    void *v;
+    void *v = (void *)(unsigned long)css->cgroup->id;
 
     cgroup_taskset_for_each(p, css, tset) {
         task_lock(p);
-        v = (void *)(unsigned long)task_netprioidx(p);
         iterate_fd(p->files, 0, update_netprio, v);
         task_unlock(p);
     }
 

net/core/pktgen.c
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
     if (x) {
         int ret;
         __u8 *eth;
+        struct iphdr *iph;
+
         nhead = x->props.header_len - skb_headroom(skb);
         if (nhead > 0) {
             ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
         eth = (__u8 *) skb_push(skb, ETH_HLEN);
         memcpy(eth, pkt_dev->hh, 12);
         *(u16 *) &eth[12] = protocol;
+
+        /* Update IPv4 header len as well as checksum value */
+        iph = ip_hdr(skb);
+        iph->tot_len = htons(skb->len - ETH_HLEN);
+        ip_send_check(iph);
     }
     }
     return 1;

net/core/rtnetlink.c
@@ -1647,9 +1647,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
     }
 
     dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-    rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
-    __dev_notify_flags(dev, old_flags);
+    __dev_notify_flags(dev, old_flags, ~0U);
     return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
@@ -1985,14 +1984,15 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
     return skb->len;
 }
 
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
+          gfp_t flags)
 {
     struct net *net = dev_net(dev);
     struct sk_buff *skb;
     int err = -ENOBUFS;
     size_t if_info_size;
 
-    skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
+    skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
     if (skb == NULL)
         goto errout;
 
@@ -2003,7 +2003,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
         kfree_skb(skb);
         goto errout;
     }
-    rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+    rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
     return;
 errout:
     if (err < 0)
@@ -2717,7 +2717,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
     case NETDEV_JOIN:
         break;
     default:
-        rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+        rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
         break;
     }
     return NOTIFY_DONE;
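
With the extra gfp_t argument, the notification path can now be driven from contexts that must not sleep. An illustrative caller (hypothetical helper, not from this commit):

    static void my_notify_link(struct net_device *dev, bool atomic_ctx)
    {
        /* the new gfp_t argument lets non-sleeping contexts request an
         * atomic allocation instead of the old hard-coded GFP_KERNEL */
        rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U,
                     atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
    }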

net/core/scm.c
@@ -56,9 +56,9 @@ static __inline__ int scm_check_creds(struct ucred *creds)
     if ((creds->pid == task_tgid_vnr(current) ||
          ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
         ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
-          uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
+          uid_eq(uid, cred->suid)) || ns_capable(cred->user_ns, CAP_SETUID)) &&
         ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
-          gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) {
+          gid_eq(gid, cred->sgid)) || ns_capable(cred->user_ns, CAP_SETGID))) {
         return 0;
     }
     return -EPERM;

net/core/secure_seq.c
@@ -7,15 +7,20 @@
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
 #include <linux/string.h>
+#include <linux/net.h>
 
 #include <net/secure_seq.h>
 
-static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
+#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
 
-void net_secret_init(void)
+static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
+
+static __always_inline void net_secret_init(void)
 {
-    get_random_bytes(net_secret, sizeof(net_secret));
+    net_get_random_once(net_secret, sizeof(net_secret));
 }
+#endif
 
 #ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
@@ -42,6 +47,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
     u32 hash[MD5_DIGEST_WORDS];
     u32 i;
 
+    net_secret_init();
     memcpy(hash, saddr, 16);
     for (i = 0; i < 4; i++)
         secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +69,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
     u32 hash[MD5_DIGEST_WORDS];
     u32 i;
 
+    net_secret_init();
     memcpy(hash, saddr, 16);
     for (i = 0; i < 4; i++)
         secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +89,7 @@ __u32 secure_ip_id(__be32 daddr)
 {
     u32 hash[MD5_DIGEST_WORDS];
 
+    net_secret_init();
     hash[0] = (__force __u32) daddr;
     hash[1] = net_secret[13];
     hash[2] = net_secret[14];
@@ -96,6 +104,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4])
 {
     __u32 hash[4];
 
+    net_secret_init();
     memcpy(hash, daddr, 16);
     md5_transform(hash, net_secret);
 
@@ -107,6 +116,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 {
     u32 hash[MD5_DIGEST_WORDS];
 
+    net_secret_init();
     hash[0] = (__force u32)saddr;
     hash[1] = (__force u32)daddr;
     hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +131,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
     u32 hash[MD5_DIGEST_WORDS];
 
+    net_secret_init();
     hash[0] = (__force u32)saddr;
     hash[1] = (__force u32)daddr;
     hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +151,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
     u32 hash[MD5_DIGEST_WORDS];
     u64 seq;
 
+    net_secret_init();
     hash[0] = (__force u32)saddr;
     hash[1] = (__force u32)daddr;
     hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +176,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
     u64 seq;
     u32 i;
 
+    net_secret_init();
     memcpy(hash, saddr, 16);
     for (i = 0; i < 4; i++)
         secret[i] = net_secret[i] + daddr[i];

net/core/skbuff.c
@@ -476,6 +476,18 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 }
 EXPORT_SYMBOL(skb_add_rx_frag);
 
+void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
+              unsigned int truesize)
+{
+    skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+    skb_frag_size_add(frag, size);
+    skb->len += size;
+    skb->data_len += size;
+    skb->truesize += truesize;
+}
+EXPORT_SYMBOL(skb_coalesce_rx_frag);
+
 static void skb_drop_list(struct sk_buff **listp)
 {
     kfree_skb_list(*listp);
@@ -580,9 +592,6 @@ static void skb_release_head_state(struct sk_buff *skb)
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
     nf_conntrack_put(skb->nfct);
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-    nf_conntrack_put_reasm(skb->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
     nf_bridge_put(skb->nf_bridge);
 #endif
@@ -903,6 +912,9 @@ EXPORT_SYMBOL(skb_clone);
 
 static void skb_headers_offset_update(struct sk_buff *skb, int off)
 {
+    /* Only adjust this if it actually is csum_start rather than csum */
+    if (skb->ip_summed == CHECKSUM_PARTIAL)
+        skb->csum_start += off;
     /* {transport,network,mac}_header and tail are relative to skb->head */
     skb->transport_header += off;
     skb->network_header += off;
@@ -1036,8 +1048,8 @@ EXPORT_SYMBOL(__pskb_copy);
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
- * Expands (or creates identical copy, if &nhead and &ntail are zero)
- * header of skb. &sk_buff itself is not changed. &sk_buff MUST have
+ * Expands (or creates identical copy, if @nhead and @ntail are zero)
+ * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 * reference count of 1. Returns zero in the case of success or error,
 * if expansion failed. In the last case, &sk_buff is not changed.
 *
@@ -1109,9 +1121,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 #endif
     skb->tail += off;
     skb_headers_offset_update(skb, nhead);
-    /* Only adjust this if it actually is csum_start rather than csum */
-    if (skb->ip_summed == CHECKSUM_PARTIAL)
-        skb->csum_start += nhead;
     skb->cloned = 0;
     skb->hdr_len = 0;
     skb->nohdr = 0;
@@ -1176,7 +1185,6 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                     NUMA_NO_NODE);
     int oldheadroom = skb_headroom(skb);
     int head_copy_len, head_copy_off;
-    int off;
 
     if (!n)
         return NULL;
@@ -1200,11 +1208,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
     copy_skb_header(n, skb);
 
-    off = newheadroom - oldheadroom;
-    if (n->ip_summed == CHECKSUM_PARTIAL)
-        n->csum_start += off;
-
-    skb_headers_offset_update(n, off);
+    skb_headers_offset_update(n, newheadroom - oldheadroom);
 
     return n;
 }
@@ -1256,6 +1260,29 @@ free_skb:
 }
 EXPORT_SYMBOL(skb_pad);
 
+/**
+ * pskb_put - add data to the tail of a potentially fragmented buffer
+ * @skb: start of the buffer to use
+ * @tail: tail fragment of the buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the potentially
+ * fragmented buffer. @tail must be the last fragment of @skb -- or
+ * @skb itself. If this would exceed the total buffer size the kernel
+ * will panic. A pointer to the first byte of the extra data is
+ * returned.
+ */
+
+unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
+{
+    if (tail != skb) {
+        skb->data_len += len;
+        skb->len += len;
+    }
+    return skb_put(tail, len);
+}
+EXPORT_SYMBOL_GPL(pskb_put);
+
 /**
 * skb_put - add data to a buffer
 * @skb: buffer to use
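
A short usage sketch for pskb_put() on a two-buffer chain (hypothetical helper), mirroring the kernel-doc above: bytes land in the tail fragment while the head skb's length accounting is kept in sync:

    static void *my_append(struct sk_buff *head, struct sk_buff *tail, int len)
    {
        /* extends @tail by @len bytes; when @tail is a fragment rather
         * than @head itself, head's len/data_len are updated too */
        return pskb_put(head, tail, len);
    }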
@@ -1933,9 +1960,8 @@ fault:
 EXPORT_SYMBOL(skb_store_bits);
 
 /* Checksum skb data. */
-
-__wsum skb_checksum(const struct sk_buff *skb, int offset,
-            int len, __wsum csum)
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+              __wsum csum, const struct skb_checksum_ops *ops)
 {
     int start = skb_headlen(skb);
     int i, copy = start - offset;
@@ -1946,7 +1972,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
     if (copy > 0) {
         if (copy > len)
             copy = len;
-        csum = csum_partial(skb->data + offset, copy, csum);
+        csum = ops->update(skb->data + offset, copy, csum);
         if ((len -= copy) == 0)
             return csum;
         offset += copy;
@@ -1967,10 +1993,10 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
             if (copy > len)
                 copy = len;
             vaddr = kmap_atomic(skb_frag_page(frag));
-            csum2 = csum_partial(vaddr + frag->page_offset +
-                         offset - start, copy, 0);
+            csum2 = ops->update(vaddr + frag->page_offset +
+                        offset - start, copy, 0);
             kunmap_atomic(vaddr);
-            csum = csum_block_add(csum, csum2, pos);
+            csum = ops->combine(csum, csum2, pos, copy);
             if (!(len -= copy))
                 return csum;
             offset += copy;
@@ -1989,9 +2015,9 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
             __wsum csum2;
             if (copy > len)
                 copy = len;
-            csum2 = skb_checksum(frag_iter, offset - start,
-                         copy, 0);
-            csum = csum_block_add(csum, csum2, pos);
+            csum2 = __skb_checksum(frag_iter, offset - start,
+                           copy, 0, ops);
+            csum = ops->combine(csum, csum2, pos, copy);
             if ((len -= copy) == 0)
                 return csum;
             offset += copy;
@@ -2003,6 +2029,18 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
     return csum;
 }
+EXPORT_SYMBOL(__skb_checksum);
+
+__wsum skb_checksum(const struct sk_buff *skb, int offset,
+            int len, __wsum csum)
+{
+    const struct skb_checksum_ops ops = {
+        .update  = csum_partial_ext,
+        .combine = csum_block_add_ext,
+    };
+
+    return __skb_checksum(skb, offset, len, csum, &ops);
+}
 EXPORT_SYMBOL(skb_checksum);
 
 /* Both of above in one bottle. */
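
The ops indirection lets a caller swap in a different update/combine pair (for example a CRC instead of the Internet checksum) while reusing the frag and frag_list walk. A sketch using the stock pair shown above:

    static const struct skb_checksum_ops my_ops = {
        .update  = csum_partial_ext,
        .combine = csum_block_add_ext,
    };

    static __wsum my_checksum(const struct sk_buff *skb, int offset, int len)
    {
        /* equivalent to skb_checksum(), but via the explicit ops table */
        return __skb_checksum(skb, offset, len, 0, &my_ops);
    }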
@@ -2522,14 +2560,14 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
- * Reads a block of skb data at &consumed relative to the
+ * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
- * the head of the data block to &data and returns the length
+ * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
- * returned, i.e. &consumed is typically set to the number
+ * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
@@ -2758,6 +2796,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
     struct sk_buff *segs = NULL;
     struct sk_buff *tail = NULL;
     struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
+    skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
     unsigned int mss = skb_shinfo(skb)->gso_size;
     unsigned int doffset = skb->data - skb_mac_header(skb);
     unsigned int offset = doffset;
@@ -2797,16 +2836,38 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
         if (hsize > len || !sg)
             hsize = len;
 
-        if (!hsize && i >= nfrags) {
-            BUG_ON(fskb->len != len);
+        if (!hsize && i >= nfrags && skb_headlen(fskb) &&
+            (skb_headlen(fskb) == len || sg)) {
+            BUG_ON(skb_headlen(fskb) > len);
+
+            i = 0;
+            nfrags = skb_shinfo(fskb)->nr_frags;
+            skb_frag = skb_shinfo(fskb)->frags;
+            pos += skb_headlen(fskb);
+
+            while (pos < offset + len) {
+                BUG_ON(i >= nfrags);
+
+                size = skb_frag_size(skb_frag);
+                if (pos + size > offset + len)
+                    break;
+
+                i++;
+                pos += size;
+                skb_frag++;
+            }
 
-            pos += len;
             nskb = skb_clone(fskb, GFP_ATOMIC);
             fskb = fskb->next;
 
             if (unlikely(!nskb))
                 goto err;
 
+            if (unlikely(pskb_trim(nskb, len))) {
+                kfree_skb(nskb);
+                goto err;
+            }
+
             hsize = skb_end_offset(nskb);
             if (skb_cow_head(nskb, doffset + headroom)) {
                 kfree_skb(nskb);
@@ -2837,20 +2898,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
         __copy_skb_header(nskb, skb);
         nskb->mac_len = skb->mac_len;
 
-        /* nskb and skb might have different headroom */
-        if (nskb->ip_summed == CHECKSUM_PARTIAL)
-            nskb->csum_start += skb_headroom(nskb) - headroom;
-
-        skb_reset_mac_header(nskb);
-        skb_set_network_header(nskb, skb->mac_len);
-        nskb->transport_header = (nskb->network_header +
-                      skb_network_header_len(skb));
+        skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
         skb_copy_from_linear_data_offset(skb, -tnl_hlen,
                          nskb->data - tnl_hlen,
                          doffset + tnl_hlen);
 
-        if (fskb != skb_shinfo(skb)->frag_list)
+        if (nskb->len == len + doffset)
             goto perform_csum_check;
 
         if (!sg) {
@@ -2868,8 +2922,28 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 
         skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
 
-        while (pos < offset + len && i < nfrags) {
-            *frag = skb_shinfo(skb)->frags[i];
+        while (pos < offset + len) {
+            if (i >= nfrags) {
+                BUG_ON(skb_headlen(fskb));
+
+                i = 0;
+                nfrags = skb_shinfo(fskb)->nr_frags;
+                skb_frag = skb_shinfo(fskb)->frags;
+
+                BUG_ON(!nfrags);
+
+                fskb = fskb->next;
+            }
+
+            if (unlikely(skb_shinfo(nskb)->nr_frags >=
+                     MAX_SKB_FRAGS)) {
+                net_warn_ratelimited(
+                    "skb_segment: too many frags: %u %u\n",
+                    pos, mss);
+                goto err;
+            }
+
+            *frag = *skb_frag;
             __skb_frag_ref(frag);
             size = skb_frag_size(frag);
 
@@ -2882,6 +2956,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 
             if (pos + size <= offset + len) {
                 i++;
+                skb_frag++;
                 pos += size;
             } else {
                 skb_frag_size_sub(frag, pos + size - (offset + len));
@@ -2891,25 +2966,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
             frag++;
         }
 
-        if (pos < offset + len) {
-            struct sk_buff *fskb2 = fskb;
-
-            BUG_ON(pos + fskb->len != offset + len);
-
-            pos += fskb->len;
-            fskb = fskb->next;
-
-            if (fskb2->next) {
-                fskb2 = skb_clone(fskb2, GFP_ATOMIC);
-                if (!fskb2)
-                    goto err;
-            } else
-                skb_get(fskb2);
-
-            SKB_FRAG_ASSERT(nskb);
-            skb_shinfo(nskb)->frag_list = fskb2;
-        }
-
 skip_fraglist:
         nskb->data_len = len - hsize;
         nskb->len += nskb->data_len;
@@ -2936,32 +2992,30 @@ EXPORT_SYMBOL_GPL(skb_segment);
 
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
-    struct sk_buff *p = *head;
-    struct sk_buff *nskb;
-    struct skb_shared_info *skbinfo = skb_shinfo(skb);
-    struct skb_shared_info *pinfo = skb_shinfo(p);
-    unsigned int headroom;
-    unsigned int len = skb_gro_len(skb);
+    struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
     unsigned int offset = skb_gro_offset(skb);
     unsigned int headlen = skb_headlen(skb);
+    struct sk_buff *nskb, *lp, *p = *head;
+    unsigned int len = skb_gro_len(skb);
     unsigned int delta_truesize;
+    unsigned int headroom;
 
-    if (p->len + len >= 65536)
+    if (unlikely(p->len + len >= 65536))
         return -E2BIG;
 
-    if (pinfo->frag_list)
-        goto merge;
-    else if (headlen <= offset) {
+    lp = NAPI_GRO_CB(p)->last ?: p;
+    pinfo = skb_shinfo(lp);
+
+    if (headlen <= offset) {
         skb_frag_t *frag;
         skb_frag_t *frag2;
         int i = skbinfo->nr_frags;
         int nr_frags = pinfo->nr_frags + i;
 
-        offset -= headlen;
-
         if (nr_frags > MAX_SKB_FRAGS)
-            return -E2BIG;
+            goto merge;
 
+        offset -= headlen;
         pinfo->nr_frags = nr_frags;
         skbinfo->nr_frags = 0;
 
@@ -2992,7 +3046,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         unsigned int first_offset;
 
         if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
-            return -E2BIG;
+            goto merge;
 
         first_offset = skb->data -
                    (unsigned char *)page_address(page) +
@@ -3010,7 +3064,10 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
         NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
         goto done;
-    } else if (skb_gro_len(p) != pinfo->gso_size)
+    }
+    if (pinfo->frag_list)
+        goto merge;
+    if (skb_gro_len(p) != pinfo->gso_size)
         return -E2BIG;
 
     headroom = skb_headroom(p);
@@ -3062,16 +3119,24 @@ merge:
 
     __skb_pull(skb, offset);
 
-    NAPI_GRO_CB(p)->last->next = skb;
+    if (!NAPI_GRO_CB(p)->last)
+        skb_shinfo(p)->frag_list = skb;
+    else
+        NAPI_GRO_CB(p)->last->next = skb;
     NAPI_GRO_CB(p)->last = skb;
     skb_header_release(skb);
+    lp = p;
 
 done:
     NAPI_GRO_CB(p)->count++;
     p->data_len += len;
     p->truesize += delta_truesize;
     p->len += len;
+    if (lp != p) {
+        lp->data_len += len;
+        lp->truesize += delta_truesize;
+        lp->len += len;
+    }
     NAPI_GRO_CB(skb)->same_flow = 1;
     return 0;
 }
@@ -3519,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
     skb->tstamp.tv64 = 0;
     skb->pkt_type = PACKET_HOST;
     skb->skb_iif = 0;
+    skb->local_df = 0;
    skb_dst_drop(skb);
    skb->mark = 0;
    secpath_reset(skb);

net/core/sock.c
@@ -475,12 +475,6 @@ discard_and_relse:
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
-void sk_reset_txq(struct sock *sk)
-{
-    sk_tx_queue_clear(sk);
-}
-EXPORT_SYMBOL(sk_reset_txq);
-
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
     struct dst_entry *dst = __sk_dst_get(sk);
@@ -888,7 +882,7 @@ set_rcvbuf:
 
     case SO_PEEK_OFF:
         if (sock->ops->set_peek_off)
-            sock->ops->set_peek_off(sk, val);
+            ret = sock->ops->set_peek_off(sk, val);
         else
             ret = -EOPNOTSUPP;
         break;
@@ -914,6 +908,13 @@ set_rcvbuf:
         }
         break;
 #endif
+
+    case SO_MAX_PACING_RATE:
+        sk->sk_max_pacing_rate = val;
+        sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+                     sk->sk_max_pacing_rate);
+        break;
+
     default:
         ret = -ENOPROTOOPT;
         break;
@@ -1177,6 +1178,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
         break;
 #endif
 
+    case SO_MAX_PACING_RATE:
+        v.val = sk->sk_max_pacing_rate;
+        break;
+
     default:
         return -ENOPROTOOPT;
     }
@@ -1836,7 +1841,17 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER get_order(32768)
 
-bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+/**
+ * skb_page_frag_refill - check that a page_frag contains enough room
+ * @sz: minimum size of the fragment we want to get
+ * @pfrag: pointer to page_frag
+ * @prio: priority for memory allocation
+ *
+ * Note: While this allocator tries to use high order pages, there is
+ * no guarantee that allocations succeed. Therefore, @sz MUST be
+ * less or equal than PAGE_SIZE.
+ */
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 {
     int order;
 
@@ -1845,16 +1860,16 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
             pfrag->offset = 0;
             return true;
         }
-        if (pfrag->offset < pfrag->size)
+        if (pfrag->offset + sz <= pfrag->size)
             return true;
         put_page(pfrag->page);
     }
 
     /* We restrict high order allocations to users that can afford to wait */
-    order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+    order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
 
     do {
-        gfp_t gfp = sk->sk_allocation;
+        gfp_t gfp = prio;
 
         if (order)
             gfp |= __GFP_COMP | __GFP_NOWARN;
@@ -1866,6 +1881,15 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
         }
     } while (--order >= 0);
 
     return false;
 }
+EXPORT_SYMBOL(skb_page_frag_refill);
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+    if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
+        return true;
+
+    sk_enter_memory_pressure(sk);
+    sk_stream_moderate_sndbuf(sk);
+    return false;
@@ -2319,6 +2343,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
     sk->sk_ll_usec = sysctl_net_busy_read;
 #endif
 
+    sk->sk_max_pacing_rate = ~0U;
+    sk->sk_pacing_rate = ~0U;
     /*
     * Before updating sk_refcnt, we must commit prior changes to memory
     * (Documentation/RCU/rculist_nulls.txt for details)
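
The split yields a socket-independent refill helper; the socket flavour just layers memory-pressure handling on top, as shown above. A sketch of a non-socket user (hypothetical), per the new kernel-doc:

    static bool my_refill(struct page_frag *pfrag)
    {
        /* ask for at least 256 bytes of room, sleeping allowed;
         * @sz must be <= PAGE_SIZE per the kernel-doc above */
        return skb_page_frag_refill(256U, pfrag, GFP_KERNEL);
    }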

net/core/utils.c
@@ -338,3 +338,52 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                   csum_unfold(*sum)));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
+
+struct __net_random_once_work {
+    struct work_struct work;
+    struct static_key *key;
+};
+
+static void __net_random_once_deferred(struct work_struct *w)
+{
+    struct __net_random_once_work *work =
+        container_of(w, struct __net_random_once_work, work);
+    if (!static_key_enabled(work->key))
+        static_key_slow_inc(work->key);
+    kfree(work);
+}
+
+static void __net_random_once_disable_jump(struct static_key *key)
+{
+    struct __net_random_once_work *w;
+
+    w = kmalloc(sizeof(*w), GFP_ATOMIC);
+    if (!w)
+        return;
+
+    INIT_WORK(&w->work, __net_random_once_deferred);
+    w->key = key;
+    schedule_work(&w->work);
+}
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+               struct static_key *done_key)
+{
+    static DEFINE_SPINLOCK(lock);
+    unsigned long flags;
+
+    spin_lock_irqsave(&lock, flags);
+    if (*done) {
+        spin_unlock_irqrestore(&lock, flags);
+        return false;
+    }
+
+    get_random_bytes(buf, nbytes);
+    *done = true;
+    spin_unlock_irqrestore(&lock, flags);
+
+    __net_random_once_disable_jump(done_key);
+
+    return true;
+}
+EXPORT_SYMBOL(__net_get_random_once);
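
In-tree users reach this through a net_get_random_once() wrapper macro (see the flow_dissector and secure_seq hunks above), which supplies a per-call-site done flag and static key so the common path collapses to a patched-out branch after the first call. A usage sketch with illustrative names:

    static u32 my_hash_seed __read_mostly;

    static void my_seed_init(void)
    {
        /* fills my_hash_seed with random bytes exactly once; later
         * calls are short-circuited via the static-key machinery */
        net_get_random_once(&my_hash_seed, sizeof(my_hash_seed));
    }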