// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/static_key.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

const struct ip_tunnel_encap_ops __rcu *
                iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(iptun_encaps);

const struct ip6_tnl_encap_ops __rcu *
                ip6tun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(ip6tun_encaps);
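
/* Push an outer IPv4 header onto @skb, fill it from the caller's
 * src/dst/proto/tos/ttl/df values, and hand the packet to ip_local_out(),
 * accounting the result in the tunnel device's tx stats.
 */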
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 proto,
                   __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        int pkt_len = skb->len - skb_inner_network_offset(skb);
        struct net *net = dev_net(rt->dst.dev);
        struct net_device *dev = skb->dev;
        struct iphdr *iph;
        int err;

        skb_scrub_packet(skb, xnet);

        skb_clear_hash_if_not_l4(skb);
        skb_dst_set(skb, &rt->dst);
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        /* Push down and install the IP header. */
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;
        __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);

        err = ip_local_out(net, sk, skb);

        if (dev) {
                if (unlikely(net_xmit_eval(err)))
                        pkt_len = 0;
                iptunnel_xmit_stats(dev, pkt_len);
        }
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
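
/* Strip @hdr_len bytes of tunnel header from @skb and reset its metadata
 * (protocol, hash, VLAN tag, queue mapping, offload state) so the inner
 * packet can be received normally. Returns 0 or a negative error.
 */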
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
                           __be16 inner_proto, bool raw_proto, bool xnet)
{
        if (unlikely(!pskb_may_pull(skb, hdr_len)))
                return -ENOMEM;

        skb_pull_rcsum(skb, hdr_len);

        if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
                struct ethhdr *eh;

                if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                        return -ENOMEM;

                eh = (struct ethhdr *)skb->data;
                if (likely(eth_proto_is_802_3(eh->h_proto)))
                        skb->protocol = eh->h_proto;
                else
                        skb->protocol = htons(ETH_P_802_2);

        } else {
                skb->protocol = inner_proto;
        }

        skb_clear_hash_if_not_l4(skb);
        __vlan_hwaccel_clear_tag(skb);
        skb_set_queue_mapping(skb, 0);
        skb_scrub_packet(skb, xnet);

        return iptunnel_pull_offloads(skb);
}
EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
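
/* Allocate a TX metadata dst for replying to the sender described by @md:
 * the reply's destination is taken from the received tunnel source address,
 * and the tunnel id, flags and options are copied over.
 */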
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
                                             gfp_t flags)
{
        struct metadata_dst *res;
        struct ip_tunnel_info *dst, *src;

        if (!md || md->type != METADATA_IP_TUNNEL ||
            md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
                return NULL;

        src = &md->u.tun_info;
        res = metadata_dst_alloc(src->options_len, METADATA_IP_TUNNEL, flags);
        if (!res)
                return NULL;

        dst = &res->u.tun_info;
        dst->key.tun_id = src->key.tun_id;
        if (src->mode & IP_TUNNEL_INFO_IPV6)
                memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
                       sizeof(struct in6_addr));
        else
                dst->key.u.ipv4.dst = src->key.u.ipv4.src;
        dst->key.tun_flags = src->key.tun_flags;
        dst->mode = src->mode | IP_TUNNEL_INFO_TX;
        ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
                                src->options_len, 0);

        return res;
}
EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
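
/* Prepare @skb's offload state for tunnel encapsulation: mark the inner
 * headers, set the requested GSO type on GSO packets, and fall back to
 * CHECKSUM_NONE when the packet is not CHECKSUM_PARTIAL.
 */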
int iptunnel_handle_offloads(struct sk_buff *skb,
                             int gso_type_mask)
{
        int err;

        if (likely(!skb->encapsulation)) {
                skb_reset_inner_headers(skb);
                skb->encapsulation = 1;
        }

        if (skb_is_gso(skb)) {
                err = skb_header_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
                        return err;
                skb_shinfo(skb)->gso_type |= gso_type_mask;
                return 0;
        }

        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                skb->ip_summed = CHECKSUM_NONE;
                /* We clear encapsulation here to prevent badly-written
                 * drivers potentially deciding to offload an inner checksum
                 * if we set CHECKSUM_PARTIAL on the outer header.
                 * This should go away when the drivers are all fixed.
                 */
                skb->encapsulation = 0;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);

/* Frequently modified stats are per-CPU; others are shared (netdev->stats). */
void ip_tunnel_get_stats64(struct net_device *dev,
                           struct rtnl_link_stats64 *tot)
{
        int i;

        netdev_stats_to_stats64(tot, &dev->stats);

        for_each_possible_cpu(i) {
                const struct pcpu_sw_netstats *tstats =
                                                per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes += rx_bytes;
                tot->tx_bytes += tx_bytes;
        }
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
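
/* Netlink policies for the LWTUNNEL_IP_* encap attributes and the nested
 * geneve/vxlan/erspan tunnel options they may carry.
 */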
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
        [LWTUNNEL_IP_UNSPEC] = { .strict_start_type = LWTUNNEL_IP_OPTS },
        [LWTUNNEL_IP_ID] = { .type = NLA_U64 },
        [LWTUNNEL_IP_DST] = { .type = NLA_U32 },
        [LWTUNNEL_IP_SRC] = { .type = NLA_U32 },
        [LWTUNNEL_IP_TTL] = { .type = NLA_U8 },
        [LWTUNNEL_IP_TOS] = { .type = NLA_U8 },
        [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
        [LWTUNNEL_IP_OPTS] = { .type = NLA_NESTED },
};

static const struct nla_policy ip_opts_policy[LWTUNNEL_IP_OPTS_MAX + 1] = {
        [LWTUNNEL_IP_OPTS_GENEVE] = { .type = NLA_NESTED },
        [LWTUNNEL_IP_OPTS_VXLAN] = { .type = NLA_NESTED },
        [LWTUNNEL_IP_OPTS_ERSPAN] = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
        [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
        [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
        [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[LWTUNNEL_IP_OPT_VXLAN_MAX + 1] = {
        [LWTUNNEL_IP_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = {
        [LWTUNNEL_IP_OPT_ERSPAN_VER] = { .type = NLA_U8 },
        [LWTUNNEL_IP_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
        [LWTUNNEL_IP_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
        [LWTUNNEL_IP_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
};
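
/* The ip_tun_parse_opts_*() helpers each validate one nested tunnel-option
 * attribute. When @info is non-NULL the option is also copied into the
 * tunnel info at offset @opts_len. They return the number of option bytes
 * consumed, or a negative error.
 */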
static int ip_tun_parse_opts_geneve(struct nlattr *attr,
                                    struct ip_tunnel_info *info, int opts_len,
                                    struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1];
        int data_len, err;

        err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_GENEVE_MAX, attr,
                               geneve_opt_policy, extack);
        if (err)
                return err;

        if (!tb[LWTUNNEL_IP_OPT_GENEVE_CLASS] ||
            !tb[LWTUNNEL_IP_OPT_GENEVE_TYPE] ||
            !tb[LWTUNNEL_IP_OPT_GENEVE_DATA])
                return -EINVAL;

        attr = tb[LWTUNNEL_IP_OPT_GENEVE_DATA];
        data_len = nla_len(attr);
        if (data_len % 4)
                return -EINVAL;

        if (info) {
                struct geneve_opt *opt = ip_tunnel_info_opts(info) + opts_len;

                memcpy(opt->opt_data, nla_data(attr), data_len);
                opt->length = data_len / 4;
                attr = tb[LWTUNNEL_IP_OPT_GENEVE_CLASS];
                opt->opt_class = nla_get_be16(attr);
                attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
                opt->type = nla_get_u8(attr);
                info->key.tun_flags |= TUNNEL_GENEVE_OPT;
        }

        return sizeof(struct geneve_opt) + data_len;
}

static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
                                   struct ip_tunnel_info *info, int opts_len,
                                   struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1];
        int err;

        err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_VXLAN_MAX, attr,
                               vxlan_opt_policy, extack);
        if (err)
                return err;

        if (!tb[LWTUNNEL_IP_OPT_VXLAN_GBP])
                return -EINVAL;

        if (info) {
                struct vxlan_metadata *md =
                        ip_tunnel_info_opts(info) + opts_len;

                attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
                md->gbp = nla_get_u32(attr);
                info->key.tun_flags |= TUNNEL_VXLAN_OPT;
        }

        return sizeof(struct vxlan_metadata);
}

static int ip_tun_parse_opts_erspan(struct nlattr *attr,
                                    struct ip_tunnel_info *info, int opts_len,
                                    struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1];
        int err;
        u8 ver;

        err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr,
                               erspan_opt_policy, extack);
        if (err)
                return err;

        if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER])
                return -EINVAL;

        ver = nla_get_u8(tb[LWTUNNEL_IP_OPT_ERSPAN_VER]);
        if (ver == 1) {
                if (!tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX])
                        return -EINVAL;
        } else if (ver == 2) {
                if (!tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] ||
                    !tb[LWTUNNEL_IP_OPT_ERSPAN_HWID])
                        return -EINVAL;
        } else {
                return -EINVAL;
        }

        if (info) {
                struct erspan_metadata *md =
                        ip_tunnel_info_opts(info) + opts_len;

                md->version = ver;
                if (ver == 1) {
                        attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX];
                        md->u.index = nla_get_be32(attr);
                } else {
                        attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR];
                        md->u.md2.dir = nla_get_u8(attr);
                        attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID];
                        set_hwid(&md->u.md2, nla_get_u8(attr));
                }

                info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
        }

        return sizeof(struct erspan_metadata);
}
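
/* Walk the nested LWTUNNEL_IP_OPTS attribute. Only one option type is
 * accepted per tunnel (geneve options may repeat); the total option length
 * in bytes is returned, or a negative error.
 */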
static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
                             struct netlink_ext_ack *extack)
{
        int err, rem, opt_len, opts_len = 0, type = 0;
        struct nlattr *nla;

        if (!attr)
                return 0;

        err = nla_validate(nla_data(attr), nla_len(attr), LWTUNNEL_IP_OPTS_MAX,
                           ip_opts_policy, extack);
        if (err)
                return err;

        nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
                switch (nla_type(nla)) {
                case LWTUNNEL_IP_OPTS_GENEVE:
                        if (type && type != TUNNEL_GENEVE_OPT)
                                return -EINVAL;
                        opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len,
                                                           extack);
                        if (opt_len < 0)
                                return opt_len;
                        opts_len += opt_len;
                        if (opts_len > IP_TUNNEL_OPTS_MAX)
                                return -EINVAL;
                        type = TUNNEL_GENEVE_OPT;
                        break;
                case LWTUNNEL_IP_OPTS_VXLAN:
                        if (type)
                                return -EINVAL;
                        opt_len = ip_tun_parse_opts_vxlan(nla, info, opts_len,
                                                          extack);
                        if (opt_len < 0)
                                return opt_len;
                        opts_len += opt_len;
                        type = TUNNEL_VXLAN_OPT;
                        break;
                case LWTUNNEL_IP_OPTS_ERSPAN:
                        if (type)
                                return -EINVAL;
                        opt_len = ip_tun_parse_opts_erspan(nla, info, opts_len,
                                                           extack);
                        if (opt_len < 0)
                                return opt_len;
                        opts_len += opt_len;
                        type = TUNNEL_ERSPAN_OPT;
                        break;
                default:
                        return -EINVAL;
                }
        }

        return opts_len;
}

static int ip_tun_get_optlen(struct nlattr *attr,
                             struct netlink_ext_ack *extack)
{
        return ip_tun_parse_opts(attr, NULL, extack);
}

static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info,
                           struct netlink_ext_ack *extack)
{
        return ip_tun_parse_opts(attr, info, extack);
}
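
/* Build the lwtunnel state for an IPv4 encap route from the LWTUNNEL_IP_*
 * netlink attributes: size and parse the tunnel options, then fill in the
 * tunnel key (id, addresses, ttl, tos, flags).
 */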
static int ip_tun_build_state(struct net *net, struct nlattr *attr,
                              unsigned int family, const void *cfg,
                              struct lwtunnel_state **ts,
                              struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
        struct lwtunnel_state *new_state;
        struct ip_tunnel_info *tun_info;
        int err, opt_len;

        err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
                                          ip_tun_policy, extack);
        if (err < 0)
                return err;

        opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP_OPTS], extack);
        if (opt_len < 0)
                return opt_len;

        new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
        if (!new_state)
                return -ENOMEM;

        new_state->type = LWTUNNEL_ENCAP_IP;

        tun_info = lwt_tun_info(new_state);

        err = ip_tun_set_opts(tb[LWTUNNEL_IP_OPTS], tun_info, extack);
        if (err < 0) {
                lwtstate_free(new_state);
                return err;
        }

#ifdef CONFIG_DST_CACHE
        err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
        if (err) {
                lwtstate_free(new_state);
                return err;
        }
#endif

        if (tb[LWTUNNEL_IP_ID])
                tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);

        if (tb[LWTUNNEL_IP_DST])
                tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);

        if (tb[LWTUNNEL_IP_SRC])
                tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);

        if (tb[LWTUNNEL_IP_TTL])
                tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

        if (tb[LWTUNNEL_IP_TOS])
                tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

        if (tb[LWTUNNEL_IP_FLAGS])
                tun_info->key.tun_flags |=
                                (nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
                                 ~TUNNEL_OPTIONS_PRESENT);

        tun_info->mode = IP_TUNNEL_INFO_TX;
        tun_info->options_len = opt_len;

        *ts = new_state;

        return 0;
}

static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate)
{
#ifdef CONFIG_DST_CACHE
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

        dst_cache_destroy(&tun_info->dst_cache);
#endif
}
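
/* The ip_tun_fill_encap_opts_*() helpers dump the tunnel options stored in
 * @tun_info back into a nested netlink attribute of the matching type.
 */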
static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb,
                                         struct ip_tunnel_info *tun_info)
{
        struct geneve_opt *opt;
        struct nlattr *nest;
        int offset = 0;

        nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE);
        if (!nest)
                return -ENOMEM;

        while (tun_info->options_len > offset) {
                opt = ip_tunnel_info_opts(tun_info) + offset;
                if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS,
                                 opt->opt_class) ||
                    nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) ||
                    nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4,
                            opt->opt_data)) {
                        nla_nest_cancel(skb, nest);
                        return -ENOMEM;
                }
                offset += sizeof(*opt) + opt->length * 4;
        }

        nla_nest_end(skb, nest);
        return 0;
}

static int ip_tun_fill_encap_opts_vxlan(struct sk_buff *skb,
                                        struct ip_tunnel_info *tun_info)
{
        struct vxlan_metadata *md;
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_VXLAN);
        if (!nest)
                return -ENOMEM;

        md = ip_tunnel_info_opts(tun_info);
        if (nla_put_u32(skb, LWTUNNEL_IP_OPT_VXLAN_GBP, md->gbp)) {
                nla_nest_cancel(skb, nest);
                return -ENOMEM;
        }

        nla_nest_end(skb, nest);
        return 0;
}

static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb,
                                         struct ip_tunnel_info *tun_info)
{
        struct erspan_metadata *md;
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_ERSPAN);
        if (!nest)
                return -ENOMEM;

        md = ip_tunnel_info_opts(tun_info);
        if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version))
                goto err;

        if (md->version == 1 &&
            nla_put_be32(skb, LWTUNNEL_IP_OPT_ERSPAN_INDEX, md->u.index))
                goto err;

        if (md->version == 2 &&
            (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_DIR, md->u.md2.dir) ||
             nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_HWID,
                        get_hwid(&md->u.md2))))
                goto err;

        nla_nest_end(skb, nest);
        return 0;
err:
        nla_nest_cancel(skb, nest);
        return -ENOMEM;
}
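
/* Emit the LWTUNNEL_IP(6)_OPTS nest for whichever option type is marked
 * present in the tunnel flags; a no-op when no options are set.
 */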
static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
                                  struct ip_tunnel_info *tun_info)
{
        struct nlattr *nest;
        int err = 0;

        if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
                return 0;

        nest = nla_nest_start_noflag(skb, type);
        if (!nest)
                return -ENOMEM;

        if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
                err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
        else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
                err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
        else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
                err = ip_tun_fill_encap_opts_erspan(skb, tun_info);

        if (err) {
                nla_nest_cancel(skb, nest);
                return err;
        }

        nla_nest_end(skb, nest);
        return 0;
}

static int ip_tun_fill_encap_info(struct sk_buff *skb,
                                  struct lwtunnel_state *lwtstate)
{
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

        if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
                         LWTUNNEL_IP_PAD) ||
            nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
            nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
            nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
            nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
            nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
            ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
                return -ENOMEM;

        return 0;
}
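
/* Compute the netlink size needed to dump the tunnel options carried by
 * @info, depending on which option type is present.
 */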
static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
{
        int opt_len;

        if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
                return 0;

        opt_len = nla_total_size(0);            /* LWTUNNEL_IP_OPTS */
        if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
                struct geneve_opt *opt;
                int offset = 0;

                opt_len += nla_total_size(0);   /* LWTUNNEL_IP_OPTS_GENEVE */
                while (info->options_len > offset) {
                        opt = ip_tunnel_info_opts(info) + offset;
                        opt_len += nla_total_size(2)    /* OPT_GENEVE_CLASS */
                                   + nla_total_size(1)  /* OPT_GENEVE_TYPE */
                                   + nla_total_size(opt->length * 4);
                                                        /* OPT_GENEVE_DATA */
                        offset += sizeof(*opt) + opt->length * 4;
                }
        } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
                opt_len += nla_total_size(0)    /* LWTUNNEL_IP_OPTS_VXLAN */
                           + nla_total_size(4); /* OPT_VXLAN_GBP */
        } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
                struct erspan_metadata *md = ip_tunnel_info_opts(info);

                opt_len += nla_total_size(0)    /* LWTUNNEL_IP_OPTS_ERSPAN */
                           + nla_total_size(1)  /* OPT_ERSPAN_VER */
                           + (md->version == 1 ? nla_total_size(4)
                                                 /* OPT_ERSPAN_INDEX (v1) */
                                               : nla_total_size(1) +
                                                 nla_total_size(1));
                                                 /* OPT_ERSPAN_DIR + HWID (v2) */
        }

        return opt_len;
}

static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
        return nla_total_size_64bit(8)  /* LWTUNNEL_IP_ID */
                + nla_total_size(4)     /* LWTUNNEL_IP_DST */
                + nla_total_size(4)     /* LWTUNNEL_IP_SRC */
                + nla_total_size(1)     /* LWTUNNEL_IP_TOS */
                + nla_total_size(1)     /* LWTUNNEL_IP_TTL */
                + nla_total_size(2)     /* LWTUNNEL_IP_FLAGS */
                + ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
                                        /* LWTUNNEL_IP_OPTS */
}
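
/* Returns non-zero when the tunnel key, mode or options of two lwtunnel
 * states differ.
 */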
static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
        struct ip_tunnel_info *info_a = lwt_tun_info(a);
        struct ip_tunnel_info *info_b = lwt_tun_info(b);

        return memcmp(info_a, info_b, sizeof(info_a->key)) ||
               info_a->mode != info_b->mode ||
               info_a->options_len != info_b->options_len ||
               memcmp(ip_tunnel_info_opts(info_a),
                      ip_tunnel_info_opts(info_b), info_a->options_len);
}

static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
        .build_state = ip_tun_build_state,
        .destroy_state = ip_tun_destroy_state,
        .fill_encap = ip_tun_fill_encap_info,
        .get_encap_size = ip_tun_encap_nlsize,
        .cmp_encap = ip_tun_cmp_encap,
        .owner = THIS_MODULE,
};

static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
        [LWTUNNEL_IP6_UNSPEC] = { .strict_start_type = LWTUNNEL_IP6_OPTS },
        [LWTUNNEL_IP6_ID] = { .type = NLA_U64 },
        [LWTUNNEL_IP6_DST] = { .len = sizeof(struct in6_addr) },
        [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) },
        [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 },
        [LWTUNNEL_IP6_TC] = { .type = NLA_U8 },
        [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 },
        [LWTUNNEL_IP6_OPTS] = { .type = NLA_NESTED },
};
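
/* IPv6 counterpart of ip_tun_build_state(): build the lwtunnel state from
 * the LWTUNNEL_IP6_* attributes and mark the tunnel info as IPv6.
 */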
static int ip6_tun_build_state(struct net *net, struct nlattr *attr,
                               unsigned int family, const void *cfg,
                               struct lwtunnel_state **ts,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
        struct lwtunnel_state *new_state;
        struct ip_tunnel_info *tun_info;
        int err, opt_len;

        err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
                                          ip6_tun_policy, extack);
        if (err < 0)
                return err;

        opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP6_OPTS], extack);
        if (opt_len < 0)
                return opt_len;

        new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
        if (!new_state)
                return -ENOMEM;

        new_state->type = LWTUNNEL_ENCAP_IP6;

        tun_info = lwt_tun_info(new_state);

        err = ip_tun_set_opts(tb[LWTUNNEL_IP6_OPTS], tun_info, extack);
        if (err < 0) {
                lwtstate_free(new_state);
                return err;
        }

        if (tb[LWTUNNEL_IP6_ID])
                tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);

        if (tb[LWTUNNEL_IP6_DST])
                tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);

        if (tb[LWTUNNEL_IP6_SRC])
                tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);

        if (tb[LWTUNNEL_IP6_HOPLIMIT])
                tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);

        if (tb[LWTUNNEL_IP6_TC])
                tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);

        if (tb[LWTUNNEL_IP6_FLAGS])
                tun_info->key.tun_flags |=
                                (nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
                                 ~TUNNEL_OPTIONS_PRESENT);

        tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
        tun_info->options_len = opt_len;

        *ts = new_state;

        return 0;
}

static int ip6_tun_fill_encap_info(struct sk_buff *skb,
                                   struct lwtunnel_state *lwtstate)
{
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

        if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
                         LWTUNNEL_IP6_PAD) ||
            nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
            nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
            nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
            nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
            nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
            ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
                return -ENOMEM;

        return 0;
}

static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
        return nla_total_size_64bit(8)  /* LWTUNNEL_IP6_ID */
                + nla_total_size(16)    /* LWTUNNEL_IP6_DST */
                + nla_total_size(16)    /* LWTUNNEL_IP6_SRC */
                + nla_total_size(1)     /* LWTUNNEL_IP6_HOPLIMIT */
                + nla_total_size(1)     /* LWTUNNEL_IP6_TC */
                + nla_total_size(2)     /* LWTUNNEL_IP6_FLAGS */
                + ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
                                        /* LWTUNNEL_IP6_OPTS */
}

static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
        .build_state = ip6_tun_build_state,
        .fill_encap = ip6_tun_fill_encap_info,
        .get_encap_size = ip6_tun_encap_nlsize,
        .cmp_encap = ip_tun_cmp_encap,
        .owner = THIS_MODULE,
};
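
/* Register the IPv4 and IPv6 lightweight-tunnel encap operations at boot. */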
void __init ip_tunnel_core_init(void)
{
        /* If you land here, check whether increasing ip_tunnel_info's
         * options_len is reasonable given how front ends use it
         * (e.g., it is part of the flow keys).
         */
        BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255);

        lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
        lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
}
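
/* Reference-counted static key, raised and dropped via
 * ip_tunnel_need_metadata()/ip_tunnel_unneed_metadata() below.
 */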
DEFINE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
EXPORT_SYMBOL(ip_tunnel_metadata_cnt);

void ip_tunnel_need_metadata(void)
{
        static_branch_inc(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);

void ip_tunnel_unneed_metadata(void)
{
        static_branch_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);