ip: validate header length on virtual device xmit
KMSAN detected a read beyond the end of the buffer in vti and sit devices when passing truncated packets with PF_PACKET. The issue affects additional ip tunnel devices. Extend commit 76c0ddd8c3
("ip6_tunnel: be careful when accessing the inner header") and commit ccfec9e5cb
("ip_tunnel: be careful when accessing the inner header"). Move the check to a separate helper and call it at the start of each ndo_start_xmit function in net/ipv4 and net/ipv6. Minor changes: - convert dev_kfree_skb to kfree_skb on the error path, as dev_kfree_skb calls consume_skb, which is not intended for error paths. - use pskb_network_may_pull even though that is pedantic here, as it is the same as pskb_may_pull for devices without link-layer headers. - do not cache ipv6 hdrs if used only once (unsafe across pskb_may_pull; was more relevant to an earlier patch) Reported-by: syzbot <syzkaller@googlegroups.com> Signed-off-by: Willem de Bruijn <willemb@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
8c76e77f90
commit
cb9f1b7838
@ -308,6 +308,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
|
|||||||
int ip_tunnel_encap_setup(struct ip_tunnel *t,
|
int ip_tunnel_encap_setup(struct ip_tunnel *t,
|
||||||
struct ip_tunnel_encap *ipencap);
|
struct ip_tunnel_encap *ipencap);
|
||||||
|
|
||||||
|
static inline bool pskb_inet_may_pull(struct sk_buff *skb)
|
||||||
|
{
|
||||||
|
int nhlen;
|
||||||
|
|
||||||
|
switch (skb->protocol) {
|
||||||
|
#if IS_ENABLED(CONFIG_IPV6)
|
||||||
|
case htons(ETH_P_IPV6):
|
||||||
|
nhlen = sizeof(struct ipv6hdr);
|
||||||
|
break;
|
||||||
|
#endif
|
||||||
|
case htons(ETH_P_IP):
|
||||||
|
nhlen = sizeof(struct iphdr);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
nhlen = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
return pskb_network_may_pull(skb, nhlen);
|
||||||
|
}
|
||||||
|
|
||||||
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
|
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
|
||||||
{
|
{
|
||||||
const struct ip_tunnel_encap_ops *ops;
|
const struct ip_tunnel_encap_ops *ops;
|
||||||
|
@ -676,6 +676,9 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
|
|||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
const struct iphdr *tnl_params;
|
const struct iphdr *tnl_params;
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto free_skb;
|
||||||
|
|
||||||
if (tunnel->collect_md) {
|
if (tunnel->collect_md) {
|
||||||
gre_fb_xmit(skb, dev, skb->protocol);
|
gre_fb_xmit(skb, dev, skb->protocol);
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
@ -719,6 +722,9 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
|
|||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
bool truncate = false;
|
bool truncate = false;
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto free_skb;
|
||||||
|
|
||||||
if (tunnel->collect_md) {
|
if (tunnel->collect_md) {
|
||||||
erspan_fb_xmit(skb, dev, skb->protocol);
|
erspan_fb_xmit(skb, dev, skb->protocol);
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
@ -762,6 +768,9 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
|
|||||||
{
|
{
|
||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto free_skb;
|
||||||
|
|
||||||
if (tunnel->collect_md) {
|
if (tunnel->collect_md) {
|
||||||
gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
|
gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
|
@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
|||||||
const struct iphdr *tnl_params, u8 protocol)
|
const struct iphdr *tnl_params, u8 protocol)
|
||||||
{
|
{
|
||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
unsigned int inner_nhdr_len = 0;
|
|
||||||
const struct iphdr *inner_iph;
|
const struct iphdr *inner_iph;
|
||||||
struct flowi4 fl4;
|
struct flowi4 fl4;
|
||||||
u8 tos, ttl;
|
u8 tos, ttl;
|
||||||
@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
|||||||
__be32 dst;
|
__be32 dst;
|
||||||
bool connected;
|
bool connected;
|
||||||
|
|
||||||
/* ensure we can access the inner net header, for several users below */
|
|
||||||
if (skb->protocol == htons(ETH_P_IP))
|
|
||||||
inner_nhdr_len = sizeof(struct iphdr);
|
|
||||||
else if (skb->protocol == htons(ETH_P_IPV6))
|
|
||||||
inner_nhdr_len = sizeof(struct ipv6hdr);
|
|
||||||
if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
|
|
||||||
goto tx_error;
|
|
||||||
|
|
||||||
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
|
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
|
||||||
connected = (tunnel->parms.iph.daddr != 0);
|
connected = (tunnel->parms.iph.daddr != 0);
|
||||||
|
|
||||||
|
@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
struct flowi fl;
|
struct flowi fl;
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto tx_err;
|
||||||
|
|
||||||
memset(&fl, 0, sizeof(fl));
|
memset(&fl, 0, sizeof(fl));
|
||||||
|
|
||||||
switch (skb->protocol) {
|
switch (skb->protocol) {
|
||||||
@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||||||
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
|
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
dev->stats.tx_errors++;
|
goto tx_err;
|
||||||
dev_kfree_skb(skb);
|
|
||||||
return NETDEV_TX_OK;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* override mark with tunnel output key */
|
/* override mark with tunnel output key */
|
||||||
fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
|
fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
|
||||||
|
|
||||||
return vti_xmit(skb, dev, &fl);
|
return vti_xmit(skb, dev, &fl);
|
||||||
|
|
||||||
|
tx_err:
|
||||||
|
dev->stats.tx_errors++;
|
||||||
|
kfree_skb(skb);
|
||||||
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int vti4_err(struct sk_buff *skb, u32 info)
|
static int vti4_err(struct sk_buff *skb, u32 info)
|
||||||
|
@ -881,6 +881,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
|
|||||||
struct net_device_stats *stats = &t->dev->stats;
|
struct net_device_stats *stats = &t->dev->stats;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto tx_err;
|
||||||
|
|
||||||
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
|
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
|
||||||
goto tx_err;
|
goto tx_err;
|
||||||
|
|
||||||
@ -923,6 +926,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|||||||
int nhoff;
|
int nhoff;
|
||||||
int thoff;
|
int thoff;
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto tx_err;
|
||||||
|
|
||||||
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
|
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
|
||||||
goto tx_err;
|
goto tx_err;
|
||||||
|
|
||||||
@ -995,8 +1001,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|||||||
goto tx_err;
|
goto tx_err;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
|
|
||||||
|
|
||||||
switch (skb->protocol) {
|
switch (skb->protocol) {
|
||||||
case htons(ETH_P_IP):
|
case htons(ETH_P_IP):
|
||||||
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
|
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
|
||||||
@ -1004,7 +1008,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|||||||
&dsfield, &encap_limit);
|
&dsfield, &encap_limit);
|
||||||
break;
|
break;
|
||||||
case htons(ETH_P_IPV6):
|
case htons(ETH_P_IPV6):
|
||||||
if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
|
if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
|
||||||
goto tx_err;
|
goto tx_err;
|
||||||
if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
|
if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
|
||||||
&dsfield, &encap_limit))
|
&dsfield, &encap_limit))
|
||||||
|
@ -1243,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||||||
u8 tproto;
|
u8 tproto;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
/* ensure we can access the full inner ip header */
|
|
||||||
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
|
|
||||||
return -1;
|
|
||||||
|
|
||||||
iph = ip_hdr(skb);
|
iph = ip_hdr(skb);
|
||||||
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
|
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
|
||||||
|
|
||||||
@ -1321,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||||||
u8 tproto;
|
u8 tproto;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
|
|
||||||
return -1;
|
|
||||||
|
|
||||||
ipv6h = ipv6_hdr(skb);
|
ipv6h = ipv6_hdr(skb);
|
||||||
tproto = READ_ONCE(t->parms.proto);
|
tproto = READ_ONCE(t->parms.proto);
|
||||||
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
|
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
|
||||||
@ -1405,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||||||
struct net_device_stats *stats = &t->dev->stats;
|
struct net_device_stats *stats = &t->dev->stats;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto tx_err;
|
||||||
|
|
||||||
switch (skb->protocol) {
|
switch (skb->protocol) {
|
||||||
case htons(ETH_P_IP):
|
case htons(ETH_P_IP):
|
||||||
ret = ip4ip6_tnl_xmit(skb, dev);
|
ret = ip4ip6_tnl_xmit(skb, dev);
|
||||||
|
@ -522,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||||||
{
|
{
|
||||||
struct ip6_tnl *t = netdev_priv(dev);
|
struct ip6_tnl *t = netdev_priv(dev);
|
||||||
struct net_device_stats *stats = &t->dev->stats;
|
struct net_device_stats *stats = &t->dev->stats;
|
||||||
struct ipv6hdr *ipv6h;
|
|
||||||
struct flowi fl;
|
struct flowi fl;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto tx_err;
|
||||||
|
|
||||||
memset(&fl, 0, sizeof(fl));
|
memset(&fl, 0, sizeof(fl));
|
||||||
|
|
||||||
switch (skb->protocol) {
|
switch (skb->protocol) {
|
||||||
case htons(ETH_P_IPV6):
|
case htons(ETH_P_IPV6):
|
||||||
ipv6h = ipv6_hdr(skb);
|
|
||||||
|
|
||||||
if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
|
if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
|
||||||
vti6_addr_conflict(t, ipv6h))
|
vti6_addr_conflict(t, ipv6_hdr(skb)))
|
||||||
goto tx_err;
|
goto tx_err;
|
||||||
|
|
||||||
xfrm_decode_session(skb, &fl, AF_INET6);
|
xfrm_decode_session(skb, &fl, AF_INET6);
|
||||||
|
@ -51,6 +51,7 @@
|
|||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
#include <net/ip6_checksum.h>
|
#include <net/ip6_checksum.h>
|
||||||
#include <linux/netconf.h>
|
#include <linux/netconf.h>
|
||||||
|
#include <net/ip_tunnels.h>
|
||||||
|
|
||||||
#include <linux/nospec.h>
|
#include <linux/nospec.h>
|
||||||
|
|
||||||
@ -599,13 +600,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
|
|||||||
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
|
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
|
||||||
.flowi6_mark = skb->mark,
|
.flowi6_mark = skb->mark,
|
||||||
};
|
};
|
||||||
int err;
|
|
||||||
|
|
||||||
err = ip6mr_fib_lookup(net, &fl6, &mrt);
|
if (!pskb_inet_may_pull(skb))
|
||||||
if (err < 0) {
|
goto tx_err;
|
||||||
kfree_skb(skb);
|
|
||||||
return err;
|
if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
|
||||||
}
|
goto tx_err;
|
||||||
|
|
||||||
read_lock(&mrt_lock);
|
read_lock(&mrt_lock);
|
||||||
dev->stats.tx_bytes += skb->len;
|
dev->stats.tx_bytes += skb->len;
|
||||||
@ -614,6 +614,11 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
|
|||||||
read_unlock(&mrt_lock);
|
read_unlock(&mrt_lock);
|
||||||
kfree_skb(skb);
|
kfree_skb(skb);
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
|
|
||||||
|
tx_err:
|
||||||
|
dev->stats.tx_errors++;
|
||||||
|
kfree_skb(skb);
|
||||||
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int reg_vif_get_iflink(const struct net_device *dev)
|
static int reg_vif_get_iflink(const struct net_device *dev)
|
||||||
|
@ -1021,6 +1021,9 @@ tx_error:
|
|||||||
static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
|
static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
|
||||||
struct net_device *dev)
|
struct net_device *dev)
|
||||||
{
|
{
|
||||||
|
if (!pskb_inet_may_pull(skb))
|
||||||
|
goto tx_err;
|
||||||
|
|
||||||
switch (skb->protocol) {
|
switch (skb->protocol) {
|
||||||
case htons(ETH_P_IP):
|
case htons(ETH_P_IP):
|
||||||
sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
|
sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
|
||||||
|
Loading…
Reference in New Issue
Block a user