// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	TCPv6 GSO/GRO support
 */
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"

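/* Decide whether this flow should use fraglist GRO. Only considered when
 * the device advertises NETIF_F_GRO_FRAGLIST: reuse the decision of an
 * already-held flow if one matches, otherwise look up an established local
 * socket and mark the skb for fraglist GRO only when no such socket exists
 * (i.e. the flow is being forwarded).
 */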
static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct ipv6hdr *hdr;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet6_get_iif_sdif(skb, &iif, &sdif);
	hdr = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
#endif /* IS_ENABLED(CONFIG_IPV6) */
}

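/* GRO receive handler for TCPv6: validate the checksum against the IPv6
 * pseudo header (unless the packet is already marked for flushing), pull
 * the TCP header, check for fraglist GRO, then hand off to the generic
 * TCP GRO engine.
 */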
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      ip6_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp6_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

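/* Finish an aggregated TCPv6 packet: for fraglist GRO just fill in the GSO
 * type and segment count, otherwise recompute the pseudo-header checksum
 * seed and let tcp_gro_complete() set up the GSO metadata.
 */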
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
				  &iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

	tcp_gro_complete(skb);
	return 0;
}

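/* Helpers for fraglist GSO segmentation: after skb_segment_list() has split
 * the fraglist, every segment after the first still carries its original
 * headers, so addresses and ports may need to be rewritten to match the
 * head segment, with the TCP checksum adjusted incrementally.
 */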
static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;

	if (*oldport == newport)
		return;

	th = tcp_hdr(seg);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;
}

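/* If the trailing segments no longer match the head in addresses or ports
 * (e.g. because the head skb was rewritten while being forwarded),
 * propagate the head's addresses and ports to every segment and update
 * their TCP checksums accordingly.
 */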
static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct ipv6hdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct ipv6hdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ipv6_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ipv6_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ipv6_hdr(seg);

		iph2->saddr = iph->saddr;
		iph2->daddr = iph->daddr;
		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
	}

	return segs;
}

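/* Segment a fraglist GSO skb by splitting the fraglist back into the
 * original packets, then fix up headers and checksums of the trailing
 * segments if the head was modified.
 */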
static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv6_gso_segment_list_csum(skb);
}

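/* GSO segmentation entry point for TCPv6: fraglist skbs whose head payload
 * still equals the recorded gso_size take the fraglist path; everything
 * else gets a pseudo-header checksum seeded if needed and is handed to the
 * generic tcp_gso_segment().
 */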
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct tcphdr *th;

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*th)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp6_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up pseudo header, usually expect stack to have done
		 * this.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	}

	return tcp_gso_segment(skb, features);
}

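/* Register the TCPv6 GSO/GRO callbacks with the IPv6 offload framework at
 * boot time.
 */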
int __init tcpv6_offload_init(void)
{
	net_hotdata.tcpv6_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment = tcp6_gso_segment,
			.gro_receive = tcp6_gro_receive,
			.gro_complete = tcp6_gro_complete,
		},
	};
	return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}