Commit d4546c2509: Manage pending per-NAPI GRO packets via list_head. Return an SKB pointer from the GRO receive handlers; when a GRO receive handler returns non-NULL, that SKB needs to be completed at this time and removed from the NAPI queue. Several operations are greatly simplified by this transformation, especially timing out the oldest SKB in the list when gro_count exceeds MAX_GRO_SKBS, and napi_gro_flush(), which walks the queue in reverse order.

Signed-off-by: David S. Miller <davem@davemloft.net>
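To make the new contract concrete, here is a small illustrative sketch of how a caller can consume a gro_receive handler that takes a list_head and returns an SKB. This is not the kernel's actual dev_gro_receive()/napi_gro_flush() code: the names complete_one(), gro_dispatch_sketch() and gro_flush_sketch() are hypothetical, and the sketch assumes struct sk_buff exposes the list_head member named list that this commit introduces. The handler passed in would be a callback such as tcp6_gro_receive() from the file below.

/* Illustrative sketch only -- not the kernel's actual GRO core.  Assumes
 * struct sk_buff carries the list_head member 'list' added by this commit;
 * complete_one(), gro_dispatch_sketch() and gro_flush_sketch() are
 * hypothetical names.
 */
#include <linux/list.h>
#include <linux/skbuff.h>

static void complete_one(struct sk_buff *skb)
{
        /* Hypothetical stand-in for handing a finished SKB up the stack. */
}

static void gro_dispatch_sketch(struct list_head *gro_list,
                                struct sk_buff *skb,
                                struct sk_buff *(*receive)(struct list_head *,
                                                           struct sk_buff *))
{
        /* receive() is a gro_receive callback such as tcp6_gro_receive()
         * below.  NULL means the new SKB was merged (or should be held);
         * a non-NULL return is an SKB that is done coalescing and must be
         * completed and unlinked from the pending queue now.
         */
        struct sk_buff *done = receive(gro_list, skb);

        if (done) {
                list_del(&done->list);
                complete_one(done);
        }
        /* The real core also links 'skb' into gro_list when it is held for
         * later merging and times out the list tail (the oldest SKB) once
         * the queue grows past MAX_GRO_SKBS; that bookkeeping is omitted.
         */
}

static void gro_flush_sketch(struct list_head *gro_list)
{
        struct sk_buff *skb, *tmp;

        /* With a list_head queue, flushing in reverse order is a single
         * reverse walk; this is the simplification the commit message
         * mentions for napi_gro_flush().
         */
        list_for_each_entry_safe_reverse(skb, tmp, gro_list, list) {
                list_del(&skb->list);
                complete_one(skb);
        }
}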
82 lines · 2.0 KiB · C
/*
 *      IPV6 GSO/GRO offload support
 *      Linux INET6 implementation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      TCPv6 GSO/GRO support
 */
#include <linux/skbuff.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"

static struct sk_buff *tcp6_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      ip6_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
                                  &iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

        return tcp_gro_complete(skb);
}

static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct tcphdr *th;

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*th)))
                return ERR_PTR(-EINVAL);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);

                /* Set up pseudo header, usually expect stack to have done
                 * this.
                 */

                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
        }

        return tcp_gso_segment(skb, features);
}

static const struct net_offload tcpv6_offload = {
        .callbacks = {
                .gso_segment    = tcp6_gso_segment,
                .gro_receive    = tcp6_gro_receive,
                .gro_complete   = tcp6_gro_complete,
        },
};

int __init tcpv6_offload_init(void)
{
        return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
}