// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <net/tipc.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/if_hsr.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <linux/ptp_classify.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>
#include <linux/bpf.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_labels.h>
#endif
#include <linux/bpf-netns.h>

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1ULL << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);

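/* Usage sketch (illustrative only, not taken from this file): a caller of
 * skb_flow_dissector_init() supplies the key ids it wants dissected and the
 * offsets of the matching key structs inside its own container. The
 * "my_keys"/"my_dissector" names below are hypothetical:
 *
 *	struct my_keys {
 *		struct flow_dissector_key_control control;
 *		struct flow_dissector_key_basic basic;
 *	};
 *	static const struct flow_dissector_key my_key_list[] = {
 *		{ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *		  .offset = offsetof(struct my_keys, control) },
 *		{ .key_id = FLOW_DISSECTOR_KEY_BASIC,
 *		  .offset = offsetof(struct my_keys, basic) },
 *	};
 *	static struct flow_dissector my_dissector;
 *
 *	skb_flow_dissector_init(&my_dissector, my_key_list,
 *				ARRAY_SIZE(my_key_list));
 *
 * CONTROL and BASIC must always be present, per the BUG_ON()s above.
 */
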
#ifdef CONFIG_BPF_SYSCALL
int flow_dissector_bpf_prog_attach_check(struct net *net,
					 struct bpf_prog *prog)
{
	enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;

	if (net == &init_net) {
		/* BPF flow dissector in the root namespace overrides
		 * any per-net-namespace one. When attaching to root,
		 * make sure we don't have any BPF program attached
		 * to the non-root namespaces.
		 */
		struct net *ns;

		for_each_net(ns) {
			if (ns == &init_net)
				continue;
			if (rcu_access_pointer(ns->bpf.run_array[type]))
				return -EEXIST;
		}
	} else {
		/* Make sure root flow dissector is not attached
		 * when attaching to the non-root namespace.
		 */
		if (rcu_access_pointer(init_net.bpf.run_array[type]))
			return -EEXIST;
	}

	return 0;
}
#endif /* CONFIG_BPF_SYSCALL */

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    const void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);

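/* Usage sketch (hypothetical caller): the returned __be32 carries the source
 * port in the first two bytes and the destination port in the last two,
 * which matches the layout of struct flow_dissector_key_ports, so the result
 * can be stored through its "ports" union member:
 *
 *	struct flow_dissector_key_ports ports;
 *
 *	ports.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 *	// ports.src / ports.dst now hold the __be16 port values, or 0/0
 *	// when the protocol has no known port offset or the header could
 *	// not be read.
 */
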
static bool icmp_has_id(u8 type)
{
	switch (type) {
	case ICMP_ECHO:
	case ICMP_ECHOREPLY:
	case ICMP_TIMESTAMP:
	case ICMP_TIMESTAMPREPLY:
	case ICMPV6_ECHO_REQUEST:
	case ICMPV6_ECHO_REPLY:
		return true;
	}

	return false;
}

/**
 * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
 * @skb: sk_buff to extract from
 * @key_icmp: struct flow_dissector_key_icmp to fill
 * @data: raw buffer pointer to the packet
 * @thoff: offset to extract at
 * @hlen: packet header length
 */
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
			   struct flow_dissector_key_icmp *key_icmp,
			   const void *data, int thoff, int hlen)
{
	struct icmphdr *ih, _ih;

	ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
	if (!ih)
		return;

	key_icmp->type = ih->type;
	key_icmp->code = ih->code;

	/* As we use 0 to signal that the Id field is not present,
	 * avoid confusion with packets without such field
	 */
	if (icmp_has_id(ih->type))
		key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
	else
		key_icmp->id = 0;
}
EXPORT_SYMBOL(skb_flow_get_icmp_tci);

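/* Note on the id encoding above: key_icmp->id == 0 always means "this
 * message type carries no Identifier field". A genuine on-the-wire id of 0
 * is therefore reported as 1; collapsing id 0 and id 1 into the same flow
 * is an acceptable trade-off for hashing purposes.
 */
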
/* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
 * using skb_flow_get_icmp_tci().
 */
static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, const void *data,
				    int thoff, int hlen)
{
	struct flow_dissector_key_icmp *key_icmp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
		return;

	key_icmp = skb_flow_dissector_target(flow_dissector,
					     FLOW_DISSECTOR_KEY_ICMP,
					     target_container);

	skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
}

static void __skb_flow_dissect_ah(const struct sk_buff *skb,
				  struct flow_dissector *flow_dissector,
				  void *target_container, const void *data,
				  int nhoff, int hlen)
{
	struct flow_dissector_key_ipsec *key_ah;
	struct ip_auth_hdr _hdr, *hdr;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPSEC))
		return;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
	if (!hdr)
		return;

	key_ah = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IPSEC,
					   target_container);

	key_ah->spi = hdr->spi;
}

static void __skb_flow_dissect_esp(const struct sk_buff *skb,
				   struct flow_dissector *flow_dissector,
				   void *target_container, const void *data,
				   int nhoff, int hlen)
{
	struct flow_dissector_key_ipsec *key_esp;
	struct ip_esp_hdr _hdr, *hdr;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPSEC))
		return;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
	if (!hdr)
		return;

	key_esp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_IPSEC,
					    target_container);

	key_esp->spi = hdr->spi;
}

static void __skb_flow_dissect_l2tpv3(const struct sk_buff *skb,
				      struct flow_dissector *flow_dissector,
				      void *target_container, const void *data,
				      int nhoff, int hlen)
{
	struct flow_dissector_key_l2tpv3 *key_l2tpv3;
	struct {
		__be32 session_id;
	} *hdr, _hdr;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_L2TPV3))
		return;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
	if (!hdr)
		return;

	key_l2tpv3 = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_L2TPV3,
					       target_container);

	key_l2tpv3->session_id = hdr->session_id;
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container)
{
	struct flow_dissector_key_meta *meta;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_META))
		return;

	meta = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_META,
					 target_container);
	meta->ingress_ifindex = skb->skb_iif;
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (tc_skb_ext_tc_enabled()) {
		struct tc_skb_ext *ext;

		ext = skb_ext_find(skb, TC_SKB_EXT);
		if (ext)
			meta->l2_miss = ext->l2_miss;
	}
#endif
}
EXPORT_SYMBOL(skb_flow_dissect_meta);

static void
skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
				   struct flow_dissector *flow_dissector,
				   void *target_container)
{
	struct flow_dissector_key_control *ctrl;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return;

	ctrl = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
					 target_container);
	ctrl->addr_type = type;
}

void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container, u16 *ctinfo_map,
		    size_t mapsize, bool post_ct, u16 zone)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct flow_dissector_key_ct *key;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_labels *cl;
	struct nf_conn *ct;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CT))
		return;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct && !post_ct)
		return;

	key = skb_flow_dissector_target(flow_dissector,
					FLOW_DISSECTOR_KEY_CT,
					target_container);

	if (!ct) {
		key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
				TCA_FLOWER_KEY_CT_FLAGS_INVALID;
		key->ct_zone = zone;
		return;
	}

	if (ctinfo < mapsize)
		key->ct_state = ctinfo_map[ctinfo];
#if IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)
	key->ct_zone = ct->zone.id;
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	key->ct_mark = READ_ONCE(ct->mark);
#endif

	cl = nf_ct_labels_find(ct);
	if (cl)
		memcpy(key->ct_labels, cl->bits, sizeof(key->ct_labels));
#endif /* CONFIG_NF_CONNTRACK */
}
EXPORT_SYMBOL(skb_flow_dissect_ct);

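/* Usage sketch (hypothetical map, modelled on what a TC classifier might
 * pass in): ctinfo_map translates the kernel's enum ip_conntrack_info
 * values into TCA_FLOWER_KEY_CT_FLAGS_* bits, indexed by ctinfo:
 *
 *	static u16 my_ctinfo_map[IP_CT_NUMBER] = {
 *		[IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *				      TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 *		[IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *			      TCA_FLOWER_KEY_CT_FLAGS_NEW,
 *	};
 *
 *	skb_flow_dissect_ct(skb, flow_dissector, target_container,
 *			    my_ctinfo_map, ARRAY_SIZE(my_ctinfo_map),
 *			    false, 0);
 */
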
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container)
{
	struct ip_tunnel_info *info;
	struct ip_tunnel_key *key;

	/* A quick check to see if there might be something to do. */
	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_KEYID) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IP) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_OPTS))
		return;

	info = skb_tunnel_info(skb);
	if (!info)
		return;

	key = &info->key;

	switch (ip_tunnel_info_af(info)) {
	case AF_INET:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
			struct flow_dissector_key_ipv4_addrs *ipv4;

			ipv4 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
							 target_container);
			ipv4->src = key->u.ipv4.src;
			ipv4->dst = key->u.ipv4.dst;
		}
		break;
	case AF_INET6:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *ipv6;

			ipv6 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
							 target_container);
			ipv6->src = key->u.ipv6.src;
			ipv6->dst = key->u.ipv6.dst;
		}
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *keyid;

		keyid = skb_flow_dissector_target(flow_dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  target_container);
		keyid->keyid = tunnel_id_to_key32(key->tun_id);
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *tp;

		tp = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_PORTS,
					       target_container);
		tp->src = key->tp_src;
		tp->dst = key->tp_dst;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *ip;

		ip = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_IP,
					       target_container);
		ip->tos = key->tos;
		ip->ttl = key->ttl;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_dissector_key_enc_opts *enc_opt;
		IP_TUNNEL_DECLARE_FLAGS(flags) = { };
		u32 val;

		enc_opt = skb_flow_dissector_target(flow_dissector,
						    FLOW_DISSECTOR_KEY_ENC_OPTS,
						    target_container);

		if (!info->options_len)
			return;

		enc_opt->len = info->options_len;
		ip_tunnel_info_opts_get(enc_opt->data, info);

		ip_tunnel_set_options_present(flags);
		ip_tunnel_flags_and(flags, info->key.tun_flags, flags);

		val = find_next_bit(flags, __IP_TUNNEL_FLAG_NUM,
				    IP_TUNNEL_GENEVE_OPT_BIT);
		enc_opt->dst_opt_type = val < __IP_TUNNEL_FLAG_NUM ? val : 0;
	}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);

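/* A note on the ENC_OPTS handling above: tunnel flags live in an
 * IP_TUNNEL_DECLARE_FLAGS() bitmap rather than a __be16, so the option type
 * is recovered by setting every *_OPT bit in a scratch bitmap
 * (ip_tunnel_set_options_present()), AND-ing it with the tunnel's actual
 * flags, and taking the first bit at or above IP_TUNNEL_GENEVE_OPT_BIT.
 * If no option bit survives, find_next_bit() returns __IP_TUNNEL_FLAG_NUM
 * and dst_opt_type falls back to 0.
 */
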
void skb_flow_dissect_hash(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container)
{
	struct flow_dissector_key_hash *key;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH))
		return;

	key = skb_flow_dissector_target(flow_dissector,
					FLOW_DISSECTOR_KEY_HASH,
					target_container);

	key->hash = skb_get_hash_raw(skb);
}
EXPORT_SYMBOL(skb_flow_dissect_hash);

static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data, int nhoff,
			int hlen, int lse_index, bool *entropy_label)
{
	struct mpls_label *hdr, _hdr;
	u32 entry, label, bos;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	if (lse_index >= FLOW_DIS_MPLS_MAX)
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr->entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
	bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;
		struct flow_dissector_mpls_lse *lse;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		lse = &key_mpls->ls[lse_index];

		lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
		lse->mpls_bos = bos;
		lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
		lse->mpls_label = label;
		dissector_set_mpls_lse(key_mpls, lse_index);
	}

	if (*entropy_label &&
	    dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
		struct flow_dissector_key_keyid *key_keyid;

		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = cpu_to_be32(label);
	}

	*entropy_label = label == MPLS_LABEL_ENTROPY;

	return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN;
}

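/* For reference, the 32-bit MPLS Label Stack Entry decoded above has the
 * RFC 3032 layout (most significant bit first):
 *
 *	| Label (20 bits) | TC (3 bits) | S (1 bit) | TTL (8 bits) |
 *
 * The S (bottom-of-stack) bit drives the return value: parsing repeats via
 * FLOW_DISSECT_RET_PROTO_AGAIN until S is set or FLOW_DIS_MPLS_MAX entries
 * have been consumed.
 */
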
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_cfm(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int nhoff, int hlen)
{
	struct flow_dissector_key_cfm *key, *hdr, _hdr;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CFM))
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(*key), data, hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	key = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_CFM,
					target_container);

	key->mdl_ver = hdr->mdl_ver;
	key->opcode = hdr->opcode;

	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof_field(struct gre_full_hdr, csum) +
			  sizeof_field(struct gre_full_hdr, reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof_field(struct gre_full_hdr, key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof_field(struct pptp_gre_header, seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof_field(struct pptp_gre_header, ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

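/* For reference, the offset accumulation in __skb_flow_dissect_gre() follows
 * the fixed ordering of optional GRE fields (RFC 2784/2890, plus the
 * RFC 2637 PPTP variant for version 1):
 *
 *	base header (4)
 *	[ checksum (2) + reserved1 (2)	if GRE_CSUM ]
 *	[ key (4)			if GRE_KEY  ]
 *	[ sequence number (4)		if GRE_SEQ  ]
 *	[ acknowledgment number (4)	if GRE_ACK, version 1 only ]
 */
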
/**
 * __skb_flow_dissect_batadv() - dissect batman-adv header
 * @skb: sk_buff with the batman-adv header
 * @key_control: flow dissectors control key
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @p_proto: pointer used to update the protocol to process next
 * @p_nhoff: pointer used to update inner network header offset
 * @hlen: packet header length
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * An attempt is made to dissect ETH_P_BATMAN packets. Only
 * &struct batadv_unicast packets are actually processed, because they carry an
 * inner ethernet header and are usually followed by an actual network header.
 * This allows the flow dissector to continue processing the packet.
 *
 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 * FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
 * otherwise FLOW_DISSECT_RET_OUT_BAD
 */
static enum flow_dissect_ret
__skb_flow_dissect_batadv(const struct sk_buff *skb,
			  struct flow_dissector_key_control *key_control,
			  const void *data, __be16 *p_proto, int *p_nhoff,
			  int hlen, unsigned int flags)
{
	struct {
		struct batadv_unicast_packet batadv_unicast;
		struct ethhdr eth;
	} *hdr, _hdr;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
				   &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
		return FLOW_DISSECT_RET_OUT_BAD;

	*p_proto = hdr->eth.h_proto;
	*p_nhoff += sizeof(*hdr);

	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}

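/* The htons(0x0FFF) mask above keeps the low twelve bits of the 16-bit word
 * at offset 12 of the TCP header: the nine flag bits (FIN through NS) plus
 * the three remaining reserved bits, while discarding the data-offset
 * nibble.
 */
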
static void
__skb_flow_dissect_ports(const struct sk_buff *skb,
			 struct flow_dissector *flow_dissector,
			 void *target_container, const void *data,
			 int nhoff, u8 ip_proto, int hlen)
{
	enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
	struct flow_dissector_key_ports *key_ports;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;

	if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
		return;

	key_ports = skb_flow_dissector_target(flow_dissector,
					      dissector_ports,
					      target_container);
	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
						data, hlen);
}

static void
|
|
|
|
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
|
|
|
|
struct flow_dissector *flow_dissector,
|
2021-03-14 11:11:23 +00:00
|
|
|
void *target_container, const void *data,
|
|
|
|
const struct iphdr *iph)
|
2017-06-01 18:37:37 +00:00
|
|
|
{
|
|
|
|
struct flow_dissector_key_ip *key_ip;
|
|
|
|
|
|
|
|
if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
|
|
|
|
return;
|
|
|
|
|
|
|
|
key_ip = skb_flow_dissector_target(flow_dissector,
|
|
|
|
FLOW_DISSECTOR_KEY_IP,
|
|
|
|
target_container);
|
|
|
|
key_ip->tos = iph->tos;
|
|
|
|
key_ip->ttl = iph->ttl;
|
|
|
|
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}
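
/* For both address families the same FLOW_DISSECTOR_KEY_IP key is used:
 * 'tos' carries the IPv4 TOS byte or the IPv6 traffic class (via
 * ipv6_get_dsfield()), and 'ttl' carries TTL or hop_limit respectively.
 */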

/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}
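
/* skb_flow_dissect_allowed() bounds the header-parsing loop in
 * __skb_flow_dissect(), so a crafted packet with deeply nested
 * encapsulations cannot keep the dissector busy indefinitely.
 */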

static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
				     struct flow_dissector *flow_dissector,
				     void *target_container)
{
	struct flow_dissector_key_ports *key_ports = NULL;
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;

	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);
	key_control->thoff = flow_keys->thoff;
	if (flow_keys->is_frag)
		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
	if (flow_keys->is_first_frag)
		key_control->flags |= FLOW_DIS_FIRST_FRAG;
	if (flow_keys->is_encap)
		key_control->flags |= FLOW_DIS_ENCAPSULATION;

	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);
	key_basic->n_proto = flow_keys->n_proto;
	key_basic->ip_proto = flow_keys->ip_proto;

	if (flow_keys->addr_proto == ETH_P_IP &&
	    dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						      target_container);
		key_addrs->v4addrs.src = flow_keys->ipv4_src;
		key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_keys->addr_proto == ETH_P_IPV6 &&
		   dissector_uses_key(flow_dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						      target_container);
		memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
		       sizeof(key_addrs->v6addrs.src));
		memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
		       sizeof(key_addrs->v6addrs.dst));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS_RANGE,
						      target_container);

	if (key_ports) {
		key_ports->src = flow_keys->sport;
		key_ports->dst = flow_keys->dport;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
		key_tags = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_FLOW_LABEL,
						     target_container);
		key_tags->flow_label = ntohl(flow_keys->flow_label);
	}
}
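
/* __skb_flow_bpf_to_target() translates the fixed struct bpf_flow_keys
 * filled by a BPF program into whatever subset of dissector keys the
 * caller requested; keys the dissector does not use are left untouched.
 */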

u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		     __be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct bpf_flow_keys *flow_keys = ctx->flow_keys;
	u32 result;

	/* Pass parameters to the BPF program */
	memset(flow_keys, 0, sizeof(*flow_keys));
	flow_keys->n_proto = proto;
	flow_keys->nhoff = nhoff;
	flow_keys->thoff = flow_keys->nhoff;

	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG !=
		     (int)FLOW_DISSECTOR_F_PARSE_1ST_FRAG);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
	flow_keys->flags = flags;

	result = bpf_prog_run_pin_on_cpu(prog, ctx);

	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
				   flow_keys->nhoff, hlen);

	return result;
}
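
/* The clamp_t() calls above force nhoff/thoff back into the
 * [nhoff, hlen] window even if the BPF program wrote bogus offsets, so
 * later header accesses based on them stay inside the linear data.
 */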

static bool is_pppoe_ses_hdr_valid(const struct pppoe_hdr *hdr)
{
	return hdr->ver == 1 && hdr->type == 1 && hdr->code == 0;
}
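
/* Per RFC 2516, a PPPoE session-stage header has VER = 1, TYPE = 1 and
 * CODE = 0x00; anything else is not ordinary session payload and is not
 * dissected further.
 */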

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @net: associated network namespace, derived from @skb if NULL
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: flags that control the dissection process, e.g.
 *         FLOW_DISSECTOR_F_STOP_AT_ENCAP.
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			__be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
	/* Several MPLS Label Stack Entries may be parsed (bounded by
	 * FLOW_DIS_MPLS_MAX); track whether an entropy label was seen and
	 * the index of the current LSE.
	 */
	bool mpls_el = false;
	int mpls_lse = 0;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
			     proto == htons(ETH_P_XDSA))) {
			struct metadata_dst *md_dst = skb_metadata_dst(skb);
			const struct dsa_device_ops *ops;
			int offset = 0;

			ops = skb->dev->dsa_ptr->tag_ops;
			/* Only DSA header taggers break flow dissection */
			if (ops->needed_headroom &&
			    (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)) {
				if (ops->flow_dissect)
					ops->flow_dissect(skb, &proto, &offset);
				else
					dsa_tag_generic_flow_dissect(skb,
								     &proto,
								     &offset);
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}
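
	/* The ETH_P_XDSA check above restricts the tagger fixup to the Rx
	 * path: on Tx the skb already carries the real protocol and network
	 * offset, and running the tagger's flow_dissect() there would only
	 * corrupt flow dissection and hashing.
	 */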

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb) {
		if (!net) {
			if (skb->dev)
				net = dev_net(skb->dev);
			else if (skb->sk)
				net = sock_net(skb->sk);
		}
	}

	WARN_ON_ONCE(!net);
	if (net) {
		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
		struct bpf_prog_array *run_array;

		rcu_read_lock();
		run_array = rcu_dereference(init_net.bpf.run_array[type]);
		if (!run_array)
			run_array = rcu_dereference(net->bpf.run_array[type]);

		if (run_array) {
			struct bpf_flow_keys flow_keys;
			struct bpf_flow_dissector ctx = {
				.flow_keys = &flow_keys,
				.data = data,
				.data_end = data + hlen,
			};
			__be16 n_proto = proto;
			struct bpf_prog *prog;
			u32 result;

			if (skb) {
				ctx.skb = skb;
				/* we can't use 'proto' in the skb case
				 * because it might be set to skb->vlan_proto
				 * which has been pulled from the data
				 */
				n_proto = skb->protocol;
			}

			prog = READ_ONCE(run_array->items[0].prog);
			result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
						  hlen, flags);
			if (result == BPF_FLOW_DISSECTOR_CONTINUE)
				goto dissect_continue;
			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
						 target_container);
			rcu_read_unlock();
			return result == BPF_OK;
		}
dissect_continue:
		rcu_read_unlock();
	}
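
	/* When a BPF flow dissector program is attached (looked up first in
	 * init_net, then in the skb's own netns), it replaces the C
	 * dissection below unless it returns BPF_FLOW_DISSECTOR_CONTINUE
	 * to fall through to it.
	 */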

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
		struct flow_dissector_key_num_of_vlans *key_num_of_vlans;

		key_num_of_vlans = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
							     target_container);
		key_num_of_vlans->num_of_vlans = 0;
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			/* Copy src and dst separately; a single memcpy over
			 * both would read past the 'saddr' subobject and
			 * trigger -Warray-bounds warnings.
			 */
			memcpy(&key_addrs->v4addrs.src, &iph->saddr,
			       sizeof(key_addrs->v4addrs.src));
			memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v4addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs.src, &iph->saddr,
			       sizeof(key_addrs->v6addrs.src));
			memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v6addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan = NULL;
		struct vlan_hdr _vlan;
		__be16 saved_vlan_tpid = proto;

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
		    skb && skb_vlan_tag_present(skb)) {
			proto = skb->protocol;
		} else {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
		}

		if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_NUM_OF_VLANS) &&
		    !(key_control->flags & FLOW_DIS_ENCAPSULATION)) {
			struct flow_dissector_key_num_of_vlans *key_nvs;

			key_nvs = skb_flow_dissector_target(flow_dissector,
							    FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
							    target_container);
			key_nvs->num_of_vlans++;
		}
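
		/* The FLOW_DIS_ENCAPSULATION test above means vlan tags found
		 * inside tunnel payload (e.g. behind GRE) are not counted;
		 * num_of_vlans reflects outer tags only.
		 */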
|
|
|
|
|
2018-07-06 05:38:14 +00:00
|
|
|
if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
|
|
|
|
dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
|
|
|
|
} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
|
|
|
|
dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
|
|
|
|
} else {
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dissector_uses_key(flow_dissector, dissector_vlan)) {
|
2016-08-17 10:36:11 +00:00
|
|
|
key_vlan = skb_flow_dissector_target(flow_dissector,
|
2018-07-06 05:38:14 +00:00
|
|
|
dissector_vlan,
|
2015-06-04 16:16:43 +00:00
|
|
|
target_container);
|
|
|
|
|
2018-07-06 05:38:14 +00:00
|
|
|
if (!vlan) {
|
2016-08-17 10:36:11 +00:00
|
|
|
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
|
2018-11-07 17:07:03 +00:00
|
|
|
key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
|
2016-08-17 10:36:11 +00:00
|
|
|
} else {
|
|
|
|
key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
|
2016-08-17 10:36:10 +00:00
|
|
|
VLAN_VID_MASK;
|
2016-08-17 10:36:11 +00:00
|
|
|
key_vlan->vlan_priority =
|
|
|
|
(ntohs(vlan->h_vlan_TCI) &
|
|
|
|
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
|
|
|
|
}
|
2018-07-06 05:38:12 +00:00
|
|
|
key_vlan->vlan_tpid = saved_vlan_tpid;
|
2022-04-06 11:22:41 +00:00
|
|
|
key_vlan->vlan_eth_type = proto;
|
2015-06-04 16:16:43 +00:00
|
|
|
}
|
|
|
|
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
break;
|
2011-11-28 05:22:18 +00:00
|
|
|
}
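As a side note on the TCI unpacking above, a small self-contained sketch; the mask and shift values mirror VLAN_VID_MASK, VLAN_PRIO_MASK and VLAN_PRIO_SHIFT, the rest is illustrative.
```
/* An 802.1Q TCI carries PCP in bits 15..13, DEI in bit 12 and the VLAN ID
 * in bits 11..0.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_VLAN_VID_MASK	0x0fff
#define TOY_VLAN_PRIO_MASK	0xe000
#define TOY_VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t tci = 0x6064;	/* host order for clarity: PCP 3, VID 100 */

	printf("vid=%u prio=%u\n",
	       (unsigned)(tci & TOY_VLAN_VID_MASK),
	       (unsigned)((tci & TOY_VLAN_PRIO_MASK) >> TOY_VLAN_PRIO_SHIFT));
	return 0;
}
```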
|
2014-03-12 17:04:17 +00:00
|
|
|
case htons(ETH_P_PPP_SES): {
|
2011-11-28 05:22:18 +00:00
|
|
|
struct {
|
|
|
|
struct pppoe_hdr hdr;
|
|
|
|
__be16 proto;
|
|
|
|
} *hdr, _hdr;
|
2022-07-18 12:18:10 +00:00
|
|
|
u16 ppp_proto;
|
|
|
|
|
2014-08-23 19:13:41 +00:00
|
|
|
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
|
2017-09-01 21:04:11 +00:00
|
|
|
if (!hdr) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
Networking changes for 6.0.
Core
----
- Refactor the forward memory allocation to better cope with memory
pressure with many open sockets, moving from a per socket cache to
a per-CPU one
- Replace rwlocks with RCU for better fairness in ping, raw sockets
and IP multicast router.
- Network-side support for IO uring zero-copy send.
- A few skb drop reason improvements, including generating the source file
with the string mapping via codegen instead of using macro magic.
- Rename reference tracking helpers to a more consistent
netdev_* schema.
- Adapt u64_stats_t type to address load/store tearing issues.
- Refine debug helper usage to reduce the log noise caused by bots.
BPF
---
- Improve socket map performance, avoiding skb cloning on read
operation.
- Add support for 64-bit enums, to match types exposed by the kernel.
- Introduce support for sleepable uprobe programs.
- Introduce support for enum textual representation in libbpf.
- New helpers to implement synproxy with eBPF/XDP.
- Improve loop performance, inlining indirect calls when
possible.
- Removed all the deprecated libbpf APIs.
- Implement new eBPF-based LSM flavor.
- Add type match support, which allows accurate queries of the
types used by eBPF.
- A few TCP congestion control framework usability improvements.
- Add new infrastructure to manipulate CT entries via eBPF programs.
- Allow for livepatch (KLP) and BPF trampolines to attach to the same
kernel function.
Protocols
---------
- Introduce per network namespace lookup tables for unix sockets,
increasing scalability and reducing contention.
- Preparation work for Wi-Fi 7 Multi-Link Operation (MLO) support.
- Add support to forcibly close TIME_WAIT TCP sockets via user-space
tools.
- Significant performance improvement for the TLS 1.3 receive path,
both for zero-copy and not-zero-copy.
- Support for changing the initial MPTCP subflow priority/backup
status.
- Introduce virtually contiguous buffers for sockets over RDMA,
to cope better with memory pressure.
- Extend CAN ethtool support with timestamping capabilities
- Refactor CAN build infrastructure to allow building only the needed
features.
Driver API
----------
- Remove devlink mutex to allow parallel commands on multiple links.
- Add support for pause stats in distributed switch.
- Implement devlink helpers to query and flash line cards.
- New helper for phy mode to register conversion.
New hardware / drivers
----------------------
- Ethernet DSA driver for the rockchip mt7531 on BPI-R2 Pro.
- Ethernet DSA driver for the Renesas RZ/N1 A5PSW switch.
- Ethernet DSA driver for the Microchip LAN937x switch.
- Ethernet PHY driver for the Aquantia AQR113C EPHY.
- CAN driver for the OBD-II ELM327 interface.
- CAN driver for RZ/N1 SJA1000 CAN controller.
- Bluetooth: Infineon CYW55572 Wi-Fi plus Bluetooth combo device.
Drivers
-------
- Intel Ethernet NICs:
- i40e: add support for vlan pruning
- i40e: add support for XDP fragmented packets
- ice: improved vlan offload support
- ice: add support for PPPoE offload
- Mellanox Ethernet (mlx5)
- refactor packet steering offload for performance and scalability
- extend support for TC offload
- refactor devlink code to clean-up the locking schema
- support stacked vlans for bridge offloads
- use TLS objects pool to improve connection rate
- Netronome Ethernet NICs (nfp):
- extend support for IPv6 fields mangling offload
- add support for vepa mode in HW bridge
- better support for virtio data path acceleration (VDPA)
- enable TSO by default
- Microsoft vNIC driver (mana)
- add support for XDP redirect
- Other Ethernet drivers:
- bonding: add per-port priority support
- microchip lan743x: extend phy support
- Fungible funeth: support UDP segmentation offload and XDP xmit
- Solarflare EF100: add support for virtual function representors
- MediaTek SoC: add XDP support
- Mellanox Ethernet/IB switch (mlxsw):
- dropped support for unreleased H/W (XM router).
- improved stats accuracy
- unified bridge model conversion improving scalability
(parts 1-6)
- support for PTP in Spectrum-2 asics
- Broadcom PHYs
- add PTP support for BCM54210E
- add support for the BCM53128 internal PHY
- Marvell Ethernet switches (prestera):
- implement support for multicast forwarding offload
- Embedded Ethernet switches:
- refactor OcteonTx MAC filter for better scalability
- improve TC H/W offload for the Felix driver
- refactor the Microchip ksz8 and ksz9477 drivers to share
the probe code (parts 1, 2), add support for phylink
mac configuration
- Other WiFi:
- Microchip wilc1000: disable WEP support and enable WPA3
- Atheros ath10k: encapsulation offload support
Old code removal:
- Neterion vxge ethernet driver: untouched for more than
10 years.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Merge tag 'net-next-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
* tag 'net-next-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1890 commits)
doc: sfp-phylink: Fix a broken reference
wireguard: selftests: support UML
wireguard: allowedips: don't corrupt stack when detecting overflow
wireguard: selftests: update config fragments
wireguard: ratelimiter: use hrtimer in selftest
net/mlx5e: xsk: Discard unaligned XSK frames on striding RQ
net: usb: ax88179_178a: Bind only to vendor-specific interface
selftests: net: fix IOAM test skip return code
net: usb: make USB_RTL8153_ECM non user configurable
net: marvell: prestera: remove reduntant code
octeontx2-pf: Reduce minimum mtu size to 60
net: devlink: Fix missing mutex_unlock() call
net/tls: Remove redundant workqueue flush before destroy
net: txgbe: Fix an error handling path in txgbe_probe()
net: dsa: Fix spelling mistakes and cleanup code
Documentation: devlink: add add devlink-selftests to the table of contents
dccp: put dccp_qpolicy_full() and dccp_qpolicy_push() in the same lock
net: ionic: fix error check for vlan flags in ionic_set_nic_features()
net: ice: fix error NETIF_F_HW_VLAN_CTAG_FILTER check in ice_vsi_sync_fltr()
nfp: flower: add support for tunnel offload without key ID
...
2022-08-03 23:29:08 +00:00
|
|
|
if (!is_pppoe_ses_hdr_valid(&hdr->hdr)) {
|
2022-07-18 12:18:10 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* least significant bit of the most significant octet
|
|
|
|
* indicates if protocol field was compressed
|
|
|
|
*/
|
|
|
|
ppp_proto = ntohs(hdr->proto);
|
|
|
|
if (ppp_proto & 0x0100) {
|
|
|
|
ppp_proto = ppp_proto >> 8;
|
|
|
|
nhoff += PPPOE_SES_HLEN - 1;
|
|
|
|
} else {
|
|
|
|
nhoff += PPPOE_SES_HLEN;
|
|
|
|
}
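The compression test above follows PPP Protocol Field Compression (RFC 1661): protocol numbers are assigned so an odd first octet can only mean a compressed one-byte field. A hedged sketch, assuming buf points at the two bytes following the PPPoE header:
```
/* Decode a possibly-compressed PPP protocol field; buf[0] & 0x01 is the
 * same condition as (ntohs(proto) & 0x0100) in the dissector above.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t toy_ppp_decode_proto(const uint8_t buf[2], size_t *fld_len)
{
	if (buf[0] & 0x01) {		/* odd first octet: compressed */
		*fld_len = 1;
		return buf[0];
	}
	*fld_len = 2;
	return (uint16_t)((buf[0] << 8) | buf[1]);
}

int main(void)
{
	const uint8_t raw[2] = { 0x00, 0x21 };	/* PPP_IP, uncompressed */
	const uint8_t cmp[2] = { 0x21, 0x45 };	/* PPP_IP compressed + payload */
	size_t len;
	unsigned proto;

	proto = toy_ppp_decode_proto(raw, &len);
	printf("%#x (%zu bytes)\n", proto, len);	/* 0x21 (2 bytes) */
	proto = toy_ppp_decode_proto(cmp, &len);
	printf("%#x (%zu bytes)\n", proto, len);	/* 0x21 (1 byte) */
	return 0;
}
```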
|
|
|
|
|
|
|
|
if (ppp_proto == PPP_IP) {
|
2017-09-01 21:04:11 +00:00
|
|
|
proto = htons(ETH_P_IP);
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
2022-07-18 12:18:10 +00:00
|
|
|
} else if (ppp_proto == PPP_IPV6) {
|
2017-09-01 21:04:11 +00:00
|
|
|
proto = htons(ETH_P_IPV6);
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
2022-07-18 12:18:10 +00:00
|
|
|
} else if (ppp_proto == PPP_MPLS_UC) {
|
|
|
|
proto = htons(ETH_P_MPLS_UC);
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
} else if (ppp_proto == PPP_MPLS_MC) {
|
|
|
|
proto = htons(ETH_P_MPLS_MC);
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
} else if (ppp_proto_is_valid(ppp_proto)) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
} else {
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
2011-11-28 05:22:18 +00:00
|
|
|
}
|
2022-07-18 12:18:10 +00:00
|
|
|
|
|
|
|
if (dissector_uses_key(flow_dissector,
|
|
|
|
FLOW_DISSECTOR_KEY_PPPOE)) {
|
|
|
|
struct flow_dissector_key_pppoe *key_pppoe;
|
|
|
|
|
|
|
|
key_pppoe = skb_flow_dissector_target(flow_dissector,
|
|
|
|
FLOW_DISSECTOR_KEY_PPPOE,
|
|
|
|
target_container);
|
|
|
|
key_pppoe->session_id = hdr->hdr.sid;
|
|
|
|
key_pppoe->ppp_proto = htons(ppp_proto);
|
|
|
|
key_pppoe->type = htons(ETH_P_PPP_SES);
|
|
|
|
}
|
2017-09-01 21:04:11 +00:00
|
|
|
break;
|
2011-11-28 05:22:18 +00:00
|
|
|
}
|
2015-01-22 16:10:32 +00:00
|
|
|
case htons(ETH_P_TIPC): {
|
tipc: improve link resiliency when rps is activated
Currently, the TIPC RPS dissector is based only on the incoming packets'
source node address, hence steering all traffic from a node to the same
core. We have seen that this makes the links vulnerable to starvation
and unnecessary resets when we turn down the link tolerance to very low
values.
To reduce the risk of this happening, we exempt probe and probe replies
packets from the convergence to one core per source node. Instead, we do
the opposite, - we try to diverge those packets across as many cores as
possible, by randomizing the flow selector key.
To make such packets identifiable to the dissector, we add a new
'is_keepalive' bit to word 0 of the LINK_PROTOCOL header. This bit is
set both for PROBE and PROBE_REPLY messages, and only for those.
It should be noted that these packets are not part of any flow anyway,
and only constitute a minuscule fraction of all packets sent across a
link. Hence, there is no risk that this will affect overall performance.
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-11-08 08:59:26 +00:00
|
|
|
struct tipc_basic_hdr *hdr, _hdr;
|
|
|
|
|
|
|
|
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
|
|
|
|
data, hlen, &_hdr);
|
2017-09-01 21:04:11 +00:00
|
|
|
if (!hdr) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
2015-05-12 12:56:16 +00:00
|
|
|
|
2015-09-02 04:19:17 +00:00
|
|
|
if (dissector_uses_key(flow_dissector,
|
2017-11-08 08:59:26 +00:00
|
|
|
FLOW_DISSECTOR_KEY_TIPC)) {
|
2015-05-12 12:56:16 +00:00
|
|
|
key_addrs = skb_flow_dissector_target(flow_dissector,
|
2017-11-08 08:59:26 +00:00
|
|
|
FLOW_DISSECTOR_KEY_TIPC,
|
2015-05-12 12:56:16 +00:00
|
|
|
target_container);
|
2017-11-08 08:59:26 +00:00
|
|
|
key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
|
|
|
|
key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
|
2015-05-12 12:56:16 +00:00
|
|
|
}
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
2015-01-22 16:10:32 +00:00
|
|
|
}
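The TIPC commit message above motivates spreading keepalives across cores; a simplified stand-in for tipc_hdr_rps_key() (the struct and field names here are assumptions, not the real TIPC header layout) might look like:
```
/* Stable per-node RPS key for normal traffic; a random key for
 * PROBE/PROBE_REPLY keepalives so they fan out across CPUs.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_tipc_hdr {
	uint32_t prev_node;	/* stable source-node identity */
	int is_keepalive;	/* set only for PROBE / PROBE_REPLY */
};

static uint32_t toy_tipc_rps_key(const struct toy_tipc_hdr *hdr)
{
	return hdr->is_keepalive ? (uint32_t)random() : hdr->prev_node;
}

int main(void)
{
	struct toy_tipc_hdr data  = { .prev_node = 0x1001, .is_keepalive = 0 };
	struct toy_tipc_hdr probe = { .prev_node = 0x1001, .is_keepalive = 1 };

	printf("data key %#x, probe key %#x\n",
	       (unsigned)toy_tipc_rps_key(&data),
	       (unsigned)toy_tipc_rps_key(&probe));
	return 0;
}
```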
|
2015-06-04 16:16:46 +00:00
|
|
|
|
|
|
|
case htons(ETH_P_MPLS_UC):
|
2017-03-06 15:39:52 +00:00
|
|
|
case htons(ETH_P_MPLS_MC):
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
|
2017-03-06 15:39:52 +00:00
|
|
|
target_container, data,
|
flow_dissector: Parse multiple MPLS Label Stack Entries
The current MPLS dissector only parses the first MPLS Label Stack
Entry (second LSE can be parsed too, but only to set a key_id).
This patch adds the possibility to parse several LSEs by making
__skb_flow_dissect_mpls() return FLOW_DISSECT_RET_PROTO_AGAIN as long
as the Bottom Of Stack bit hasn't been seen, up to a maximum of
FLOW_DIS_MPLS_MAX entries.
FLOW_DIS_MPLS_MAX is arbitrarily set to 7. This should be enough for
many practical purposes, without wasting too much space.
To record the parsed values, flow_dissector_key_mpls is modified to
store an array of stack entries, instead of just the values of the
first one. A bit field, "used_lses", is also added to keep track of
the LSEs that have been set. The objective is to avoid defining a
new FLOW_DISSECTOR_KEY_MPLS_XX for each level of the MPLS stack.
TC flower is adapted for the new struct flow_dissector_key_mpls layout.
Matching on several MPLS Label Stack Entries will be added in the next
patch.
The NFP and MLX5 drivers are also adapted: nfp_flower_compile_mac() and
mlx5's parse_tunnel() now verify that the rule only uses the first LSE
and fail if it doesn't.
Finally, the behaviour of the FLOW_DISSECTOR_KEY_MPLS_ENTROPY key is
slightly modified. Instead of recording the first Entropy Label, it
now records the last one. This shouldn't have any consequences since
there doesn't seem to be any user of FLOW_DISSECTOR_KEY_MPLS_ENTROPY
in the tree. We'd probably better do a hash of all parsed MPLS labels
instead (excluding reserved labels) anyway. That'd give better entropy
and would probably also simplify the code. But that's not the purpose
of this patch, so I'm keeping that as a future possible improvement.
Signed-off-by: Guillaume Nault <gnault@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-05-26 12:29:00 +00:00
|
|
|
nhoff, hlen, mpls_lse,
|
|
|
|
&mpls_el);
|
|
|
|
nhoff += sizeof(struct mpls_label);
|
|
|
|
mpls_lse++;
|
2017-09-01 21:04:11 +00:00
|
|
|
break;
|
2014-09-05 23:20:26 +00:00
|
|
|
case htons(ETH_P_FCOE):
|
2017-09-01 21:04:11 +00:00
|
|
|
if ((hlen - nhoff) < FCOE_HEADER_LEN) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
2016-02-24 17:29:51 +00:00
|
|
|
|
|
|
|
nhoff += FCOE_HEADER_LEN;
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
2017-01-11 13:05:42 +00:00
|
|
|
|
|
|
|
case htons(ETH_P_ARP):
|
2017-03-06 15:39:51 +00:00
|
|
|
case htons(ETH_P_RARP):
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = __skb_flow_dissect_arp(skb, flow_dissector,
|
2017-03-06 15:39:51 +00:00
|
|
|
target_container, data,
|
2017-09-01 21:04:11 +00:00
|
|
|
nhoff, hlen);
|
|
|
|
break;
|
|
|
|
|
2017-12-21 09:17:42 +00:00
|
|
|
case htons(ETH_P_BATMAN):
|
|
|
|
fdret = __skb_flow_dissect_batadv(skb, key_control, data,
|
|
|
|
&proto, &nhoff, hlen, flags);
|
|
|
|
break;
|
|
|
|
|
2021-01-12 19:07:13 +00:00
|
|
|
case htons(ETH_P_1588): {
|
|
|
|
struct ptp_header *hdr, _hdr;
|
|
|
|
|
|
|
|
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
|
|
|
|
hlen, &_hdr);
|
|
|
|
if (!hdr) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-09-13 06:39:05 +00:00
|
|
|
nhoff += sizeof(struct ptp_header);
|
2021-01-12 19:07:13 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-03-10 07:35:05 +00:00
|
|
|
case htons(ETH_P_PRP):
|
2022-02-28 19:58:56 +00:00
|
|
|
case htons(ETH_P_HSR): {
|
|
|
|
struct hsr_tag *hdr, _hdr;
|
|
|
|
|
|
|
|
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen,
|
|
|
|
&_hdr);
|
|
|
|
if (!hdr) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
proto = hdr->encap_proto;
|
|
|
|
nhoff += HSR_HLEN;
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-06-08 10:56:46 +00:00
|
|
|
case htons(ETH_P_CFM):
|
|
|
|
fdret = __skb_flow_dissect_cfm(skb, flow_dissector,
|
|
|
|
target_container, data,
|
|
|
|
nhoff, hlen);
|
|
|
|
break;
|
|
|
|
|
2017-09-01 21:04:11 +00:00
|
|
|
default:
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Process result of proto processing */
|
|
|
|
switch (fdret) {
|
|
|
|
case FLOW_DISSECT_RET_OUT_GOOD:
|
|
|
|
goto out_good;
|
|
|
|
case FLOW_DISSECT_RET_PROTO_AGAIN:
|
2017-09-01 21:04:12 +00:00
|
|
|
if (skb_flow_dissect_allowed(&num_hdrs))
|
|
|
|
goto proto_again;
|
|
|
|
goto out_good;
|
2017-09-01 21:04:11 +00:00
|
|
|
case FLOW_DISSECT_RET_CONTINUE:
|
|
|
|
case FLOW_DISSECT_RET_IPPROTO_AGAIN:
|
|
|
|
break;
|
|
|
|
case FLOW_DISSECT_RET_OUT_BAD:
|
2011-11-28 05:22:18 +00:00
|
|
|
default:
|
2015-09-01 16:24:26 +00:00
|
|
|
goto out_bad;
|
2011-11-28 05:22:18 +00:00
|
|
|
}
|
|
|
|
|
2015-06-12 16:01:06 +00:00
|
|
|
ip_proto_again:
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_CONTINUE;
|
|
|
|
|
2011-11-28 05:22:18 +00:00
|
|
|
switch (ip_proto) {
|
2017-03-06 15:39:55 +00:00
|
|
|
case IPPROTO_GRE:
|
cls_flower: Fix inability to match GRE/IPIP packets
When a packet of a new flow arrives in openvswitch kernel module, it dissects
the packet and passes the extracted flow key to the ovs-vswitchd daemon. If hw-
offload configuration is enabled, the daemon creates a new TC flower entry to
bypass openvswitch kernel module for the flow (TC flower can also offload flows
to NICs but this time that does not matter).
In this processing flow, I found the following issue in cases of GRE/IPIP
packets.
When ovs_flow_key_extract() in openvswitch module parses a packet of a new
GRE (or IPIP) flow received on non-tunneling vports, it extracts information
of the outer IP header for ip_proto/src_ip/dst_ip match keys.
This means ovs-vswitchd creates a TC flower entry with IP protocol/addresses
match keys whose values are those of the outer IP header. OTOH, TC flower,
which uses flow_dissector (different parser from openvswitch module), extracts
information of the inner IP header.
The following flow is an example to describe the issue in more detail.
<----------- Outer IP -----------------> <---------- Inner IP ---------->
+----------+--------------+--------------+----------+----------+----------+
| ip_proto | src_ip | dst_ip | ip_proto | src_ip | dst_ip |
| 47 (GRE) | 192.168.10.1 | 192.168.10.2 | 6 (TCP) | 10.0.0.1 | 10.0.0.2 |
+----------+--------------+--------------+----------+----------+----------+
In this case, TC flower entry and extracted information are shown as below:
- ovs-vswitchd creates TC flower entry with:
- ip_proto: 47
- src_ip: 192.168.10.1
- dst_ip: 192.168.10.2
- TC flower extracts below for IP header matches:
- ip_proto: 6
- src_ip: 10.0.0.1
- dst_ip: 10.0.0.2
Thus, GRE or IPIP packets never match the TC flower entry, as each
dissector behaves differently.
IMHO, the behavior of TC flower (flow dissector) does not look correct,
as ip_proto/src_ip/dst_ip in TC flower match means the outermost IP
header information except for GRE/IPIP cases. This patch adds a new
flow_dissector flag FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP which skips
dissection of the encapsulated inner GRE/IPIP header in TC flower
classifier.
Signed-off-by: Yoshiki Komachi <komachi.yoshiki@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-29 09:21:41 +00:00
|
|
|
if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
|
2017-03-06 15:39:55 +00:00
|
|
|
target_container, data,
|
2017-09-01 21:04:11 +00:00
|
|
|
&proto, &nhoff, &hlen, flags);
|
|
|
|
break;
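A toy model of the FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP semantics introduced by the cls_flower commit quoted above (all names except the flag's intent are hypothetical):
```
/* With the stop-before-encap flag set, dissection reports the outer
 * headers of a GRE packet instead of recursing into the inner ones.
 */
#include <stdio.h>

#define TOY_STOP_BEFORE_ENCAP 0x1

struct toy_pkt { int outer_proto, inner_proto; };

static int toy_dissect_ip_proto(const struct toy_pkt *p, unsigned int flags)
{
	if (p->outer_proto == 47 /* GRE */ && !(flags & TOY_STOP_BEFORE_ENCAP))
		return p->inner_proto;	/* descend into the tunnel */
	return p->outer_proto;		/* stop at the outer header */
}

int main(void)
{
	struct toy_pkt p = { .outer_proto = 47, .inner_proto = 6 /* TCP */ };

	printf("%d\n", toy_dissect_ip_proto(&p, 0));			 /* 6 */
	printf("%d\n", toy_dissect_ip_proto(&p, TOY_STOP_BEFORE_ENCAP)); /* 47 */
	return 0;
}
```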
|
|
|
|
|
2015-06-12 16:01:06 +00:00
|
|
|
case NEXTHDR_HOP:
|
|
|
|
case NEXTHDR_ROUTING:
|
|
|
|
case NEXTHDR_DEST: {
|
|
|
|
u8 _opthdr[2], *opthdr;
|
|
|
|
|
|
|
|
if (proto != htons(ETH_P_IPV6))
|
|
|
|
break;
|
|
|
|
|
|
|
|
opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
|
|
|
|
data, hlen, &_opthdr);
|
2017-09-01 21:04:11 +00:00
|
|
|
if (!opthdr) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
2015-06-12 16:01:06 +00:00
|
|
|
|
2015-06-13 02:31:32 +00:00
|
|
|
ip_proto = opthdr[0];
|
|
|
|
nhoff += (opthdr[1] + 1) << 3;
|
2015-06-12 16:01:06 +00:00
|
|
|
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
|
|
|
|
break;
|
2015-06-12 16:01:06 +00:00
|
|
|
}
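The `(opthdr[1] + 1) << 3` step encodes the RFC 8200 rule that Hdr Ext Len counts 8-octet units excluding the first 8 octets; a one-function sketch:
```
/* Total size in bytes of a Hop-by-Hop/Routing/Destination options header
 * whose second byte (Hdr Ext Len) is hdr_ext_len.
 */
#include <stdio.h>

static unsigned int toy_ext_hdr_bytes(unsigned char hdr_ext_len)
{
	return (hdr_ext_len + 1u) << 3;
}

int main(void)
{
	printf("%u\n", toy_ext_hdr_bytes(0));	/* 8: the minimum */
	printf("%u\n", toy_ext_hdr_bytes(1));	/* 16 */
	return 0;
}
```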
|
2015-09-01 16:24:29 +00:00
|
|
|
case NEXTHDR_FRAGMENT: {
|
|
|
|
struct frag_hdr _fh, *fh;
|
|
|
|
|
|
|
|
if (proto != htons(ETH_P_IPV6))
|
|
|
|
break;
|
|
|
|
|
|
|
|
fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
|
|
|
|
data, hlen, &_fh);
|
|
|
|
|
2017-09-01 21:04:11 +00:00
|
|
|
if (!fh) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_BAD;
|
|
|
|
break;
|
|
|
|
}
|
2015-09-01 16:24:29 +00:00
|
|
|
|
2015-09-01 23:46:08 +00:00
|
|
|
key_control->flags |= FLOW_DIS_IS_FRAGMENT;
|
2015-09-01 16:24:29 +00:00
|
|
|
|
|
|
|
nhoff += sizeof(_fh);
|
2016-02-24 17:29:44 +00:00
|
|
|
ip_proto = fh->nexthdr;
|
2015-09-01 16:24:29 +00:00
|
|
|
|
|
|
|
if (!(fh->frag_off & htons(IP6_OFFSET))) {
|
2015-09-01 23:46:08 +00:00
|
|
|
key_control->flags |= FLOW_DIS_FIRST_FRAG;
|
2017-09-01 21:04:11 +00:00
|
|
|
if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
|
|
|
|
fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
|
|
|
|
break;
|
|
|
|
}
|
2015-09-01 16:24:29 +00:00
|
|
|
}
|
2017-09-01 21:04:11 +00:00
|
|
|
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
2015-09-01 16:24:29 +00:00
|
|
|
}
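A short sketch of the first-fragment test above; TOY_IP6_OFFSET mirrors the kernel's IP6_OFFSET mask, and the packet values are made up for illustration.
```
/* An IPv6 fragment is the first fragment iff its 13-bit fragment offset is
 * zero; the 0xfff8 mask selects those bits and ignores the M bit.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_IP6_OFFSET 0xfff8

static bool toy_is_first_frag(uint16_t frag_off_net)
{
	return !(frag_off_net & htons(TOY_IP6_OFFSET));
}

int main(void)
{
	printf("%d\n", toy_is_first_frag(htons(0x0001)));	/* 1: offset 0, M set */
	printf("%d\n", toy_is_first_frag(htons(0x00b0)));	/* 0: offset 22 */
	return 0;
}
```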
|
2011-11-28 05:22:18 +00:00
|
|
|
case IPPROTO_IPIP:
|
2021-10-29 09:21:41 +00:00
|
|
|
if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-07-29 18:07:36 +00:00
|
|
|
proto = htons(ETH_P_IP);
|
2015-09-01 16:24:32 +00:00
|
|
|
|
2015-09-01 23:46:08 +00:00
|
|
|
key_control->flags |= FLOW_DIS_ENCAPSULATION;
|
2017-09-01 21:04:11 +00:00
|
|
|
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
break;
|
2015-09-01 16:24:32 +00:00
|
|
|
|
2013-07-29 18:07:42 +00:00
|
|
|
case IPPROTO_IPV6:
|
2021-10-29 09:21:41 +00:00
|
|
|
if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-07-29 18:07:42 +00:00
|
|
|
proto = htons(ETH_P_IPV6);
|
2015-09-01 16:24:32 +00:00
|
|
|
|
2015-09-01 23:46:08 +00:00
|
|
|
key_control->flags |= FLOW_DIS_ENCAPSULATION;
|
2017-09-01 21:04:11 +00:00
|
|
|
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
|
|
|
|
fdret = FLOW_DISSECT_RET_OUT_GOOD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
break;
|
|
|
|
|
2015-09-01 16:24:32 +00:00
|
|
|
|
2015-06-04 16:16:46 +00:00
|
|
|
case IPPROTO_MPLS:
|
|
|
|
proto = htons(ETH_P_MPLS_UC);
|
2017-09-01 21:04:11 +00:00
|
|
|
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
|
|
|
break;
|
|
|
|
|
2017-05-23 16:40:44 +00:00
|
|
|
case IPPROTO_TCP:
|
|
|
|
__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
|
|
|
|
data, nhoff, hlen);
|
|
|
|
break;
|
2017-09-01 21:04:11 +00:00
|
|
|
|
2019-10-29 13:50:51 +00:00
|
|
|
case IPPROTO_ICMP:
|
|
|
|
case IPPROTO_ICMPV6:
|
|
|
|
__skb_flow_dissect_icmp(skb, flow_dissector, target_container,
|
|
|
|
data, nhoff, hlen);
|
|
|
|
break;
|
2022-09-08 17:16:41 +00:00
|
|
|
case IPPROTO_L2TP:
|
|
|
|
__skb_flow_dissect_l2tpv3(skb, flow_dissector, target_container,
|
|
|
|
data, nhoff, hlen);
|
|
|
|
break;
|
2023-08-01 01:40:58 +00:00
|
|
|
case IPPROTO_ESP:
|
|
|
|
__skb_flow_dissect_esp(skb, flow_dissector, target_container,
|
|
|
|
data, nhoff, hlen);
|
|
|
|
break;
|
|
|
|
case IPPROTO_AH:
|
|
|
|
__skb_flow_dissect_ah(skb, flow_dissector, target_container,
|
|
|
|
data, nhoff, hlen);
|
|
|
|
break;
|
2011-11-28 05:22:18 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-12-03 10:40:12 +00:00
|
|
|
if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
|
|
|
|
__skb_flow_dissect_ports(skb, flow_dissector, target_container,
|
|
|
|
data, nhoff, ip_proto, hlen);
|
2014-10-10 19:09:12 +00:00
|
|
|
|
2017-09-01 21:04:11 +00:00
|
|
|
/* Process result of IP proto processing */
|
|
|
|
switch (fdret) {
|
|
|
|
case FLOW_DISSECT_RET_PROTO_AGAIN:
|
2017-09-01 21:04:12 +00:00
|
|
|
if (skb_flow_dissect_allowed(&num_hdrs))
|
|
|
|
goto proto_again;
|
|
|
|
break;
|
2017-09-01 21:04:11 +00:00
|
|
|
case FLOW_DISSECT_RET_IPPROTO_AGAIN:
|
2017-09-01 21:04:12 +00:00
|
|
|
if (skb_flow_dissect_allowed(&num_hdrs))
|
|
|
|
goto ip_proto_again;
|
|
|
|
break;
|
2017-09-01 21:04:11 +00:00
|
|
|
case FLOW_DISSECT_RET_OUT_GOOD:
|
|
|
|
case FLOW_DISSECT_RET_CONTINUE:
|
|
|
|
break;
|
|
|
|
case FLOW_DISSECT_RET_OUT_BAD:
|
|
|
|
default:
|
|
|
|
goto out_bad;
|
|
|
|
}
|
|
|
|
|
2015-09-01 16:24:26 +00:00
|
|
|
out_good:
|
|
|
|
ret = true;
|
|
|
|
|
2016-11-10 00:04:46 +00:00
|
|
|
out:
|
2018-01-17 22:21:13 +00:00
|
|
|
key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
|
2015-09-01 16:24:26 +00:00
|
|
|
key_basic->n_proto = proto;
|
|
|
|
key_basic->ip_proto = ip_proto;
|
|
|
|
|
|
|
|
return ret;
|
2016-11-10 00:04:46 +00:00
|
|
|
|
|
|
|
out_bad:
|
|
|
|
ret = false;
|
|
|
|
goto out;
|
2011-11-28 05:22:18 +00:00
|
|
|
}
|
2014-08-23 19:13:41 +00:00
|
|
|
EXPORT_SYMBOL(__skb_flow_dissect);
|
2013-01-21 00:39:24 +00:00
|
|
|
|
2021-11-15 17:23:03 +00:00
|
|
|
static siphash_aligned_key_t hashrnd;
|
2013-10-23 18:06:00 +00:00
|
|
|
static __always_inline void __flow_hash_secret_init(void)
|
|
|
|
{
|
|
|
|
net_get_random_once(&hashrnd, sizeof(hashrnd));
|
|
|
|
}
|
|
|
|
|
2019-10-22 14:57:46 +00:00
|
|
|
static const void *flow_keys_hash_start(const struct flow_keys *flow)
|
2015-06-04 16:16:39 +00:00
|
|
|
{
|
2019-10-22 14:57:46 +00:00
|
|
|
BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
|
|
|
|
return &flow->FLOW_KEYS_HASH_START_FIELD;
|
2015-06-04 16:16:39 +00:00
|
|
|
}
|
|
|
|
|
2015-09-02 04:19:17 +00:00
|
|
|
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
|
2015-06-04 16:16:39 +00:00
|
|
|
{
|
2015-06-04 16:16:40 +00:00
|
|
|
size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
|
2019-11-02 20:12:51 +00:00
|
|
|
|
2015-06-04 16:16:39 +00:00
|
|
|
BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
|
2015-06-04 16:16:40 +00:00
|
|
|
|
|
|
|
switch (flow->control.addr_type) {
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
|
|
|
|
diff -= sizeof(flow->addrs.v4addrs);
|
|
|
|
break;
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
|
|
|
|
diff -= sizeof(flow->addrs.v6addrs);
|
|
|
|
break;
|
2017-11-08 08:59:26 +00:00
|
|
|
case FLOW_DISSECTOR_KEY_TIPC:
|
|
|
|
diff -= sizeof(flow->addrs.tipckey);
|
2015-06-04 16:16:41 +00:00
|
|
|
break;
|
2015-06-04 16:16:40 +00:00
|
|
|
}
|
2019-10-22 14:57:46 +00:00
|
|
|
return sizeof(*flow) - diff;
|
2015-06-04 16:16:40 +00:00
|
|
|
}
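flow_keys_hash_length() trims the hashed region so only the address-union member actually in use contributes, keeping stale bytes from a larger member out of the hash. A toy model under an assumed layout (the real struct flow_keys differs):
```
/* Hash length = everything from the hash start field to the end of the
 * struct, minus the unused tail of the trailing address union.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_v4 { uint32_t src, dst; };			/* 8 bytes */
struct toy_v6 { uint8_t src[16], dst[16]; };		/* 32 bytes */

union toy_addrs {
	struct toy_v4 v4;
	struct toy_v6 v6;
};

struct toy_flow_keys {
	int control;		/* not hashed */
	unsigned int basic;	/* hash starts here */
	union toy_addrs addrs;	/* hashed only up to the member in use */
};

static size_t toy_hash_length(int is_v4)
{
	size_t unused = sizeof(union toy_addrs) -
			(is_v4 ? sizeof(struct toy_v4) : sizeof(struct toy_v6));

	return sizeof(struct toy_flow_keys) -
	       offsetof(struct toy_flow_keys, basic) - unused;
}

int main(void)
{
	printf("v4: %zu bytes hashed, v6: %zu bytes hashed\n",
	       toy_hash_length(1), toy_hash_length(0));	/* 12 and 36 */
	return 0;
}
```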
|
|
|
|
|
|
|
|
__be32 flow_get_u32_src(const struct flow_keys *flow)
|
|
|
|
{
|
|
|
|
switch (flow->control.addr_type) {
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
|
|
|
|
return flow->addrs.v4addrs.src;
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
|
|
|
|
return (__force __be32)ipv6_addr_hash(
|
|
|
|
&flow->addrs.v6addrs.src);
|
2017-11-08 08:59:26 +00:00
|
|
|
case FLOW_DISSECTOR_KEY_TIPC:
|
|
|
|
return flow->addrs.tipckey.key;
|
2015-06-04 16:16:40 +00:00
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(flow_get_u32_src);
|
|
|
|
|
|
|
|
__be32 flow_get_u32_dst(const struct flow_keys *flow)
|
|
|
|
{
|
|
|
|
switch (flow->control.addr_type) {
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
|
|
|
|
return flow->addrs.v4addrs.dst;
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
|
|
|
|
return (__force __be32)ipv6_addr_hash(
|
|
|
|
&flow->addrs.v6addrs.dst);
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(flow_get_u32_dst);
|
|
|
|
|
2021-07-28 10:54:18 +00:00
|
|
|
/* Sort the source and destination IP addresses and the ports,
|
2019-10-29 13:50:50 +00:00
|
|
|
* so that the hash is consistent across the two directions
|
|
|
|
*/
|
2015-06-04 16:16:40 +00:00
|
|
|
static inline void __flow_hash_consistentify(struct flow_keys *keys)
|
|
|
|
{
|
|
|
|
int addr_diff, i;
|
|
|
|
|
|
|
|
switch (keys->control.addr_type) {
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
|
net: core: fix flow symmetric hash
__flow_hash_consistentify() wrongly swaps ipv4 addresses in a few cases.
This function is indirectly used by __skb_get_hash_symmetric(), which is
used to fan out packets in AF_PACKET.
Intrusion detection systems may be impacted by this issue.
__flow_hash_consistentify() computes the address difference and then swaps
the addresses if the difference is negative. In a few cases src - dst and
dst - src are both negative.
The following snippet mimics __flow_hash_consistentify():
```
#include <stdio.h>
#include <stdint.h>
int main(int argc, char** argv) {
int diffs_d, diffd_s;
uint32_t dst = 0xb225a8c0; /* 178.37.168.192 --> 192.168.37.178 */
uint32_t src = 0x3225a8c0; /* 50.37.168.192 --> 192.168.37.50 */
uint32_t dst2 = 0x3325a8c0; /* 51.37.168.192 --> 192.168.37.51 */
diffs_d = src - dst;
diffd_s = dst - src;
printf("src:%08x dst:%08x, diff(s-d)=%d(0x%x) diff(d-s)=%d(0x%x)\n",
src, dst, diffs_d, diffs_d, diffd_s, diffd_s);
diffs_d = src - dst2;
diffd_s = dst2 - src;
printf("src:%08x dst:%08x, diff(s-d)=%d(0x%x) diff(d-s)=%d(0x%x)\n",
src, dst2, diffs_d, diffs_d, diffd_s, diffd_s);
return 0;
}
```
Results:
src:3225a8c0 dst:b225a8c0, \
diff(s-d)=-2147483648(0x80000000) \
diff(d-s)=-2147483648(0x80000000)
src:3225a8c0 dst:3325a8c0, \
diff(s-d)=-16777216(0xff000000) \
diff(d-s)=16777216(0x1000000)
In the first case the addresses differences are always < 0, therefore
__flow_hash_consistentify() always swaps, thus dst->src and src->dst
packets have differents hashes.
Fixes: c3f8324188fa8 ("net: Add full IPv6 addresses to flow_keys")
Signed-off-by: Ludovic Cintrat <ludovic.cintrat@gatewatcher.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2022-09-07 10:08:13 +00:00
|
|
|
if ((__force u32)keys->addrs.v4addrs.dst <
|
|
|
|
(__force u32)keys->addrs.v4addrs.src)
|
2015-06-04 16:16:40 +00:00
|
|
|
swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
|
2021-07-28 10:54:18 +00:00
|
|
|
|
|
|
|
if ((__force u16)keys->ports.dst <
|
|
|
|
(__force u16)keys->ports.src) {
|
2015-06-04 16:16:40 +00:00
|
|
|
swap(keys->ports.src, keys->ports.dst);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
|
|
|
|
addr_diff = memcmp(&keys->addrs.v6addrs.dst,
|
|
|
|
&keys->addrs.v6addrs.src,
|
|
|
|
sizeof(keys->addrs.v6addrs.dst));
|
2021-07-28 10:54:18 +00:00
|
|
|
if (addr_diff < 0) {
|
2015-06-04 16:16:40 +00:00
|
|
|
for (i = 0; i < 4; i++)
|
|
|
|
swap(keys->addrs.v6addrs.src.s6_addr32[i],
|
|
|
|
keys->addrs.v6addrs.dst.s6_addr32[i]);
|
2021-07-28 10:54:18 +00:00
|
|
|
}
|
|
|
|
if ((__force u16)keys->ports.dst <
|
|
|
|
(__force u16)keys->ports.src) {
|
2015-06-04 16:16:40 +00:00
|
|
|
swap(keys->ports.src, keys->ports.dst);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2013-10-23 18:06:00 +00:00
|
|
|
}
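To see why the fix in the commit message above uses an unsigned compare rather than a signed difference, a compact demonstration with the same addresses as in the message:
```
/* With src = 0x3225a8c0 and dst = 0xb225a8c0, both (src - dst) and
 * (dst - src) are negative as 32-bit ints, so a difference-based swap
 * fires in both directions; the unsigned "dst < src" test cannot.
 */
#include <stdint.h>
#include <stdio.h>

static void toy_consistentify(uint32_t *src, uint32_t *dst)
{
	if (*dst < *src) {	/* unsigned compare, as in the fix */
		uint32_t tmp = *src;
		*src = *dst;
		*dst = tmp;
	}
}

int main(void)
{
	uint32_t a = 0x3225a8c0, b = 0xb225a8c0;
	uint32_t s1 = a, d1 = b, s2 = b, d2 = a;

	toy_consistentify(&s1, &d1);	/* a -> b direction */
	toy_consistentify(&s2, &d2);	/* b -> a direction */
	printf("%08x/%08x vs %08x/%08x\n",	/* both pairs identical */
	       (unsigned)s1, (unsigned)d1, (unsigned)s2, (unsigned)d2);
	return 0;
}
```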
|
|
|
|
|
2019-10-22 14:57:46 +00:00
|
|
|
static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
|
|
|
|
const siphash_key_t *keyval)
|
2014-07-02 04:32:05 +00:00
|
|
|
{
|
|
|
|
u32 hash;
|
|
|
|
|
2015-06-04 16:16:40 +00:00
|
|
|
__flow_hash_consistentify(keys);
|
2014-07-02 04:32:05 +00:00
|
|
|
|
2019-10-22 14:57:46 +00:00
|
|
|
hash = siphash(flow_keys_hash_start(keys),
|
|
|
|
flow_keys_hash_length(keys), keyval);
|
2014-07-02 04:32:05 +00:00
|
|
|
if (!hash)
|
|
|
|
hash = 1;
|
|
|
|
|
|
|
|
return hash;
|
|
|
|
}
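One detail in __flow_hash_from_keys() worth spelling out: 0 is reserved to mean "no valid hash" (see the __skb_get_hash() kernel-doc further down), so a keyed hash that happens to compute 0 is remapped. A one-liner sketch:
```
#include <stdint.h>
#include <stdio.h>

/* Map a computed hash of 0 to 1 so that 0 can mean "no valid hash". */
static uint32_t toy_nonzero_hash(uint32_t h)
{
	return h ? h : 1;
}

int main(void)
{
	printf("%u %u\n", (unsigned)toy_nonzero_hash(0),
	       (unsigned)toy_nonzero_hash(42));	/* 1 42 */
	return 0;
}
```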
|
|
|
|
|
|
|
|
u32 flow_hash_from_keys(struct flow_keys *keys)
|
|
|
|
{
|
2015-05-01 18:30:12 +00:00
|
|
|
__flow_hash_secret_init();
|
2019-10-22 14:57:46 +00:00
|
|
|
return __flow_hash_from_keys(keys, &hashrnd);
|
2014-07-02 04:32:05 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(flow_hash_from_keys);
|
|
|
|
|
2015-05-01 18:30:12 +00:00
|
|
|
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
|
2019-10-22 14:57:46 +00:00
|
|
|
struct flow_keys *keys,
|
|
|
|
const siphash_key_t *keyval)
|
2015-05-01 18:30:12 +00:00
|
|
|
{
|
2015-09-01 16:24:33 +00:00
|
|
|
skb_flow_dissect_flow_keys(skb, keys,
|
|
|
|
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
|
2015-05-01 18:30:12 +00:00
|
|
|
|
|
|
|
return __flow_hash_from_keys(keys, keyval);
|
|
|
|
}
|
|
|
|
|
2015-05-01 18:30:17 +00:00
|
|
|
struct _flow_keys_digest_data {
|
|
|
|
__be16 n_proto;
|
|
|
|
u8 ip_proto;
|
|
|
|
u8 padding;
|
|
|
|
__be32 ports;
|
|
|
|
__be32 src;
|
|
|
|
__be32 dst;
|
|
|
|
};
|
|
|
|
|
|
|
|
void make_flow_keys_digest(struct flow_keys_digest *digest,
|
|
|
|
const struct flow_keys *flow)
|
|
|
|
{
|
|
|
|
struct _flow_keys_digest_data *data =
|
|
|
|
(struct _flow_keys_digest_data *)digest;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
|
|
|
|
|
|
|
|
memset(digest, 0, sizeof(*digest));
|
|
|
|
|
2015-05-12 12:56:16 +00:00
|
|
|
data->n_proto = flow->basic.n_proto;
|
|
|
|
data->ip_proto = flow->basic.ip_proto;
|
|
|
|
data->ports = flow->ports.ports;
|
2015-06-04 16:16:40 +00:00
|
|
|
data->src = flow->addrs.v4addrs.src;
|
|
|
|
data->dst = flow->addrs.v4addrs.dst;
|
2015-05-01 18:30:17 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(make_flow_keys_digest);
|
|
|
|
|
2016-07-01 20:07:50 +00:00
|
|
|
static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
|
|
|
|
|
2016-10-26 16:49:46 +00:00
|
|
|
u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
|
2016-07-01 20:07:50 +00:00
|
|
|
{
|
|
|
|
struct flow_keys keys;
|
|
|
|
|
|
|
|
__flow_hash_secret_init();
|
|
|
|
|
|
|
|
memset(&keys, 0, sizeof(keys));
|
2019-04-22 15:55:46 +00:00
|
|
|
__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
|
2023-09-05 10:36:10 +00:00
|
|
|
&keys, NULL, 0, 0, 0, 0);
|
2016-07-01 20:07:50 +00:00
|
|
|
|
2019-10-22 14:57:46 +00:00
|
|
|
return __flow_hash_from_keys(&keys, &hashrnd);
|
2016-07-01 20:07:50 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
|
|
|
|
|
2015-05-12 12:56:10 +00:00
|
|
|
/**
|
|
|
|
* __skb_get_hash: calculate a flow hash
|
|
|
|
* @skb: sk_buff to calculate flow hash from
|
|
|
|
*
|
|
|
|
* This function calculates a flow hash based on src/dst addresses
|
2014-03-24 22:34:47 +00:00
|
|
|
* and src/dst port numbers. Sets hash in skb to non-zero hash value
|
|
|
|
* on success, zero indicates no valid hash. Also, sets l4_hash in skb
|
2013-01-21 00:39:24 +00:00
|
|
|
* if hash is a canonical 4-tuple hash over transport ports.
|
|
|
|
*/
|
2013-12-16 06:12:06 +00:00
|
|
|
void __skb_get_hash(struct sk_buff *skb)
|
2013-01-21 00:39:24 +00:00
|
|
|
{
|
|
|
|
struct flow_keys keys;
|
2016-08-31 06:15:05 +00:00
|
|
|
u32 hash;
|
2013-01-21 00:39:24 +00:00
|
|
|
|
2015-05-01 18:30:12 +00:00
|
|
|
__flow_hash_secret_init();
|
|
|
|
|
2019-10-22 14:57:46 +00:00
|
|
|
hash = ___skb_get_hash(skb, &keys, &hashrnd);
|
2016-08-31 06:15:05 +00:00
|
|
|
|
|
|
|
__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
|
2013-01-21 00:39:24 +00:00
|
|
|
}
|
2013-12-16 06:12:06 +00:00
|
|
|
EXPORT_SYMBOL(__skb_get_hash);
|
2013-01-21 00:39:24 +00:00
|
|
|
|
2019-10-22 14:57:46 +00:00
|
|
|
__u32 skb_get_hash_perturb(const struct sk_buff *skb,
|
|
|
|
const siphash_key_t *perturb)
|
2015-05-01 18:30:12 +00:00
|
|
|
{
|
|
|
|
struct flow_keys keys;
|
|
|
|
|
|
|
|
return ___skb_get_hash(skb, &keys, perturb);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(skb_get_hash_perturb);
|
|
|
|
|
2021-03-14 11:11:23 +00:00
|
|
|
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
|
2018-05-04 09:32:59 +00:00
|
|
|
const struct flow_keys_basic *keys, int hlen)
|
2013-03-19 06:39:30 +00:00
|
|
|
{
|
2015-06-04 16:16:39 +00:00
|
|
|
u32 poff = keys->control.thoff;
|
2013-03-19 06:39:30 +00:00
|
|
|
|
2016-02-24 17:29:44 +00:00
|
|
|
/* skip L4 headers for fragments after the first */
|
|
|
|
if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
|
|
|
|
!(keys->control.flags & FLOW_DIS_FIRST_FRAG))
|
|
|
|
return poff;
|
|
|
|
|
2015-05-12 12:56:16 +00:00
|
|
|
switch (keys->basic.ip_proto) {
|
2013-03-19 06:39:30 +00:00
|
|
|
case IPPROTO_TCP: {
|
2014-10-10 19:09:12 +00:00
|
|
|
/* access doff as u8 to avoid unaligned access */
|
|
|
|
const u8 *doff;
|
|
|
|
u8 _doff;
|
2013-03-19 06:39:30 +00:00
|
|
|
|
2014-10-10 19:09:12 +00:00
|
|
|
doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
|
|
|
|
data, hlen, &_doff);
|
|
|
|
if (!doff)
|
2013-03-19 06:39:30 +00:00
|
|
|
return poff;
|
|
|
|
|
2014-10-10 19:09:12 +00:00
|
|
|
poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
|
2013-03-19 06:39:30 +00:00
|
|
|
break;
|
|
|
|
}
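The TCP branch reads byte 12 of the header, whose upper nibble is the data offset in 32-bit words; (doff & 0xF0) >> 2 therefore yields the header length in bytes, clamped below at the 20-byte minimum. A sketch:
```
/* TCP header length from the data-offset byte; mirrors
 * max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2) above.
 */
#include <stdio.h>

static unsigned int toy_tcp_hdr_len(unsigned char byte12)
{
	unsigned int len = (byte12 & 0xF0) >> 2;	/* (doff >> 4) * 4 */

	return len < 20 ? 20 : len;
}

int main(void)
{
	printf("%u\n", toy_tcp_hdr_len(0x50));	/* 20: no options */
	printf("%u\n", toy_tcp_hdr_len(0xA0));	/* 40: 20 bytes of options */
	return 0;
}
```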
|
|
|
|
case IPPROTO_UDP:
|
|
|
|
case IPPROTO_UDPLITE:
|
|
|
|
poff += sizeof(struct udphdr);
|
|
|
|
break;
|
|
|
|
/* For the rest, we do not really care about header
|
|
|
|
* extensions at this point for now.
|
|
|
|
*/
|
|
|
|
case IPPROTO_ICMP:
|
|
|
|
poff += sizeof(struct icmphdr);
|
|
|
|
break;
|
|
|
|
case IPPROTO_ICMPV6:
|
|
|
|
poff += sizeof(struct icmp6hdr);
|
|
|
|
break;
|
|
|
|
case IPPROTO_IGMP:
|
|
|
|
poff += sizeof(struct igmphdr);
|
|
|
|
break;
|
|
|
|
case IPPROTO_DCCP:
|
|
|
|
poff += sizeof(struct dccp_hdr);
|
|
|
|
break;
|
|
|
|
case IPPROTO_SCTP:
|
|
|
|
poff += sizeof(struct sctphdr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return poff;
|
|
|
|
}
|
|
|
|
|
2015-05-12 12:56:14 +00:00
|
|
|
/**
|
|
|
|
* skb_get_poff - get the offset to the payload
|
|
|
|
* @skb: sk_buff to get the payload offset from
|
|
|
|
*
|
|
|
|
* The function will get the offset to the payload as far as it could
|
|
|
|
* be dissected. The main user is currently BPF, so that we can dynamically
|
2014-09-05 23:20:26 +00:00
|
|
|
* truncate packets without needing to push the actual payload to user
|
|
|
|
* space, analyzing only the headers instead.
|
|
|
|
*/
|
|
|
|
u32 skb_get_poff(const struct sk_buff *skb)
|
|
|
|
{
|
2018-05-04 09:32:59 +00:00
|
|
|
struct flow_keys_basic keys;
|
2014-09-05 23:20:26 +00:00
|
|
|
|
2019-04-22 15:55:46 +00:00
|
|
|
if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
|
|
|
|
NULL, 0, 0, 0, 0))
|
2014-09-05 23:20:26 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
|
|
|
|
}
|
2015-05-12 12:56:16 +00:00
|
|
|
|
2015-09-02 04:19:17 +00:00
|
|
|
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
|
2015-09-02 00:00:24 +00:00
|
|
|
{
|
|
|
|
memset(keys, 0, sizeof(*keys));
|
|
|
|
|
|
|
|
memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
|
|
|
|
sizeof(keys->addrs.v6addrs.src));
|
|
|
|
memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
|
|
|
|
sizeof(keys->addrs.v6addrs.dst));
|
|
|
|
keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
|
|
|
|
keys->ports.src = fl6->fl6_sport;
|
|
|
|
keys->ports.dst = fl6->fl6_dport;
|
|
|
|
keys->keyid.keyid = fl6->fl6_gre_key;
|
2018-06-04 09:36:05 +00:00
|
|
|
keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
|
2015-09-02 00:00:24 +00:00
|
|
|
keys->basic.ip_proto = fl6->flowi6_proto;
|
|
|
|
|
|
|
|
return flow_hash_from_keys(keys);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(__get_hash_from_flowi6);
|
|
|
|
|
2015-05-12 12:56:16 +00:00
|
|
|
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
|
2015-06-04 16:16:39 +00:00
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
|
|
|
|
.offset = offsetof(struct flow_keys, control),
|
|
|
|
},
|
2015-05-12 12:56:16 +00:00
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_BASIC,
|
|
|
|
.offset = offsetof(struct flow_keys, basic),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
|
2015-06-04 16:16:40 +00:00
|
|
|
.offset = offsetof(struct flow_keys, addrs.v4addrs),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
|
|
|
|
.offset = offsetof(struct flow_keys, addrs.v6addrs),
|
2015-05-12 12:56:16 +00:00
|
|
|
},
|
2015-06-04 16:16:41 +00:00
|
|
|
{
|
2017-11-08 08:59:26 +00:00
|
|
|
.key_id = FLOW_DISSECTOR_KEY_TIPC,
|
|
|
|
.offset = offsetof(struct flow_keys, addrs.tipckey),
|
2015-06-04 16:16:41 +00:00
|
|
|
},
|
2015-05-12 12:56:16 +00:00
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_PORTS,
|
|
|
|
.offset = offsetof(struct flow_keys, ports),
|
|
|
|
},
|
2015-06-04 16:16:43 +00:00
|
|
|
{
|
2016-08-17 10:36:11 +00:00
|
|
|
.key_id = FLOW_DISSECTOR_KEY_VLAN,
|
|
|
|
.offset = offsetof(struct flow_keys, vlan),
|
2015-06-04 16:16:43 +00:00
|
|
|
},
|
2015-06-04 16:16:44 +00:00
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
|
|
|
|
.offset = offsetof(struct flow_keys, tags),
|
|
|
|
},
|
2015-06-04 16:16:45 +00:00
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
|
|
|
|
.offset = offsetof(struct flow_keys, keyid),
|
|
|
|
},
|
2015-05-12 12:56:16 +00:00
|
|
|
};
|
|
|
|
|
2016-07-01 20:07:50 +00:00
|
|
|
static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
|
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
|
|
|
|
.offset = offsetof(struct flow_keys, control),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_BASIC,
|
|
|
|
.offset = offsetof(struct flow_keys, basic),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
|
|
|
|
.offset = offsetof(struct flow_keys, addrs.v4addrs),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
|
|
|
|
.offset = offsetof(struct flow_keys, addrs.v6addrs),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_PORTS,
|
|
|
|
.offset = offsetof(struct flow_keys, ports),
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2018-05-04 09:32:59 +00:00
|
|
|
static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
|
2015-06-04 16:16:39 +00:00
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
|
|
|
|
.offset = offsetof(struct flow_keys, control),
|
|
|
|
},
|
2015-05-12 12:56:16 +00:00
|
|
|
{
|
|
|
|
.key_id = FLOW_DISSECTOR_KEY_BASIC,
|
|
|
|
.offset = offsetof(struct flow_keys, basic),
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
struct flow_dissector flow_keys_dissector __read_mostly;
|
|
|
|
EXPORT_SYMBOL(flow_keys_dissector);
|
|
|
|
|
2018-05-04 09:32:59 +00:00
|
|
|
struct flow_dissector flow_keys_basic_dissector __read_mostly;
|
|
|
|
EXPORT_SYMBOL(flow_keys_basic_dissector);
|
2015-05-12 12:56:16 +00:00
|
|
|
|
|
|
|
static int __init init_default_flow_dissectors(void)
|
|
|
|
{
|
|
|
|
skb_flow_dissector_init(&flow_keys_dissector,
|
|
|
|
flow_keys_dissector_keys,
|
|
|
|
ARRAY_SIZE(flow_keys_dissector_keys));
|
2016-07-01 20:07:50 +00:00
|
|
|
skb_flow_dissector_init(&flow_keys_dissector_symmetric,
|
|
|
|
flow_keys_dissector_symmetric_keys,
|
|
|
|
ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
|
2018-05-04 09:32:59 +00:00
|
|
|
skb_flow_dissector_init(&flow_keys_basic_dissector,
|
|
|
|
flow_keys_basic_dissector_keys,
|
|
|
|
ARRAY_SIZE(flow_keys_basic_dissector_keys));
|
2020-05-31 08:28:37 +00:00
|
|
|
return 0;
|
2020-05-21 08:34:35 +00:00
|
|
|
}
|
2016-11-22 19:17:30 +00:00
|
|
|
core_initcall(init_default_flow_dissectors);
|