// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

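/* A deferred action holds a packet together with the netlink-encoded
 * actions that still have to be applied to it once the current action
 * list finishes executing, plus a clone of its flow key.
 */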
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

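/* Per-CPU scratch space used while fragmenting a packet: prepare_frag()
 * stashes the L2 header and related metadata here, and ovs_vport_output()
 * restores them on every resulting fragment.
 */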
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

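/* Action execution can nest (e.g. recirculation inside a sample action).
 * Nesting is capped at OVS_RECURSION_LIMIT levels; only the first
 * OVS_DEFERRED_ACTION_THRESHOLD levels get their own per-CPU flow-key
 * clone, deeper work is queued on the per-CPU action fifo instead.
 */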
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Add an entry to the per-CPU deferred-action fifo; returns NULL if the
 * fifo is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

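/* Flow-key validity is tracked in the high bit of 'mac_proto'
 * (SW_FLOW_KEY_INVALID): actions that rewrite the packet beyond what the
 * key describes mark it invalid so it is re-extracted before being reused.
 */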
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

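	/* Tell the helper whether an Ethernet header is currently present,
	 * so the ethertype is rewritten even for packets that arrived on an
	 * L3 (ARPHRD_NONE) device and had the header pushed later.
	 */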
	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{
	u8 buffer[NSH_HDR_MAX_LEN];
	struct nshhdr *nh = (struct nshhdr *)buffer;
	int err;

	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
	if (err)
		return err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

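/* When a set() action rewrites part of the packet's tuple, the cached
 * conntrack entry no longer matches the packet and must be cleared,
 * otherwise a later ct() action would mis-classify it.  The flow key's
 * conntrack state is deliberately left alone so ct_state can still be
 * matched after a recirculation that precedes the next ct().
 */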
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

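/* Minimal dst used while fragmenting: it only needs to report the output
 * device's MTU to the IPv4/IPv6 fragmentation code.
 */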
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

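/* Fragment an over-MRU packet using the stack's IPv4 or IPv6 fragmentation
 * helpers; packets of any other ethertype cannot be fragmented and are
 * dropped.
 */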
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}

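/* Output to a vport.  The carrier check also guards against a vport whose
 * underlying netdev is being unregistered (its rx handler is already gone
 * and real_num_tx_queues may be zero); such a device is treated the same
 * as a missing vport and the packet is dropped.
 */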
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

if (likely(vport && netif_carrier_ok(vport->dev))) {
|
2015-08-26 18:31:48 +00:00
|
|
|
u16 mru = OVS_CB(skb)->mru;
|
2016-06-10 18:49:33 +00:00
|
|
|
u32 cutlen = OVS_CB(skb)->cutlen;
|
|
|
|
|
|
|
|
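/* Apply any truncation requested earlier by an OVS_ACTION_ATTR_TRUNC
 * action, trimming at most down to the L2 header length.
 */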
if (unlikely(cutlen > 0)) {
|
2016-11-10 15:28:19 +00:00
|
|
|
if (skb->len - cutlen > ovs_mac_header_len(key))
|
2016-06-10 18:49:33 +00:00
|
|
|
pskb_trim(skb, skb->len - cutlen);
|
|
|
|
else
|
2016-11-10 15:28:19 +00:00
|
|
|
pskb_trim(skb, ovs_mac_header_len(key));
|
2016-06-10 18:49:33 +00:00
|
|
|
}
|
2015-08-26 18:31:48 +00:00
|
|
|
|
2016-11-10 15:28:17 +00:00
|
|
|
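/* Send the packet as-is when it fits within the MRU plus the L2
 * header, fragment it when it still fits the egress device MTU,
 * and otherwise drop it as too big.
 */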
if (likely(!mru ||
|
|
|
|
(skb->len <= mru + vport->dev->hard_header_len))) {
|
2016-11-10 15:28:19 +00:00
|
|
|
ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
|
2015-08-26 18:31:48 +00:00
|
|
|
} else if (mru <= vport->dev->mtu) {
|
2015-09-15 01:10:28 +00:00
|
|
|
struct net *net = read_pnet(&dp->net);
|
2015-08-26 18:31:48 +00:00
|
|
|
|
2016-11-10 15:28:19 +00:00
|
|
|
ovs_fragment(net, vport, skb, mru, key);
|
2015-08-26 18:31:48 +00:00
|
|
|
} else {
|
2023-08-11 14:12:52 +00:00
|
|
|
kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
|
2015-08-26 18:31:48 +00:00
|
|
|
}
|
|
|
|
} else {
|
2023-08-11 14:12:52 +00:00
|
|
|
kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
|
2015-08-26 18:31:48 +00:00
|
|
|
}
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
|
2015-05-27 03:59:43 +00:00
|
|
|
struct sw_flow_key *key, const struct nlattr *attr,
|
2016-06-10 18:49:33 +00:00
|
|
|
const struct nlattr *actions, int actions_len,
|
|
|
|
uint32_t cutlen)
|
2011-10-26 02:26:31 +00:00
|
|
|
{
|
|
|
|
struct dp_upcall_info upcall;
|
|
|
|
const struct nlattr *a;
|
|
|
|
int rem;
|
|
|
|
|
2015-05-27 03:59:43 +00:00
|
|
|
memset(&upcall, 0, sizeof(upcall));
|
2011-10-26 02:26:31 +00:00
|
|
|
upcall.cmd = OVS_PACKET_CMD_ACTION;
|
2015-08-26 18:31:48 +00:00
|
|
|
upcall.mru = OVS_CB(skb)->mru;
|
2011-10-26 02:26:31 +00:00
|
|
|
|
|
|
|
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
|
2020-09-01 12:26:12 +00:00
|
|
|
a = nla_next(a, &rem)) {
|
2011-10-26 02:26:31 +00:00
|
|
|
switch (nla_type(a)) {
|
|
|
|
case OVS_USERSPACE_ATTR_USERDATA:
|
|
|
|
upcall.userdata = a;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OVS_USERSPACE_ATTR_PID:
|
2021-07-23 14:24:13 +00:00
|
|
|
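/* With per-CPU upcall dispatch negotiated, pick the Netlink portid
 * registered for the executing CPU; otherwise use the portid that
 * user space supplied in the action.
 */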
if (dp->user_features &
|
|
|
|
OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
|
openvswitch: Introduce per-cpu upcall dispatch
The Open vSwitch kernel module uses the upcall mechanism to send
packets from kernel space to user space when it misses in the kernel
space flow table. The upcall sends packets via a Netlink socket.
Currently, a Netlink socket is created for every vport. In this way,
there is a 1:1 mapping between a vport and a Netlink socket.
When a packet is received by a vport, if it needs to be sent to
user space, it is sent via the corresponding Netlink socket.
This mechanism, with various iterations of the corresponding user
space code, has seen some limitations and issues:
* On systems with a large number of vports, there is a correspondingly
large number of Netlink sockets which can limit scaling.
(https://bugzilla.redhat.com/show_bug.cgi?id=1526306)
* Packet reordering on upcalls.
(https://bugzilla.redhat.com/show_bug.cgi?id=1844576)
* A thundering herd issue.
(https://bugzilla.redhat.com/show_bug.cgi?id=1834444)
This patch introduces an alternative, feature-negotiated, upcall
mode using a per-cpu dispatch rather than a per-vport dispatch.
In this mode, the Netlink socket to be used for the upcall is
selected based on the CPU of the thread that is executing the upcall.
In this way, it resolves the issues above as follows:
a) The number of Netlink sockets scales with the number of CPUs
rather than the number of vports.
b) Ordering per-flow is maintained as packets are distributed to
CPUs based on mechanisms such as RSS and flows are distributed
to a single user space thread.
c) Packets from a flow can only wake up one user space thread.
The corresponding user space code can be found at:
https://mail.openvswitch.org/pipermail/ovs-dev/2021-July/385139.html
Bugzilla: https://bugzilla.redhat.com/1844576
Signed-off-by: Mark Gray <mark.d.gray@redhat.com>
Acked-by: Flavio Leitner <fbl@sysclose.org>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-15 12:27:54 +00:00
|
|
|
upcall.portid =
|
2021-07-23 14:24:13 +00:00
|
|
|
ovs_dp_get_upcall_portid(dp,
|
|
|
|
smp_processor_id());
|
2021-07-15 12:27:54 +00:00
|
|
|
else
|
|
|
|
upcall.portid = nla_get_u32(a);
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
2014-11-06 14:51:24 +00:00
|
|
|
|
|
|
|
case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
|
|
|
|
/* Get egress tunnel info. */
|
|
|
|
struct vport *vport;
|
|
|
|
|
|
|
|
vport = ovs_vport_rcu(dp, nla_get_u32(a));
|
|
|
|
if (vport) {
|
|
|
|
int err;
|
|
|
|
|
2015-10-23 01:17:16 +00:00
|
|
|
err = dev_fill_metadata_dst(vport->dev, skb);
|
|
|
|
if (!err)
|
|
|
|
upcall.egress_tun_info = skb_tunnel_info(skb);
|
2014-11-06 14:51:24 +00:00
|
|
|
}
|
2015-08-31 01:09:38 +00:00
|
|
|
|
2014-11-06 14:51:24 +00:00
|
|
|
break;
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
2014-11-06 14:51:24 +00:00
|
|
|
|
2015-05-27 03:59:43 +00:00
|
|
|
case OVS_USERSPACE_ATTR_ACTIONS: {
|
|
|
|
/* Include actions. */
|
|
|
|
upcall.actions = actions;
|
|
|
|
upcall.actions_len = actions_len;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-11-06 14:51:24 +00:00
|
|
|
} /* End of switch. */
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
|
|
|
|
2016-06-10 18:49:33 +00:00
|
|
|
return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
|
|
|
|
openvswitch: add TTL decrement action
New action to decrement TTL instead of setting it to a fixed value.
This action will decrement the TTL and, if the TTL expires, either drop
the packet or execute an action passed via a nested attribute.
The default TTL expired action is to drop the packet.
Supports both IPv4 and IPv6 via the ttl and hop_limit fields, respectively.
Tested with a corresponding change in the userspace:
# ovs-dpctl dump-flows
in_port(2),eth(),eth_type(0x0800), packets:0, bytes:0, used:never, actions:dec_ttl{ttl<=1 action:(drop)},1
in_port(1),eth(),eth_type(0x0800), packets:0, bytes:0, used:never, actions:dec_ttl{ttl<=1 action:(drop)},2
in_port(1),eth(),eth_type(0x0806), packets:0, bytes:0, used:never, actions:2
in_port(2),eth(),eth_type(0x0806), packets:0, bytes:0, used:never, actions:1
# ping -c1 192.168.0.2 -t 42
IP (tos 0x0, ttl 41, id 61647, offset 0, flags [DF], proto ICMP (1), length 84)
192.168.0.1 > 192.168.0.2: ICMP echo request, id 386, seq 1, length 64
# ping -c1 192.168.0.2 -t 120
IP (tos 0x0, ttl 119, id 62070, offset 0, flags [DF], proto ICMP (1), length 84)
192.168.0.1 > 192.168.0.2: ICMP echo request, id 388, seq 1, length 64
# ping -c1 192.168.0.2 -t 1
#
Co-developed-by: Bindiya Kurle <bindiyakurle@gmail.com>
Signed-off-by: Bindiya Kurle <bindiyakurle@gmail.com>
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-02-15 13:20:56 +00:00
|
|
|
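/* Called when execute_dec_ttl() saw an expired TTL/hop limit: run the
 * nested OVS_DEC_TTL_ATTR_ACTION list if one was supplied, otherwise
 * drop the packet.
 */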
static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
struct sw_flow_key *key,
|
2021-01-13 13:50:00 +00:00
|
|
|
const struct nlattr *attr)
|
2020-02-15 13:20:56 +00:00
|
|
|
{
|
2020-12-07 10:08:39 +00:00
|
|
|
/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
|
|
|
|
struct nlattr *actions = nla_data(attr);
|
2020-02-15 13:20:56 +00:00
|
|
|
|
2020-12-07 10:08:39 +00:00
|
|
|
if (nla_len(actions))
|
|
|
|
return clone_execute(dp, skb, key, 0, nla_data(actions),
|
2021-01-13 13:50:00 +00:00
|
|
|
nla_len(actions), true, false);
|
2020-02-15 13:20:56 +00:00
|
|
|
|
2023-08-11 14:12:52 +00:00
|
|
|
ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
|
2020-02-15 13:20:56 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
openvswitch: Optimize sample action for the clone use cases
With the introduction of the OpenFlow 'clone' action, OVS user space
can now translate the 'clone' action into the kernel datapath 'sample'
action with 100% probability, to ensure that the clone semantics,
which is that the packet seen by the clone action is the same as the
packet seen by the action after clone, is faithfully carried out
in the datapath.
While the sample action in the datapath has the matching semantics,
its implementation is only optimized for its original use.
Specifically, there are two limitations: First, there is a 3-level
nesting restriction, enforced at flow download time. This limit
turns out to be too restrictive for the 'clone' use case.
Second, the implementation avoids recursive calls only if the sample
action list has a single userspace action.
The main optimization implemented in this series removes the static
nesting limit check and instead implements a run-time recursion limit
check and recursion avoidance similar to that of the 'recirc' action.
This optimization solves both issues #1 and #2 above.
One related optimization attempts to avoid copying the flow key as
long as the enclosed actions do not change the flow key. The
detection is performed only once, at flow download time.
Another related optimization is to rewrite the action list at flow
download time in order to save the fast path from parsing the sample
action list in its original form repeatedly.
Signed-off-by: Andy Zhou <azhou@ovn.org>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-03-20 23:32:29 +00:00
|
|
|
/* When 'last' is true, sample() should always consume the 'skb'.
|
|
|
|
* Otherwise, sample() should keep 'skb' intact regardless of what
|
|
|
|
* actions are executed within sample().
|
|
|
|
*/
|
2011-10-26 02:26:31 +00:00
|
|
|
static int sample(struct datapath *dp, struct sk_buff *skb,
|
2015-05-27 03:59:43 +00:00
|
|
|
struct sw_flow_key *key, const struct nlattr *attr,
|
2017-03-20 23:32:29 +00:00
|
|
|
bool last)
|
2011-10-26 02:26:31 +00:00
|
|
|
{
|
2017-03-20 23:32:29 +00:00
|
|
|
struct nlattr *actions;
|
|
|
|
struct nlattr *sample_arg;
|
|
|
|
int rem = nla_len(attr);
|
|
|
|
const struct sample_arg *arg;
|
2017-03-20 23:32:30 +00:00
|
|
|
bool clone_flow_key;
|
2011-10-26 02:26:31 +00:00
|
|
|
|
2017-03-20 23:32:29 +00:00
|
|
|
/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
|
|
|
|
sample_arg = nla_data(attr);
|
|
|
|
arg = nla_data(sample_arg);
|
|
|
|
actions = nla_next(sample_arg, &rem);
|
2015-08-05 07:30:47 +00:00
|
|
|
|
2017-03-20 23:32:29 +00:00
|
|
|
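/* Run the nested actions only when the sample probability fires:
 * U32_MAX means always sample, zero means never.
 */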
if ((arg->probability != U32_MAX) &&
|
2022-10-05 15:43:22 +00:00
|
|
|
(!arg->probability || get_random_u32() > arg->probability)) {
|
2017-03-20 23:32:29 +00:00
|
|
|
if (last)
|
2023-08-11 14:12:48 +00:00
|
|
|
ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
|
2017-03-20 23:32:29 +00:00
|
|
|
return 0;
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
|
|
|
|
2017-03-20 23:32:30 +00:00
|
|
|
clone_flow_key = !arg->exec;
|
|
|
|
return clone_execute(dp, skb, key, 0, actions, rem, last,
|
|
|
|
clone_flow_key);
|
2014-09-16 02:37:25 +00:00
|
|
|
}
|
|
|
|
|
2018-07-02 15:18:03 +00:00
|
|
|
/* When 'last' is true, clone() should always consume the 'skb'.
|
|
|
|
* Otherwise, clone() should keep 'skb' intact regardless of what
|
|
|
|
* actions are executed within clone().
|
|
|
|
*/
|
|
|
|
static int clone(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
struct sw_flow_key *key, const struct nlattr *attr,
|
|
|
|
bool last)
|
|
|
|
{
|
|
|
|
struct nlattr *actions;
|
|
|
|
struct nlattr *clone_arg;
|
|
|
|
int rem = nla_len(attr);
|
|
|
|
bool dont_clone_flow_key;
|
|
|
|
|
net: openvswitch: don't send internal clone attribute to the userspace.
'OVS_CLONE_ATTR_EXEC' is an internal attribute that is used for
performance optimization inside the kernel. It's added by the kernel
while parsing user-provided actions and should not be sent during the
flow dump as it's not part of the uAPI.
The issue doesn't cause any significant problems for the ovs-vswitchd
process, because the reported actions are not really used in the
application lifecycle and are only supposed to be shown to a human via
ovs-dpctl flow dump. However, the action list is still incorrect
and causes the following error if the user wants to look at the
datapath flows:
# ovs-dpctl add-dp system@ovs-system
# ovs-dpctl add-flow "<flow match>" "clone(ct(commit),0)"
# ovs-dpctl dump-flows
<flow match>, packets:0, bytes:0, used:never,
actions:clone(bad length 4, expected -1 for: action0(01 00 00 00),
ct(commit),0)
With the fix:
# ovs-dpctl dump-flows
<flow match>, packets:0, bytes:0, used:never,
actions:clone(ct(commit),0)
Additionally fixed an incorrect attribute name in the comment.
Fixes: b233504033db ("openvswitch: kernel datapath clone action")
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
Acked-by: Aaron Conole <aconole@redhat.com>
Link: https://lore.kernel.org/r/20220404104150.2865736-1-i.maximets@ovn.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-04-04 10:41:50 +00:00
|
|
|
/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
|
2018-07-02 15:18:03 +00:00
|
|
|
clone_arg = nla_data(attr);
|
|
|
|
dont_clone_flow_key = nla_get_u32(clone_arg);
|
|
|
|
actions = nla_next(clone_arg, &rem);
|
|
|
|
|
|
|
|
return clone_execute(dp, skb, key, 0, actions, rem, last,
|
|
|
|
!dont_clone_flow_key);
|
|
|
|
}
|
|
|
|
|
2014-09-16 02:37:25 +00:00
|
|
|
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
|
|
|
|
const struct nlattr *attr)
|
|
|
|
{
|
|
|
|
struct ovs_action_hash *hash_act = nla_data(attr);
|
|
|
|
u32 hash = 0;
|
|
|
|
|
net: openvswitch: add support for l4 symmetric hashing
Since its introduction, the ovs module execute_hash action allowed
hash algorithms other than the skb->l4_hash to be used. However,
additional hash algorithms were not implemented. This means flows
requiring different hash distributions weren't able to use the
kernel datapath.
Now, introduce support for a symmetric hashing algorithm as an
alternative hash supported by the ovs module, using the flow
dissector.
Output of flow using l4_sym hash:
recirc_id(0),in_port(3),eth(),eth_type(0x0800),
ipv4(dst=64.0.0.0/192.0.0.0,proto=6,frag=no), packets:30473425,
bytes:45902883702, used:0.000s, flags:SP.,
actions:hash(sym_l4(0)),recirc(0xd)
Some performance testing with no GRO/GSO, two veths, single flow:
hash(l4(0)): 4.35 GBits/s
hash(l4_sym(0)): 4.24 GBits/s
Signed-off-by: Aaron Conole <aconole@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2023-06-09 13:59:55 +00:00
|
|
|
if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
|
|
|
|
/* OVS_HASH_ALG_L4 hashing type. */
|
|
|
|
hash = skb_get_hash(skb);
|
|
|
|
} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
|
|
|
|
/* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't
|
|
|
|
* extend past an encapsulated header.
|
|
|
|
*/
|
|
|
|
hash = __skb_get_hash_symmetric(skb);
|
|
|
|
}
|
|
|
|
|
2014-09-16 02:37:25 +00:00
|
|
|
hash = jhash_1word(hash, hash_act->hash_basis);
|
|
|
|
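/* A zero hash is reserved to mean "no hash", so remap it. */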
if (!hash)
|
|
|
|
hash = 0x1;
|
|
|
|
|
|
|
|
key->ovs_flow_hash = hash;
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
|
|
|
|
2015-02-05 21:40:49 +00:00
|
|
|
static int execute_set_action(struct sk_buff *skb,
|
|
|
|
struct sw_flow_key *flow_key,
|
|
|
|
const struct nlattr *a)
|
|
|
|
{
|
|
|
|
/* Only tunnel set execution is supported without a mask. */
|
|
|
|
if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
|
2015-07-21 08:44:03 +00:00
|
|
|
struct ovs_tunnel_info *tun = nla_data(a);
|
|
|
|
|
|
|
|
skb_dst_drop(skb);
|
|
|
|
dst_hold((struct dst_entry *)tun->tun_dst);
|
|
|
|
skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
|
2015-02-05 21:40:49 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mask is at the midpoint of the data. */
|
|
|
|
#define get_mask(a, type) ((const type)nla_data(a) + 1)
|
|
|
|
|
|
|
|
static int execute_masked_set_action(struct sk_buff *skb,
|
|
|
|
struct sw_flow_key *flow_key,
|
|
|
|
const struct nlattr *a)
|
2011-10-26 02:26:31 +00:00
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
|
2015-02-05 21:40:49 +00:00
|
|
|
switch (nla_type(a)) {
|
2011-10-26 02:26:31 +00:00
|
|
|
case OVS_KEY_ATTR_PRIORITY:
|
2015-08-26 18:31:45 +00:00
|
|
|
OVS_SET_MASKED(skb->priority, nla_get_u32(a),
|
|
|
|
*get_mask(a, u32 *));
|
2015-02-05 21:40:49 +00:00
|
|
|
flow_key->phy.priority = skb->priority;
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
2012-11-26 19:24:11 +00:00
|
|
|
case OVS_KEY_ATTR_SKB_MARK:
|
2015-08-26 18:31:45 +00:00
|
|
|
OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
|
2015-02-05 21:40:49 +00:00
|
|
|
flow_key->phy.skb_mark = skb->mark;
|
2012-11-26 19:24:11 +00:00
|
|
|
break;
|
|
|
|
|
2014-10-03 22:35:31 +00:00
|
|
|
case OVS_KEY_ATTR_TUNNEL_INFO:
|
2015-02-05 21:40:49 +00:00
|
|
|
/* Masked data not supported for tunnel. */
|
|
|
|
err = -EINVAL;
|
2013-06-18 00:50:18 +00:00
|
|
|
break;
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
case OVS_KEY_ATTR_ETHERNET:
|
2015-02-05 21:40:49 +00:00
|
|
|
err = set_eth_addr(skb, flow_key, nla_data(a),
|
|
|
|
get_mask(a, struct ovs_key_ethernet *));
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
2017-11-07 13:07:02 +00:00
|
|
|
case OVS_KEY_ATTR_NSH:
|
|
|
|
err = set_nsh(skb, flow_key, a);
|
|
|
|
break;
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
case OVS_KEY_ATTR_IPV4:
|
2015-02-05 21:40:49 +00:00
|
|
|
err = set_ipv4(skb, flow_key, nla_data(a),
|
|
|
|
get_mask(a, struct ovs_key_ipv4 *));
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
2012-11-13 23:44:14 +00:00
|
|
|
case OVS_KEY_ATTR_IPV6:
|
2015-02-05 21:40:49 +00:00
|
|
|
err = set_ipv6(skb, flow_key, nla_data(a),
|
|
|
|
get_mask(a, struct ovs_key_ipv6 *));
|
2012-11-13 23:44:14 +00:00
|
|
|
break;
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
case OVS_KEY_ATTR_TCP:
|
2015-02-05 21:40:49 +00:00
|
|
|
err = set_tcp(skb, flow_key, nla_data(a),
|
|
|
|
get_mask(a, struct ovs_key_tcp *));
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_UDP:
|
2015-02-05 21:40:49 +00:00
|
|
|
err = set_udp(skb, flow_key, nla_data(a),
|
|
|
|
get_mask(a, struct ovs_key_udp *));
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
2013-08-22 19:30:48 +00:00
|
|
|
|
|
|
|
case OVS_KEY_ATTR_SCTP:
|
2015-02-05 21:40:49 +00:00
|
|
|
err = set_sctp(skb, flow_key, nla_data(a),
|
|
|
|
get_mask(a, struct ovs_key_sctp *));
|
2013-08-22 19:30:48 +00:00
|
|
|
break;
|
2014-10-06 12:05:13 +00:00
|
|
|
|
|
|
|
case OVS_KEY_ATTR_MPLS:
|
2015-02-05 21:40:49 +00:00
|
|
|
err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
|
|
|
|
__be32 *));
|
2014-10-06 12:05:13 +00:00
|
|
|
break;
|
2015-08-26 18:31:48 +00:00
|
|
|
|
|
|
|
case OVS_KEY_ATTR_CT_STATE:
|
|
|
|
case OVS_KEY_ATTR_CT_ZONE:
|
2015-08-26 18:31:49 +00:00
|
|
|
case OVS_KEY_ATTR_CT_MARK:
|
2015-10-01 22:00:37 +00:00
|
|
|
case OVS_KEY_ATTR_CT_LABELS:
|
openvswitch: Add original direction conntrack tuple to sw_flow_key.
Add the fields of the conntrack original direction 5-tuple to struct
sw_flow_key. The new fields are initially marked as non-existent, and
are populated whenever a conntrack action is executed and either finds
or generates a conntrack entry. This means that these fields exist
for all packets that were not rejected by conntrack as untrackable.
The original tuple fields in the sw_flow_key are filled from the
original direction tuple of the conntrack entry relating to the
current packet, or from the original direction tuple of the master
conntrack entry, if the current conntrack entry has a master.
Generally, expected connections of connections having an assigned
helper (e.g., FTP), have a master conntrack entry.
The main purpose of the new conntrack original tuple fields is to
allow matching on them for policy decision purposes, with the premise
that the admissibility of tracked connections reply packets (as well
as original direction packets), and both direction packets of any
related connections may be based on ACL rules applying to the master
connection's original direction 5-tuple. This also makes it easier to
make policy decisions when the actual packet headers might have been
transformed by NAT, as the original direction 5-tuple represents the
packet headers before any such transformation.
When using the original direction 5-tuple the admissibility of return
and/or related packets need not be based on the mere existence of a
conntrack entry, allowing separation of admission policy from the
established conntrack state. While existence of a conntrack entry is
required for admission of the return or related packets, policy
changes can render connections that were initially admitted to be
rejected or dropped afterwards. If the admission of the return and
related packets was based on mere conntrack state (e.g., connection
being in an established state), a policy change that would make the
connection rejected or dropped would need to find and delete all
conntrack entries affected by such a change. When using the original
direction 5-tuple matching the affected conntrack entries can be
allowed to time out instead, as the established state of the
connection would not need to be the basis for packet admission any
more.
It should be noted that the directionality of related connections may
be the same or different than that of the master connection, and
neither the original direction 5-tuple nor the conntrack state bits
carry this information. If needed, the directionality of the master
connection can be stored in master's conntrack mark or labels, which
are automatically inherited by the expected related connections.
The fact that neither ARP nor ND packets are trackable by conntrack
allows mutual exclusion between ARP/ND and the new conntrack original
tuple fields. Hence, the IP addresses are overlaid in union with ARP
and ND fields. This allows the sw_flow_key to not grow much due to
this patch, but it also means that we must be careful to never use the
new key fields with ARP or ND packets. ARP is easy to distinguish and
keep mutually exclusive based on the ethernet type, but ND being an
ICMPv6 protocol requires a bit more attention.
Signed-off-by: Jarno Rajahalme <jarno@ovn.org>
Acked-by: Joe Stringer <joe@ovn.org>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-09 19:21:59 +00:00
|
|
|
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
|
|
|
|
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
|
2015-08-26 18:31:48 +00:00
|
|
|
err = -EINVAL;
|
|
|
|
break;
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-09-16 02:37:25 +00:00
|
|
|
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
struct sw_flow_key *key,
|
2017-03-20 23:32:30 +00:00
|
|
|
const struct nlattr *a, bool last)
|
2014-09-16 02:37:25 +00:00
|
|
|
{
|
2017-03-20 23:32:30 +00:00
|
|
|
u32 recirc_id;
|
2014-09-16 02:37:25 +00:00
|
|
|
|
2014-11-06 14:55:14 +00:00
|
|
|
if (!is_flow_key_valid(key)) {
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = ovs_flow_key_update(skb, key);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
BUG_ON(!is_flow_key_valid(key));
|
2014-09-16 02:37:25 +00:00
|
|
|
|
2017-03-20 23:32:30 +00:00
|
|
|
recirc_id = nla_get_u32(a);
|
|
|
|
return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
|
2014-09-16 02:37:25 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 00:43:46 +00:00
|
|
|
static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
struct sw_flow_key *key,
|
|
|
|
const struct nlattr *attr, bool last)
|
|
|
|
{
|
2020-06-23 16:33:15 +00:00
|
|
|
struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
|
2019-03-26 00:43:46 +00:00
|
|
|
const struct nlattr *actions, *cpl_arg;
|
2020-06-23 16:33:15 +00:00
|
|
|
int len, max_len, rem = nla_len(attr);
|
2019-03-26 00:43:46 +00:00
|
|
|
const struct check_pkt_len_arg *arg;
|
|
|
|
bool clone_flow_key;
|
|
|
|
|
|
|
|
/* The first netlink attribute in 'attr' is always
|
|
|
|
* 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
|
|
|
|
*/
|
|
|
|
cpl_arg = nla_data(attr);
|
|
|
|
arg = nla_data(cpl_arg);
|
|
|
|
|
2020-06-23 16:33:15 +00:00
|
|
|
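/* For locally reassembled fragments, compare the original packet
 * length (MRU plus L2 header) against the limit; for GSO packets,
 * check that each segment would fit.
 */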
len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
|
|
|
|
max_len = arg->pkt_len;
|
|
|
|
|
|
|
|
if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
|
|
|
|
len <= max_len) {
|
2019-03-26 00:43:46 +00:00
|
|
|
/* Second netlink attribute in 'attr' is always
|
|
|
|
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
|
|
|
|
*/
|
|
|
|
actions = nla_next(cpl_arg, &rem);
|
|
|
|
clone_flow_key = !arg->exec_for_lesser_equal;
|
|
|
|
} else {
|
|
|
|
/* Third netlink attribute in 'attr' is always
|
|
|
|
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
|
|
|
|
*/
|
|
|
|
actions = nla_next(cpl_arg, &rem);
|
|
|
|
actions = nla_next(actions, &rem);
|
|
|
|
clone_flow_key = !arg->exec_for_greater;
|
|
|
|
}
|
|
|
|
|
|
|
|
return clone_execute(dp, skb, key, 0, nla_data(actions),
|
|
|
|
nla_len(actions), last, clone_flow_key);
|
|
|
|
}
|
|
|
|
|
2020-02-15 13:20:56 +00:00
|
|
|
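/* Decrement the IPv4 TTL or IPv6 hop limit in place, updating the flow
 * key and, for IPv4, the header checksum. Returns -EHOSTUNREACH when
 * the field would expire so the caller can run the dec_ttl exception
 * actions instead.
 */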
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (skb->protocol == htons(ETH_P_IPV6)) {
|
|
|
|
struct ipv6hdr *nh;
|
|
|
|
|
|
|
|
err = skb_ensure_writable(skb, skb_network_offset(skb) +
|
|
|
|
sizeof(*nh));
|
|
|
|
if (unlikely(err))
|
|
|
|
return err;
|
|
|
|
|
|
|
|
nh = ipv6_hdr(skb);
|
|
|
|
|
|
|
|
if (nh->hop_limit <= 1)
|
|
|
|
return -EHOSTUNREACH;
|
|
|
|
|
|
|
|
key->ip.ttl = --nh->hop_limit;
|
2020-12-07 10:08:39 +00:00
|
|
|
} else if (skb->protocol == htons(ETH_P_IP)) {
|
2020-02-15 13:20:56 +00:00
|
|
|
struct iphdr *nh;
|
|
|
|
u8 old_ttl;
|
|
|
|
|
|
|
|
err = skb_ensure_writable(skb, skb_network_offset(skb) +
|
|
|
|
sizeof(*nh));
|
|
|
|
if (unlikely(err))
|
|
|
|
return err;
|
|
|
|
|
|
|
|
nh = ip_hdr(skb);
|
|
|
|
if (nh->ttl <= 1)
|
|
|
|
return -EHOSTUNREACH;
|
|
|
|
|
|
|
|
old_ttl = nh->ttl--;
|
|
|
|
csum_replace2(&nh->check, htons(old_ttl << 8),
|
|
|
|
htons(nh->ttl << 8));
|
|
|
|
key->ip.ttl = nh->ttl;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
/* Execute a list of actions against 'skb'. */
|
|
|
|
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
2014-09-16 02:15:28 +00:00
|
|
|
struct sw_flow_key *key,
|
2014-07-21 22:12:34 +00:00
|
|
|
const struct nlattr *attr, int len)
|
2011-10-26 02:26:31 +00:00
|
|
|
{
|
|
|
|
const struct nlattr *a;
|
|
|
|
int rem;
|
|
|
|
|
|
|
|
for (a = attr, rem = len; rem > 0;
|
|
|
|
a = nla_next(a, &rem)) {
|
|
|
|
int err = 0;
|
|
|
|
|
2021-06-22 14:02:33 +00:00
|
|
|
if (trace_ovs_do_execute_action_enabled())
|
|
|
|
trace_ovs_do_execute_action(dp, skb, key, a, rem);
|
|
|
|
|
2023-08-11 14:12:48 +00:00
|
|
|
/* Actions that rightfully have to consume the skb should do it
|
|
|
|
* and return directly.
|
|
|
|
*/
|
2017-01-27 21:45:28 +00:00
|
|
|
switch (nla_type(a)) {
|
|
|
|
case OVS_ACTION_ATTR_OUTPUT: {
|
|
|
|
int port = nla_get_u32(a);
|
|
|
|
struct sk_buff *clone;
|
|
|
|
|
|
|
|
/* Every output action needs a separate clone
|
|
|
|
* of 'skb'. In case the output action is the
|
|
|
|
* last action, cloning can be avoided.
|
|
|
|
*/
|
|
|
|
if (nla_is_last(a, rem)) {
|
|
|
|
do_output(dp, skb, port, key);
|
|
|
|
/* 'skb' has been used for output.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
2014-09-08 07:35:02 +00:00
|
|
|
|
2017-01-27 21:45:28 +00:00
|
|
|
clone = skb_clone(skb, GFP_ATOMIC);
|
|
|
|
if (clone)
|
|
|
|
do_output(dp, clone, port, key);
|
2016-06-10 18:49:33 +00:00
|
|
|
OVS_CB(skb)->cutlen = 0;
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
2017-01-27 21:45:28 +00:00
|
|
|
}
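/* Truncation is deferred: the excess length is recorded in cutlen and
 * trimmed only when the packet is finally output or sent to user space.
 */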
|
2011-10-26 02:26:31 +00:00
|
|
|
|
2016-06-10 18:49:33 +00:00
|
|
|
case OVS_ACTION_ATTR_TRUNC: {
|
|
|
|
struct ovs_action_trunc *trunc = nla_data(a);
|
|
|
|
|
|
|
|
if (skb->len > trunc->max_len)
|
|
|
|
OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
case OVS_ACTION_ATTR_USERSPACE:
|
2016-06-10 18:49:33 +00:00
|
|
|
output_userspace(dp, skb, key, a, attr,
|
|
|
|
len, OVS_CB(skb)->cutlen);
|
|
|
|
OVS_CB(skb)->cutlen = 0;
|
2023-08-11 14:12:48 +00:00
|
|
|
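/* The upcall path has copied what it needed; if this was the last
 * action, free the original skb and stop.
 */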
if (nla_is_last(a, rem)) {
|
|
|
|
consume_skb(skb);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
2014-09-16 02:37:25 +00:00
|
|
|
case OVS_ACTION_ATTR_HASH:
|
|
|
|
execute_hash(skb, key, a);
|
|
|
|
break;
|
|
|
|
|
2019-12-21 03:20:46 +00:00
|
|
|
case OVS_ACTION_ATTR_PUSH_MPLS: {
|
|
|
|
struct ovs_action_push_mpls *mpls = nla_data(a);
|
|
|
|
|
|
|
|
err = push_mpls(skb, key, mpls->mpls_lse,
|
|
|
|
mpls->mpls_ethertype, skb->mac_len);
|
2014-10-06 12:05:13 +00:00
|
|
|
break;
|
2019-12-21 03:20:46 +00:00
|
|
|
}
|
|
|
|
case OVS_ACTION_ATTR_ADD_MPLS: {
|
|
|
|
struct ovs_action_add_mpls *mpls = nla_data(a);
|
|
|
|
__u16 mac_len = 0;
|
|
|
|
|
|
|
|
if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
|
|
|
|
mac_len = skb->mac_len;
|
2014-10-06 12:05:13 +00:00
|
|
|
|
2019-12-21 03:20:46 +00:00
|
|
|
err = push_mpls(skb, key, mpls->mpls_lse,
|
|
|
|
mpls->mpls_ethertype, mac_len);
|
|
|
|
break;
|
|
|
|
}
|
2014-10-06 12:05:13 +00:00
|
|
|
case OVS_ACTION_ATTR_POP_MPLS:
|
2014-11-06 14:55:14 +00:00
|
|
|
err = pop_mpls(skb, key, nla_get_be16(a));
|
2014-10-06 12:05:13 +00:00
|
|
|
break;
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
case OVS_ACTION_ATTR_PUSH_VLAN:
|
2014-11-06 14:55:14 +00:00
|
|
|
err = push_vlan(skb, key, nla_data(a));
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_POP_VLAN:
|
2014-11-06 14:55:14 +00:00
|
|
|
err = pop_vlan(skb, key);
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
2017-03-20 23:32:30 +00:00
|
|
|
case OVS_ACTION_ATTR_RECIRC: {
|
|
|
|
bool last = nla_is_last(a, rem);
|
|
|
|
|
|
|
|
err = execute_recirc(dp, skb, key, a, last);
|
|
|
|
if (last) {
|
2014-09-16 02:37:25 +00:00
|
|
|
/* If this is the last action, the skb has
|
|
|
|
* been consumed or freed.
|
|
|
|
* Return immediately.
|
|
|
|
*/
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
break;
|
2017-03-20 23:32:30 +00:00
|
|
|
}
|
2014-09-16 02:37:25 +00:00
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
case OVS_ACTION_ATTR_SET:
|
2014-11-06 14:55:14 +00:00
|
|
|
err = execute_set_action(skb, key, nla_data(a));
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
|
|
|
|
2015-02-05 21:40:49 +00:00
|
|
|
case OVS_ACTION_ATTR_SET_MASKED:
|
|
|
|
case OVS_ACTION_ATTR_SET_TO_MASKED:
|
|
|
|
err = execute_masked_set_action(skb, key, nla_data(a));
|
|
|
|
break;
|
|
|
|
|
2017-03-20 23:32:29 +00:00
|
|
|
case OVS_ACTION_ATTR_SAMPLE: {
|
|
|
|
bool last = nla_is_last(a, rem);
|
|
|
|
|
|
|
|
err = sample(dp, skb, key, a, last);
|
|
|
|
if (last)
|
|
|
|
return err;
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
break;
|
2017-03-20 23:32:29 +00:00
|
|
|
}
|
2015-08-26 18:31:48 +00:00
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_CT:
|
2015-10-06 17:59:58 +00:00
|
|
|
if (!is_flow_key_valid(key)) {
|
|
|
|
err = ovs_flow_key_update(skb, key);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-08-26 18:31:48 +00:00
|
|
|
err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
|
|
|
|
nla_data(a));
|
|
|
|
|
|
|
|
/* Hide stolen IP fragments from user space. */
|
2015-10-26 03:21:48 +00:00
|
|
|
if (err)
|
|
|
|
return err == -EINPROGRESS ? 0 : err;
|
2015-08-26 18:31:48 +00:00
|
|
|
break;
|
2016-11-10 15:28:23 +00:00
|
|
|
|
2017-10-10 20:54:44 +00:00
|
|
|
case OVS_ACTION_ATTR_CT_CLEAR:
|
|
|
|
err = ovs_ct_clear(skb, key);
|
|
|
|
break;
|
|
|
|
|
2016-11-10 15:28:23 +00:00
|
|
|
case OVS_ACTION_ATTR_PUSH_ETH:
|
|
|
|
err = push_eth(skb, key, nla_data(a));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_POP_ETH:
|
|
|
|
err = pop_eth(skb, key);
|
|
|
|
break;
|
2017-11-07 13:07:02 +00:00
|
|
|
|
2023-09-21 19:42:35 +00:00
|
|
|
case OVS_ACTION_ATTR_PUSH_NSH:
|
|
|
|
err = push_nsh(skb, key, nla_data(a));
|
2017-11-07 13:07:02 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_POP_NSH:
|
|
|
|
err = pop_nsh(skb, key);
|
|
|
|
break;
|
2017-11-10 20:09:43 +00:00
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_METER:
|
|
|
|
if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
|
2023-08-11 14:12:51 +00:00
|
|
|
ovs_kfree_skb_reason(skb, OVS_DROP_METER);
|
2017-11-10 20:09:43 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2018-07-02 15:18:03 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_CLONE: {
|
|
|
|
bool last = nla_is_last(a, rem);
|
|
|
|
|
|
|
|
err = clone(dp, skb, key, a, last);
|
|
|
|
if (last)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
2019-03-26 00:43:46 +00:00
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
|
|
|
|
bool last = nla_is_last(a, rem);
|
|
|
|
|
|
|
|
err = execute_check_pkt_len(dp, skb, key, a, last);
|
|
|
|
if (last)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
openvswitch: add TTL decrement action
New action to decrement TTL instead of setting it to a fixed value.
This action will decrement the TTL and, if the TTL expires, either drop
the packet or execute an action passed via a nested attribute.
The default TTL-expired action is to drop the packet.
Supports both IPv4 and IPv6 via the ttl and hop_limit fields, respectively.
Tested with a corresponding change in the userspace:
# ovs-dpctl dump-flows
in_port(2),eth(),eth_type(0x0800), packets:0, bytes:0, used:never, actions:dec_ttl{ttl<=1 action:(drop)},1
in_port(1),eth(),eth_type(0x0800), packets:0, bytes:0, used:never, actions:dec_ttl{ttl<=1 action:(drop)},2
in_port(1),eth(),eth_type(0x0806), packets:0, bytes:0, used:never, actions:2
in_port(2),eth(),eth_type(0x0806), packets:0, bytes:0, used:never, actions:1
# ping -c1 192.168.0.2 -t 42
IP (tos 0x0, ttl 41, id 61647, offset 0, flags [DF], proto ICMP (1), length 84)
192.168.0.1 > 192.168.0.2: ICMP echo request, id 386, seq 1, length 64
# ping -c1 192.168.0.2 -t 120
IP (tos 0x0, ttl 119, id 62070, offset 0, flags [DF], proto ICMP (1), length 84)
192.168.0.1 > 192.168.0.2: ICMP echo request, id 388, seq 1, length 64
# ping -c1 192.168.0.2 -t 1
#
Co-developed-by: Bindiya Kurle <bindiyakurle@gmail.com>
Signed-off-by: Bindiya Kurle <bindiyakurle@gmail.com>
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-02-15 13:20:56 +00:00
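For IPv4, the decrement itself reduces to the standard incremental checksum
update done by ip_decrease_ttl(); the sketch below is illustrative only (the
real execute_dec_ttl() also validates the header and handles the IPv6
hop_limit), with -EHOSTUNREACH signalling expiry to the dispatcher below:

/* Illustrative IPv4-only sketch of the TTL decrement described above.
 * Returns -EHOSTUNREACH on expiry so the caller can run the nested
 * "TTL expired" action (or drop the packet), as the dispatch code below does.
 */
static int sketch_dec_ttl_ipv4(struct sk_buff *skb)
{
	struct iphdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);
	if (nh->ttl <= 1)
		return -EHOSTUNREACH;

	ip_decrease_ttl(nh);	/* decrements ttl and patches the checksum */
	return 0;
}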
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_DEC_TTL:
|
|
|
|
err = execute_dec_ttl(skb, key);
|
2021-01-13 13:50:00 +00:00
|
|
|
if (err == -EHOSTUNREACH)
|
|
|
|
return dec_ttl_exception_handler(dp, skb,
|
|
|
|
key, a);
|
2020-02-15 13:20:56 +00:00
|
|
|
break;
|
2023-08-11 14:12:50 +00:00
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_DROP: {
|
|
|
|
enum ovs_drop_reason reason = nla_get_u32(a)
|
|
|
|
? OVS_DROP_EXPLICIT_WITH_ERROR
|
|
|
|
: OVS_DROP_EXPLICIT;
|
|
|
|
|
|
|
|
ovs_kfree_skb_reason(skb, reason);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unlikely(err)) {
|
2023-08-11 14:12:49 +00:00
|
|
|
ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
|
2011-10-26 02:26:31 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-11 14:12:48 +00:00
|
|
|
ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
|
2011-10-26 02:26:31 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-20 23:32:30 +00:00
|
|
|
/* Execute the actions on a clone of the packet. The execution
|
|
|
|
* does not affect the original 'skb' or the original 'key'.
|
|
|
|
*
|
|
|
|
* The execution may be deferred in case the actions cannot be executed
|
|
|
|
* immediately.
|
|
|
|
*/
|
|
|
|
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
struct sw_flow_key *key, u32 recirc_id,
|
|
|
|
const struct nlattr *actions, int len,
|
|
|
|
bool last, bool clone_flow_key)
|
|
|
|
{
|
|
|
|
struct deferred_action *da;
|
|
|
|
struct sw_flow_key *clone;
|
|
|
|
|
|
|
|
skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
|
|
|
|
if (!skb) {
|
|
|
|
/* Out of memory, skip this action.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* When clone_flow_key is false, the 'key' will not be changed
|
|
|
|
* by the actions, so the 'key' can be used directly.
|
|
|
|
* Otherwise, try to clone key from the next recursion level of
|
|
|
|
* 'flow_keys'. If clone is successful, execute the actions
|
|
|
|
* without deferring.
|
|
|
|
*/
|
|
|
|
clone = clone_flow_key ? clone_key(key) : key;
|
|
|
|
if (clone) {
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (actions) { /* Sample action */
|
|
|
|
if (clone_flow_key)
|
|
|
|
__this_cpu_inc(exec_actions_level);
|
|
|
|
|
|
|
|
err = do_execute_actions(dp, skb, clone,
|
|
|
|
actions, len);
|
|
|
|
|
|
|
|
if (clone_flow_key)
|
|
|
|
__this_cpu_dec(exec_actions_level);
|
|
|
|
} else { /* Recirc action */
|
|
|
|
clone->recirc_id = recirc_id;
|
|
|
|
ovs_dp_process_packet(skb, clone);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Out of 'flow_keys' space. Defer actions */
|
|
|
|
da = add_deferred_actions(skb, key, actions, len);
|
|
|
|
if (da) {
|
|
|
|
if (!actions) { /* Recirc action */
|
|
|
|
key = &da->pkt_key;
|
|
|
|
key->recirc_id = recirc_id;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* Out of per CPU action FIFO space. Drop the 'skb' and
|
|
|
|
* log an error.
|
|
|
|
*/
|
2023-08-11 14:12:52 +00:00
|
|
|
ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);
|
2017-03-20 23:32:30 +00:00
|
|
|
|
|
|
|
if (net_ratelimit()) {
|
|
|
|
if (actions) { /* Sample action */
|
|
|
|
pr_warn("%s: deferred action limit reached, drop sample action\n",
|
|
|
|
ovs_dp_name(dp));
|
|
|
|
} else { /* Recirc action */
|
2022-03-30 19:42:45 +00:00
|
|
|
pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
|
|
|
|
ovs_dp_name(dp), recirc_id);
|
2017-03-20 23:32:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-09-16 02:37:25 +00:00
|
|
|
static void process_deferred_actions(struct datapath *dp)
|
|
|
|
{
|
|
|
|
struct action_fifo *fifo = this_cpu_ptr(action_fifos);
|
|
|
|
|
|
|
|
/* Do not touch the FIFO if there are no deferred actions. */
|
|
|
|
if (action_fifo_is_empty(fifo))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Finish executing all deferred actions. */
|
|
|
|
do {
|
|
|
|
struct deferred_action *da = action_fifo_get(fifo);
|
|
|
|
struct sk_buff *skb = da->skb;
|
|
|
|
struct sw_flow_key *key = &da->pkt_key;
|
|
|
|
const struct nlattr *actions = da->actions;
|
2017-03-20 23:32:27 +00:00
|
|
|
int actions_len = da->actions_len;
|
2014-09-16 02:37:25 +00:00
|
|
|
|
|
|
|
if (actions)
|
2017-03-20 23:32:27 +00:00
|
|
|
do_execute_actions(dp, skb, key, actions, actions_len);
|
2014-09-16 02:37:25 +00:00
|
|
|
else
|
|
|
|
ovs_dp_process_packet(skb, key);
|
|
|
|
} while (!action_fifo_is_empty(fifo));
|
|
|
|
|
|
|
|
/* Reset FIFO for the next packet. */
|
|
|
|
action_fifo_init(fifo);
|
|
|
|
}
|
|
|
|
|
2011-10-26 02:26:31 +00:00
|
|
|
/* Execute a list of actions against 'skb'. */
|
2014-09-16 02:15:28 +00:00
|
|
|
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
2014-11-06 14:58:52 +00:00
|
|
|
const struct sw_flow_actions *acts,
|
|
|
|
struct sw_flow_key *key)
|
2011-10-26 02:26:31 +00:00
|
|
|
{
|
2016-01-18 17:03:48 +00:00
|
|
|
int err, level;
|
|
|
|
|
|
|
|
level = __this_cpu_inc_return(exec_actions_level);
|
2016-09-13 14:08:54 +00:00
|
|
|
if (unlikely(level > OVS_RECURSION_LIMIT)) {
|
2016-01-18 17:03:48 +00:00
|
|
|
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
|
|
|
|
ovs_dp_name(dp));
|
2023-08-11 14:12:52 +00:00
|
|
|
ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
|
2016-01-18 17:03:48 +00:00
|
|
|
err = -ENETDOWN;
|
|
|
|
goto out;
|
|
|
|
}
|
2014-09-16 02:37:25 +00:00
|
|
|
|
openvswitch: fix skb_panic due to the incorrect actions attrlen
For sw_flow_actions, the actions_len only represents the kernel part's
size; when we dump the actions to user space, we do the conversions, so
its true size may become bigger than the actions_len.
But unfortunately, for OVS_PACKET_ATTR_ACTIONS, we use the actions_len
to alloc the skbuff, so the user_skb's size may become insufficient and
an oops will happen like this:
skbuff: skb_over_panic: text:ffffffff8148fabf len:1749 put:157 head:
ffff881300f39000 data:ffff881300f39000 tail:0x6d5 end:0x6c0 dev:<NULL>
------------[ cut here ]------------
kernel BUG at net/core/skbuff.c:129!
[...]
Call Trace:
<IRQ>
[<ffffffff8148be82>] skb_put+0x43/0x44
[<ffffffff8148fabf>] skb_zerocopy+0x6c/0x1f4
[<ffffffffa0290d36>] queue_userspace_packet+0x3a3/0x448 [openvswitch]
[<ffffffffa0292023>] ovs_dp_upcall+0x30/0x5c [openvswitch]
[<ffffffffa028d435>] output_userspace+0x132/0x158 [openvswitch]
[<ffffffffa01e6890>] ? ip6_rcv_finish+0x74/0x77 [ipv6]
[<ffffffffa028e277>] do_execute_actions+0xcc1/0xdc8 [openvswitch]
[<ffffffffa028e3f2>] ovs_execute_actions+0x74/0x106 [openvswitch]
[<ffffffffa0292130>] ovs_dp_process_packet+0xe1/0xfd [openvswitch]
[<ffffffffa0292b77>] ? key_extract+0x63c/0x8d5 [openvswitch]
[<ffffffffa029848b>] ovs_vport_receive+0xa1/0xc3 [openvswitch]
[...]
Also we can see that the actions_len is much smaller than the orig_len:
crash> struct sw_flow_actions 0xffff8812f539d000
struct sw_flow_actions {
rcu = {
next = 0xffff8812f5398800,
func = 0xffffe3b00035db32
},
orig_len = 1384,
actions_len = 592,
actions = 0xffff8812f539d01c
}
So as a quick fix, use the orig_len instead of the actions_len to alloc
the user_skb.
Last, this oops happened on our system running a relatively old kernel,
but the same risk still exists on mainline, since we have been using the
wrong actions_len from the beginning.
Fixes: ccea74457bbd ("openvswitch: include datapath actions with sampled-packet upcall to userspace")
Cc: Neil McKee <neil.mckee@inmon.com>
Signed-off-by: Liping Zhang <zlpnobody@gmail.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-08-16 05:30:07 +00:00
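The assignment just below stashes the original (pre-conversion) length in the
skb control block so the upcall path can size its Netlink message from it. A
hedged sketch of the sizing idea (hypothetical helper name, not the real
upcall_msg_size() in datapath.c):

/* Hypothetical sketch: reserve room for the original action list when
 * sizing the OVS_PACKET_ATTR_ACTIONS part of an upcall message.
 */
static size_t sketch_actions_attr_size(struct sk_buff *skb)
{
	/* acts_origlen is set from acts->orig_len just below. */
	return nla_total_size(OVS_CB(skb)->acts_origlen);
}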
|
|
|
OVS_CB(skb)->acts_origlen = acts->orig_len;
|
2014-09-16 02:37:25 +00:00
|
|
|
err = do_execute_actions(dp, skb, key,
|
|
|
|
acts->actions, acts->actions_len);
|
|
|
|
|
2016-01-18 17:03:48 +00:00
|
|
|
if (level == 1)
|
2014-09-16 02:37:25 +00:00
|
|
|
process_deferred_actions(dp);
|
|
|
|
|
2016-01-18 17:03:48 +00:00
|
|
|
out:
|
|
|
|
__this_cpu_dec(exec_actions_level);
|
2014-09-16 02:37:25 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int action_fifos_init(void)
|
|
|
|
{
|
|
|
|
action_fifos = alloc_percpu(struct action_fifo);
|
|
|
|
if (!action_fifos)
|
|
|
|
return -ENOMEM;
|
2011-10-26 02:26:31 +00:00
|
|
|
|
2017-03-20 23:32:28 +00:00
|
|
|
flow_keys = alloc_percpu(struct action_flow_keys);
|
|
|
|
if (!flow_keys) {
|
2016-09-13 14:08:54 +00:00
|
|
|
free_percpu(action_fifos);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2014-09-16 02:37:25 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void action_fifos_exit(void)
|
|
|
|
{
|
|
|
|
free_percpu(action_fifos);
|
2017-03-20 23:32:28 +00:00
|
|
|
free_percpu(flow_keys);
|
2011-10-26 02:26:31 +00:00
|
|
|
}
|