/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the Forwarding Information Base.
 *
 * Authors:     A.N.Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#ifndef _NET_IP_FIB_H
#define _NET_IP_FIB_H

#include <net/flow.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/fib_notifier.h>
#include <net/fib_rules.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
struct fib_config {
        u8                      fc_dst_len;
        u8                      fc_tos;
        u8                      fc_protocol;
        u8                      fc_scope;
        u8                      fc_type;
        u8                      fc_gw_family;
        /* 2 bytes unused */
        u32                     fc_table;
        __be32                  fc_dst;
        union {
                __be32          fc_gw4;
                struct in6_addr fc_gw6;
        };
        int                     fc_oif;
        u32                     fc_flags;
        u32                     fc_priority;
        __be32                  fc_prefsrc;
        u32                     fc_nh_id;
        struct nlattr           *fc_mx;
        struct rtnexthop        *fc_mp;
        int                     fc_mx_len;
        int                     fc_mp_len;
        u32                     fc_flow;
        u32                     fc_nlflags;
        struct nl_info          fc_nlinfo;
        struct nlattr           *fc_encap;
        u16                     fc_encap_type;
};
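
/*
 * Illustrative sketch only (not part of this header): a request such as
 * "ip route add 198.51.100.0/24 via 192.0.2.1" roughly decodes into
 *
 *      struct fib_config cfg = {
 *              .fc_dst_len     = 24,
 *              .fc_dst         = htonl(0xc6336400),    // 198.51.100.0
 *              .fc_gw_family   = AF_INET,
 *              .fc_gw4         = htonl(0xc0000201),    // 192.0.2.1
 *              .fc_protocol    = RTPROT_BOOT,
 *              .fc_scope       = RT_SCOPE_UNIVERSE,
 *              .fc_type        = RTN_UNICAST,
 *              .fc_table       = RT_TABLE_MAIN,
 *      };
 *
 * before being handed to fib_table_insert().  The real decoding from the
 * RTM_NEWROUTE netlink attributes lives in net/ipv4/fib_frontend.c.
 */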

struct fib_info;
struct rtable;

struct fib_nh_exception {
        struct fib_nh_exception __rcu   *fnhe_next;
        int                             fnhe_genid;
        __be32                          fnhe_daddr;
        u32                             fnhe_pmtu;
        bool                            fnhe_mtu_locked;
        __be32                          fnhe_gw;
        unsigned long                   fnhe_expires;
        struct rtable __rcu             *fnhe_rth_input;
        struct rtable __rcu             *fnhe_rth_output;
        unsigned long                   fnhe_stamp;
        struct rcu_head                 rcu;
};

struct fnhe_hash_bucket {
        struct fib_nh_exception __rcu   *chain;
};

#define FNHE_HASH_SHIFT         11
#define FNHE_HASH_SIZE          (1 << FNHE_HASH_SHIFT)
#define FNHE_RECLAIM_DEPTH      5
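
/*
 * Next-hop exceptions cache per-destination state learned at run time
 * (redirected gateways, discovered path MTU).  Each nexthop carries a small
 * hash of FNHE_HASH_SIZE buckets keyed by destination address; chains deeper
 * than FNHE_RECLAIM_DEPTH are candidates for reclaim.  A lookup is an RCU
 * walk of one chain, roughly (illustrative only; the real walker lives in
 * net/ipv4/route.c):
 *
 *      for (fnhe = rcu_dereference(bucket->chain); fnhe;
 *           fnhe = rcu_dereference(fnhe->fnhe_next))
 *              if (fnhe->fnhe_daddr == daddr)
 *                      return fnhe;
 */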

struct fib_nh_common {
        struct net_device       *nhc_dev;
        int                     nhc_oif;
        unsigned char           nhc_scope;
        u8                      nhc_family;
        u8                      nhc_gw_family;
        unsigned char           nhc_flags;
        struct lwtunnel_state   *nhc_lwtstate;

        union {
                __be32          ipv4;
                struct in6_addr ipv6;
        } nhc_gw;

        int                     nhc_weight;
        atomic_t                nhc_upper_bound;

        /* v4 specific, but allows fib6_nh with v4 routes */
        struct rtable __rcu * __percpu *nhc_pcpu_rth_output;
        struct rtable __rcu     *nhc_rth_input;
        struct fnhe_hash_bucket __rcu *nhc_exceptions;
};
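
/*
 * fib_nh_common is the family-independent part of a nexthop, shared between
 * the IPv4 fib_nh and the IPv6 fib6_nh so that, for example, an IPv4 route
 * can use a nexthop with an IPv6 gateway.  Illustrative only: consumers
 * should key off nhc_gw_family rather than assume an IPv4 gateway, e.g.
 *
 *      if (nhc->nhc_gw_family == AF_INET)
 *              ... use nhc->nhc_gw.ipv4 ...
 *      else if (nhc->nhc_gw_family == AF_INET6)
 *              ... use nhc->nhc_gw.ipv6 ...
 *      else
 *              ... connected route, no gateway ...
 */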

struct fib_nh {
        struct fib_nh_common    nh_common;
        struct hlist_node       nh_hash;
        struct fib_info         *nh_parent;
#ifdef CONFIG_IP_ROUTE_CLASSID
        __u32                   nh_tclassid;
#endif
        __be32                  nh_saddr;
        int                     nh_saddr_genid;
#define fib_nh_family           nh_common.nhc_family
#define fib_nh_dev              nh_common.nhc_dev
#define fib_nh_oif              nh_common.nhc_oif
#define fib_nh_flags            nh_common.nhc_flags
#define fib_nh_lws              nh_common.nhc_lwtstate
#define fib_nh_scope            nh_common.nhc_scope
#define fib_nh_gw_family        nh_common.nhc_gw_family
#define fib_nh_gw4              nh_common.nhc_gw.ipv4
#define fib_nh_gw6              nh_common.nhc_gw.ipv6
#define fib_nh_weight           nh_common.nhc_weight
#define fib_nh_upper_bound      nh_common.nhc_upper_bound
};
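
/*
 * The fib_nh_* defines above are field aliases: legacy IPv4-only code keeps
 * using names such as fib_nh->fib_nh_dev or fib_nh->fib_nh_gw4 while the
 * data actually lives in the embedded fib_nh_common.  Code that may see
 * either address family should take a struct fib_nh_common * (for example
 * via FIB_RES_NHC(), defined later in this header) and use the nhc_* names
 * directly.
 */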

/*
 * This structure contains data shared by many routes.
 */

struct nexthop;

struct fib_info {
        struct hlist_node       fib_hash;
        struct hlist_node       fib_lhash;
        struct list_head        nh_list;
        struct net              *fib_net;
        refcount_t              fib_treeref;
        refcount_t              fib_clntref;
        unsigned int            fib_flags;
        unsigned char           fib_dead;
        unsigned char           fib_protocol;
        unsigned char           fib_scope;
        unsigned char           fib_type;
        __be32                  fib_prefsrc;
        u32                     fib_tb_id;
        u32                     fib_priority;
        struct dst_metrics      *fib_metrics;
#define fib_mtu         fib_metrics->metrics[RTAX_MTU-1]
#define fib_window      fib_metrics->metrics[RTAX_WINDOW-1]
#define fib_rtt         fib_metrics->metrics[RTAX_RTT-1]
#define fib_advmss      fib_metrics->metrics[RTAX_ADVMSS-1]
        int                     fib_nhs;
        bool                    fib_nh_is_v6;
        bool                    nh_updated;
        struct nexthop          *nh;
        struct rcu_head         rcu;
        struct fib_nh           fib_nh[];
};
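
/*
 * A fib_info is shared by every route entry that has the same nexthop set,
 * metrics and related attributes.  Two reference counts keep it alive:
 * fib_treeref counts references held by the FIB trees themselves, while
 * fib_clntref counts "client" references taken by lookups and is managed
 * with fib_info_hold()/fib_info_put() further down in this header.
 */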

#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rule;
#endif

struct fib_table;
struct fib_result {
        __be32                  prefix;
        unsigned char           prefixlen;
        unsigned char           nh_sel;
        unsigned char           type;
        unsigned char           scope;
        u32                     tclassid;
        struct fib_nh_common    *nhc;
        struct fib_info         *fi;
        struct fib_table        *table;
        struct hlist_head       *fa_head;
};

struct fib_result_nl {
        __be32          fl_addr;        /* To be looked up */
        u32             fl_mark;
        unsigned char   fl_tos;
        unsigned char   fl_scope;
        unsigned char   tb_id_in;

        unsigned char   tb_id;          /* Results */
        unsigned char   prefixlen;
        unsigned char   nh_sel;
        unsigned char   type;
        unsigned char   scope;
        int             err;
};

#ifdef CONFIG_IP_MULTIPLE_TABLES
#define FIB_TABLE_HASHSZ 256
#else
#define FIB_TABLE_HASHSZ 2
#endif

__be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
                                 unsigned char scope);
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res);

#define FIB_RES_NHC(res)        ((res).nhc)
#define FIB_RES_DEV(res)        (FIB_RES_NHC(res)->nhc_dev)
#define FIB_RES_OIF(res)        (FIB_RES_NHC(res)->nhc_oif)
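
/*
 * Minimal lookup sketch (illustrative only, error handling trimmed).  A
 * fib_result is filled in by fib_lookup()/fib_table_lookup() (declared
 * below) and the FIB_RES_* accessors then give the selected nexthop:
 *
 *      struct fib_result res;
 *      struct flowi4 fl4 = { .daddr = daddr };
 *
 *      rcu_read_lock();
 *      if (!fib_lookup(net, &fl4, &res, 0)) {
 *              struct net_device *dev = FIB_RES_DEV(res);
 *              // res.type, res.scope, res.prefix/prefixlen also valid here
 *      }
 *      rcu_read_unlock();
 *
 * res.fi is not reference-counted in this pattern (FIB_LOOKUP_NOREF), so it
 * must only be used inside the RCU read-side section.
 */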

struct fib_rt_info {
        struct fib_info         *fi;
        u32                     tb_id;
        __be32                  dst;
        int                     dst_len;
        u8                      tos;
        u8                      type;
        u8                      offload:1,
                                trap:1,
                                offload_failed:1,
                                unused:5;
};
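
/*
 * fib_rt_info identifies one route (table, prefix, tos, type, fib_info) for
 * the hardware-offload indications.  Drivers that program routes into an
 * ASIC fill one in and call fib_alias_hw_flags_set() (declared below) to
 * mark the route as offloaded, trapping packets to the CPU, or failed to
 * offload; the flags are reported to user space via RTM_F_OFFLOAD,
 * RTM_F_TRAP and related rtm_flags bits.
 */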

struct fib_entry_notifier_info {
        struct fib_notifier_info info; /* must be first */
        u32 dst;
        int dst_len;
        struct fib_info *fi;
        u8 tos;
        u8 type;
        u32 tb_id;
};

struct fib_nh_notifier_info {
        struct fib_notifier_info info; /* must be first */
        struct fib_nh *fib_nh;
};

int call_fib4_notifier(struct notifier_block *nb,
                       enum fib_event_type event_type,
                       struct fib_notifier_info *info);
int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
                        struct fib_notifier_info *info);

int __net_init fib4_notifier_init(struct net *net);
void __net_exit fib4_notifier_exit(struct net *net);

void fib_info_notify_update(struct net *net, struct nl_info *info);
int fib_notify(struct net *net, struct notifier_block *nb,
               struct netlink_ext_ack *extack);

struct fib_table {
        struct hlist_node       tb_hlist;
        u32                     tb_id;
        int                     tb_num_default;
        struct rcu_head         rcu;
        unsigned long           *tb_data;
        unsigned long           __data[];
};

struct fib_dump_filter {
        u32                     table_id;
        /* filter_set is an optimization: it records that at least one of
         * the filter fields below has been set.
         */
        bool                    filter_set;
        bool                    dump_routes;
        bool                    dump_exceptions;
        unsigned char           protocol;
        unsigned char           rt_type;
        unsigned int            flags;
        struct net_device       *dev;
};
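
/*
 * dump_routes/dump_exceptions select what a route dump returns: user space
 * requests exceptions (the cached PMTU/redirect entries) by setting
 * RTM_F_CLONED in the request.  When the request does not go through strict
 * checking (ip_valid_fib_dump_req(), declared below), both routes and
 * exceptions are dumped and no table/protocol filtering is applied.
 */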

int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                     struct fib_result *res, int fib_flags);
int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
                     struct netlink_ext_ack *extack);
int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
                     struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
                   struct netlink_callback *cb, struct fib_dump_filter *filter);
int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);

#ifndef CONFIG_IP_MULTIPLE_TABLES

#define TABLE_LOCAL_INDEX       (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
#define TABLE_MAIN_INDEX        (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1))

static inline struct fib_table *fib_get_table(struct net *net, u32 id)
{
        struct hlist_node *tb_hlist;
        struct hlist_head *ptr;

        ptr = id == RT_TABLE_LOCAL ?
                &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
                &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];

        tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));

        return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
}

static inline struct fib_table *fib_new_table(struct net *net, u32 id)
{
        return fib_get_table(net, id);
}

static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
                             struct fib_result *res, unsigned int flags)
{
        struct fib_table *tb;
        int err = -ENETUNREACH;

        rcu_read_lock();

        tb = fib_get_table(net, RT_TABLE_MAIN);
        if (tb)
                err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);

        if (err == -EAGAIN)
                err = -ENETUNREACH;

        rcu_read_unlock();

        return err;
}

static inline bool fib4_has_custom_rules(const struct net *net)
{
        return false;
}

static inline bool fib4_rule_default(const struct fib_rule *rule)
{
        return true;
}

static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb,
                                  struct netlink_ext_ack *extack)
{
        return 0;
}

static inline unsigned int fib4_rules_seq_read(struct net *net)
{
        return 0;
}

static inline bool fib4_rules_early_flow_dissect(struct net *net,
                                                 struct sk_buff *skb,
                                                 struct flowi4 *fl4,
                                                 struct flow_keys *flkeys)
{
        return false;
}
#else /* CONFIG_IP_MULTIPLE_TABLES */
int __net_init fib4_rules_init(struct net *net);
void __net_exit fib4_rules_exit(struct net *net);

struct fib_table *fib_new_table(struct net *net, u32 id);
struct fib_table *fib_get_table(struct net *net, u32 id);

int __fib_lookup(struct net *net, struct flowi4 *flp,
                 struct fib_result *res, unsigned int flags);

static inline int fib_lookup(struct net *net, struct flowi4 *flp,
                             struct fib_result *res, unsigned int flags)
{
        struct fib_table *tb;
        int err = -ENETUNREACH;

        flags |= FIB_LOOKUP_NOREF;
        if (net->ipv4.fib_has_custom_rules)
                return __fib_lookup(net, flp, res, flags);

        rcu_read_lock();

        res->tclassid = 0;

        tb = rcu_dereference_rtnl(net->ipv4.fib_main);
        if (tb)
                err = fib_table_lookup(tb, flp, res, flags);

        if (!err)
                goto out;

        tb = rcu_dereference_rtnl(net->ipv4.fib_default);
        if (tb)
                err = fib_table_lookup(tb, flp, res, flags);

out:
        if (err == -EAGAIN)
                err = -ENETUNREACH;

        rcu_read_unlock();

        return err;
}
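
/*
 * With CONFIG_IP_MULTIPLE_TABLES, the fast path above still avoids full rule
 * evaluation: only when custom rules are installed does the lookup go
 * through __fib_lookup() and the FIB-rules engine; otherwise the main table
 * and then the default table are consulted directly.  -EAGAIN from a table
 * means "no match here" and is folded into -ENETUNREACH for callers.
 */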

static inline bool fib4_has_custom_rules(const struct net *net)
{
        return net->ipv4.fib_has_custom_rules;
}

bool fib4_rule_default(const struct fib_rule *rule);
int fib4_rules_dump(struct net *net, struct notifier_block *nb,
                    struct netlink_ext_ack *extack);
unsigned int fib4_rules_seq_read(struct net *net);
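
/*
 * fib4_rule_default() reports whether a rule is one of the default FIB
 * rules, i.e. its selector matches all packets and its action points at the
 * local, main or default table.  Offloading drivers use this when they are
 * notified about rules so that the stock rule set does not make them
 * needlessly flush their hardware tables.
 */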

static inline bool fib4_rules_early_flow_dissect(struct net *net,
                                                 struct sk_buff *skb,
                                                 struct flowi4 *fl4,
                                                 struct flow_keys *flkeys)
{
        unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;

        if (!net->ipv4.fib_rules_require_fldissect)
                return false;

        skb_flow_dissect_flow_keys(skb, flkeys, flag);
        fl4->fl4_sport = flkeys->ports.src;
        fl4->fl4_dport = flkeys->ports.dst;
        fl4->flowi4_proto = flkeys->basic.ip_proto;

        return true;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */

/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
                    struct netlink_ext_ack *extack);
__be32 fib_compute_spec_dst(struct sk_buff *skb);
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
                        u8 tos, int oif, struct net_device *dev,
                        struct in_device *idev, u32 *itag);
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
        return atomic_read(&net->ipv4.fib_num_tclassid_users);
}
#else
static inline int fib_num_tclassid_users(struct net *net)
{
        return 0;
}
#endif
int fib_unmerge(struct net *net);

static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
                                          const struct net_device *dev)
{
        if (nhc->nhc_dev == dev ||
            l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
                return true;

        return false;
}

/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);

/* Fields used for sysctl_fib_multipath_hash_fields.
 * Common to IPv4 and IPv6.
 *
 * Add new fields at the end. This is user API.
 */
#define FIB_MULTIPATH_HASH_FIELD_SRC_IP                 BIT(0)
#define FIB_MULTIPATH_HASH_FIELD_DST_IP                 BIT(1)
#define FIB_MULTIPATH_HASH_FIELD_IP_PROTO               BIT(2)
#define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL              BIT(3)
#define FIB_MULTIPATH_HASH_FIELD_SRC_PORT               BIT(4)
#define FIB_MULTIPATH_HASH_FIELD_DST_PORT               BIT(5)
#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP           BIT(6)
#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP           BIT(7)
#define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO         BIT(8)
#define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL        BIT(9)
#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT         BIT(10)
#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT         BIT(11)

#define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK             \
        (FIB_MULTIPATH_HASH_FIELD_SRC_IP |              \
         FIB_MULTIPATH_HASH_FIELD_DST_IP |              \
         FIB_MULTIPATH_HASH_FIELD_IP_PROTO |            \
         FIB_MULTIPATH_HASH_FIELD_FLOWLABEL |           \
         FIB_MULTIPATH_HASH_FIELD_SRC_PORT |            \
         FIB_MULTIPATH_HASH_FIELD_DST_PORT)

#define FIB_MULTIPATH_HASH_FIELD_INNER_MASK             \
        (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP |        \
         FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP |        \
         FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO |      \
         FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL |     \
         FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT |      \
         FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)

#define FIB_MULTIPATH_HASH_FIELD_ALL_MASK               \
        (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK |          \
         FIB_MULTIPATH_HASH_FIELD_INNER_MASK)

#define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK           \
        (FIB_MULTIPATH_HASH_FIELD_SRC_IP |              \
         FIB_MULTIPATH_HASH_FIELD_DST_IP |              \
         FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
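
/*
 * Illustrative note: these bits form the value of the
 * net.ipv4.fib_multipath_hash_fields sysctl, which takes effect when the
 * custom multipath hash policy is selected, e.g. from user space:
 *
 *      sysctl -w net.ipv4.fib_multipath_hash_policy=3
 *      sysctl -w net.ipv4.fib_multipath_hash_fields=0x0037
 *
 * where 0x0037 = SRC_IP | DST_IP | IP_PROTO | SRC_PORT | DST_PORT.
 */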

#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
                       const struct sk_buff *skb, struct flow_keys *flkeys);
#endif
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
                 struct netlink_ext_ack *extack);
void fib_select_multipath(struct fib_result *res, int hash);
void fib_select_path(struct net *net, struct fib_result *res,
                     struct flowi4 *fl4, const struct sk_buff *skb);

int fib_nh_init(struct net *net, struct fib_nh *fib_nh,
                struct fib_config *cfg, int nh_weight,
                struct netlink_ext_ack *extack);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh);
int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
                       struct nlattr *fc_encap, u16 fc_encap_type,
                       void *cfg, gfp_t gfp_flags,
                       struct netlink_ext_ack *extack);
void fib_nh_common_release(struct fib_nh_common *nhc);

/* Exported by fib_trie.c */
void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
                         const struct flowi4 *flp);

static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
        struct fib_nh_common *nhc = res->nhc;
#ifdef CONFIG_IP_MULTIPLE_TABLES
        u32 rtag;
#endif
        if (nhc->nhc_family == AF_INET) {
                struct fib_nh *nh;

                nh = container_of(nhc, struct fib_nh, nh_common);
                *itag = nh->nh_tclassid << 16;
        } else {
                *itag = 0;
        }

#ifdef CONFIG_IP_MULTIPLE_TABLES
        rtag = res->tclassid;
        if (*itag == 0)
                *itag = (rtag << 16);
        *itag |= (rtag >> 16);
#endif
#endif
}

void fib_flush(struct net *net);
void free_fib_info(struct fib_info *fi);

static inline void fib_info_hold(struct fib_info *fi)
{
        refcount_inc(&fi->fib_clntref);
}

static inline void fib_info_put(struct fib_info *fi)
{
        if (refcount_dec_and_test(&fi->fib_clntref))
                free_fib_info(fi);
}
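
/*
 * fib_info_hold()/fib_info_put() manage the client reference count: every
 * hold must be paired with a put, and the final put frees the fib_info via
 * free_fib_info().  Lookups made with FIB_LOOKUP_NOREF skip this and rely
 * on RCU instead.
 */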

#ifdef CONFIG_PROC_FS
int __net_init fib_proc_init(struct net *net);
void __net_exit fib_proc_exit(struct net *net);
#else
static inline int fib_proc_init(struct net *net)
{
        return 0;
}
static inline void fib_proc_exit(struct net *net)
{
}
#endif

u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);

int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
                          struct fib_dump_filter *filter,
                          struct netlink_callback *cb);

int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
                     u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
                    int nh_weight, u8 rt_family, u32 nh_tclassid);
#endif /* _NET_IP_FIB_H */