Merge branch 'mlxsw-add-support-for-nexthop-objects'

Ido Schimmel says:

====================
mlxsw: Add support for nexthop objects

This patch set adds support for nexthop objects in mlxsw. Nexthop
objects are treated as another front-end for programming nexthops, in
addition to the existing IPv4 and IPv6 front-ends.

Patch #1 registers a listener to the nexthop notification chain and
parses the nexthop information into the existing mlxsw data structures
that are already used by the IPv4 and IPv6 front-ends. Blackhole
nexthops are currently rejected. Support will be added in a follow-up
patch set.

Patch #2 extends mlxsw to resolve its internal nexthop objects from the
nexthop identifier encoded in the FIB info of the notified routes.

Patch #3 finally removes the limitation of rejecting routes that use
nexthop objects.

Patch #4 adds a selftest.

Patches #5-#8 add generic forwarding selftests that can be used with
veth pairs or physical loopbacks.
====================

Link: https://lore.kernel.org/r/20201119130848.407918-1-idosch@idosch.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2020-11-20 15:20:23 -08:00
commit 3cd336c517
6 changed files with 1201 additions and 15 deletions

View File

@ -2873,6 +2873,7 @@ struct mlxsw_sp_nexthop {
enum mlxsw_sp_nexthop_group_type {
MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
};
struct mlxsw_sp_nexthop_group_info {
@ -2894,6 +2895,9 @@ struct mlxsw_sp_nexthop_group {
struct {
struct fib_info *fi;
} ipv4;
struct {
u32 id;
} obj;
};
struct mlxsw_sp_nexthop_group_info *nhgi;
enum mlxsw_sp_nexthop_group_type type;
@ -3012,6 +3016,7 @@ struct mlxsw_sp_nexthop_group_cmp_arg {
union {
struct fib_info *fi;
struct mlxsw_sp_fib6_entry *fib6_entry;
u32 id;
};
};
@ -3074,6 +3079,8 @@ mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
cmp_arg->fib6_entry);
case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
return cmp_arg->id != nh_grp->obj.id;
default:
WARN_ON(1);
return 1;
@ -3100,6 +3107,8 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
}
return jhash(&val, sizeof(val), seed);
case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
default:
WARN_ON(1);
return 0;
@ -3134,6 +3143,8 @@ mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
default:
WARN_ON(1);
return 0;
@ -3538,6 +3549,25 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
}
/* Propagate the group's current hardware state to the nexthop object layer
 * by updating the object's "offload" flag (trap flag is always cleared here).
 */
static void
mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_nexthop_group *nh_grp)
{
	/* Do not update the flags if the nexthop group is being destroyed
	 * since:
	 * 1. The nexthop object is being deleted, in which case the flags are
	 * irrelevant.
	 * 2. The nexthop group was replaced by a newer group, in which case
	 * the flags of the nexthop object were already updated based on the
	 * new group.
	 */
	if (nh_grp->can_destroy)
		return;

	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
			     nh_grp->nhgi->adj_index_valid, false);
}
static void
mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@ -3549,6 +3579,9 @@ mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
break;
case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
break;
}
}
@ -4088,6 +4121,413 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
/* Validate a single nexthop object against driver capabilities.
 * Returns 0 when the nexthop can be offloaded, -EINVAL (with an extack
 * message) for blackhole, FDB and encapsulating nexthops, which are not
 * supported.
 */
static int
mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
				     const struct nh_notifier_single_info *nh,
				     struct netlink_ext_ack *extack)
{
	if (nh->is_reject) {
		NL_SET_ERR_MSG_MOD(extack, "Blackhole nexthops are not supported");
		return -EINVAL;
	}

	if (nh->is_fdb) {
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
		return -EINVAL;
	}

	if (nh->has_encap) {
		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
		return -EINVAL;
	}

	return 0;
}
/* Validate a nexthop group object: reject FDB groups, then validate each
 * member nexthop individually and require that every member has either a
 * gateway or an IPIP device.
 */
static int
mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
				    const struct nh_notifier_grp_info *nh_grp,
				    struct netlink_ext_ack *extack)
{
	int i;

	if (nh_grp->is_fdb) {
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
		return -EINVAL;
	}

	for (i = 0; i < nh_grp->num_nh; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_grp->nh_entries[i].nh;
		err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh,
							   extack);
		if (err)
			return err;

		/* Device only nexthops with an IPIP device are programmed as
		 * encapsulating adjacency entries.
		 */
		if (!nh->gw_family &&
		    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
			NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
			return -EINVAL;
		}
	}

	return 0;
}
/* Entry point for validating a nexthop object notification. Only REPLACE
 * events carry configuration that needs checking; other events are accepted
 * as-is. Dispatches to the group or single-nexthop validator.
 */
static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
					 unsigned long event,
					 struct nh_notifier_info *info)
{
	if (event != NEXTHOP_EVENT_REPLACE)
		return 0;

	if (info->is_grp)
		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
							   info->nh_grp,
							   info->extack);

	return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
						    info->extack);
}
/* Report whether the notified nexthop object routes packets via a gateway
 * (or an IPIP device, which is programmed as an encapsulating entry).
 */
static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
					    const struct nh_notifier_info *info)
{
	const struct net_device *dev;

	/* Group members were already checked during validation. */
	if (info->is_grp)
		return true;

	if (info->nh->gw_family)
		return true;

	dev = info->nh->dev;
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}
/* Initialize one driver nexthop from the notified nexthop object data:
 * record weight and gateway address, choose the neighbour table by address
 * family, allocate a counter, link the nexthop on the router list and
 * resolve its type (Ethernet/IPIP) from the netdevice.
 */
static int
mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_nexthop_group *nh_grp,
			  struct mlxsw_sp_nexthop *nh,
			  struct nh_notifier_single_info *nh_obj, int weight)
{
	struct net_device *dev = nh_obj->dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = weight;

	switch (nh_obj->gw_family) {
	case AF_INET:
		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
		nh->neigh_tbl = &arp_tbl;
		break;
	case AF_INET6:
		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
#if IS_ENABLED(CONFIG_IPV6)
		nh->neigh_tbl = &nd_tbl;
#endif
		break;
	}

	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_type_init;

	return 0;

err_type_init:
	/* Undo list insertion and counter allocation in reverse order. */
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}
/* Tear down a nexthop initialized by mlxsw_sp_nexthop_obj_init(), in
 * reverse order of initialization.
 */
static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
/* Allocate and populate the group info for a nexthop object notification.
 * A single (non-group) nexthop is modelled as a group of one. After all
 * members are initialized, the adjacency entries are written to the device
 * via the group refresh.
 */
static int
mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp,
				     struct nh_notifier_info *info)
{
	unsigned int nhs = info->is_grp ? info->nh_grp->num_nh : 1;
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct nh_notifier_single_info *nh_obj;
		int weight;

		nh = &nhgi->nexthops[i];
		if (info->is_grp) {
			nh_obj = &info->nh_grp->nh_entries[i].nh;
			weight = info->nh_grp->nh_entries[i].weight;
		} else {
			nh_obj = info->nh;
			weight = 1;
		}
		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
						weight);
		if (err)
			goto err_nexthop_obj_init;
	}
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
		goto err_group_refresh;
	}

	return 0;

err_group_refresh:
	/* All members were initialized; set 'i' so the loop below
	 * unwinds every one of them.
	 */
	i = nhgi->count;
err_nexthop_obj_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}
/* Release the group info: tear down members in reverse order, then refresh
 * the group so the device releases its adjacency entries. The adjacency
 * index must be invalid by then, hence the WARN.
 */
static void
mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
/* Create a driver nexthop group for a notified nexthop object, keyed by the
 * object's user-space identifier. Returns the group or ERR_PTR on failure.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
				  struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	nh_grp->obj.id = info->id;

	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
	if (err)
		goto err_nexthop_group_info_init;

	/* Destruction is gated on 'can_destroy'; see
	 * mlxsw_sp_nexthop_obj_group_destroy().
	 */
	nh_grp->can_destroy = false;

	return nh_grp;

err_nexthop_group_info_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}
/* Destroy an object-backed nexthop group. A no-op unless 'can_destroy' was
 * set, which allows callers to defer destruction while routes still use the
 * group (its fib_list must be empty by now).
 */
static void
mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
	kfree(nh_grp);
}
/* Look up an object-backed nexthop group by its nexthop object ID in the
 * shared nexthop group hash table. Returns NULL when not found.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	cmp_arg.id = id;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}
/* Insert a newly created object-backed group into the shared group hash
 * table so routes can resolve it by nexthop object ID.
 */
static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
}
/* Replace the contents of an existing object-backed group with the newly
 * created one by swapping their group info structures, then transitioning
 * the routes using the group between forward/trap states as dictated by the
 * validity of the old and new adjacency indexes. On failure the swap is
 * rolled back. On success the now-empty 'nh_grp' shell is destroyed.
 */
static int
mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp,
				   struct mlxsw_sp_nexthop_group *old_nh_grp,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
	int err;

	/* Swap the group infos so 'old_nh_grp' (the one routes point at)
	 * carries the new info, and vice versa.
	 */
	old_nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = old_nhgi;
	old_nhgi->nh_grp = nh_grp;

	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* Both the old adjacency index and the new one are valid.
		 * Routes are currently using the old one. Tell the device to
		 * replace the old adjacency index with the new one.
		 */
		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
						     old_nhgi->adj_index,
						     old_nhgi->ecmp_size);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
			goto err_out;
		}
	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
		/* The old adjacency index is valid, while the new one is not.
		 * Iterate over all the routes using the group and change them
		 * to trap packets to the CPU.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
			goto err_out;
		}
	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* The old adjacency index is invalid, while the new one is.
		 * Iterate over all the routes using the group and change them
		 * to forward packets using the new valid index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
			goto err_out;
		}
	}

	/* Make sure the flags are set / cleared based on the new nexthop group
	 * information.
	 */
	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);

	/* At this point 'nh_grp' is just a shell that is not used by anyone
	 * and its nexthop group info is the old info that was just replaced
	 * with the new one. Remove it.
	 */
	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);

	return 0;

err_out:
	/* Roll back the info swap performed above. */
	old_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = nh_grp;
	old_nh_grp->nhgi = old_nhgi;
	return err;
}
/* Handle a nexthop object REPLACE notification: create a driver group from
 * the notified data and either insert it (new ID) or replace the existing
 * group carrying the same ID. On failure the freshly created group is
 * destroyed.
 */
static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
				    struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
	struct netlink_ext_ack *extack = info->extack;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
	if (IS_ERR(nh_grp))
		return PTR_ERR(nh_grp);

	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!old_nh_grp)
		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
	else
		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
							 old_nh_grp, extack);

	if (err) {
		nh_grp->can_destroy = true;
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
	}

	return err;
}
/* Handle a nexthop object DEL notification: remove the matching group from
 * the hash table and destroy it, unless routes still reference it.
 */
static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return;

	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);

	/* If the group still has routes using it, then defer the delete
	 * operation until the last route using it is deleted.
	 */
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}
/* Nexthop notification chain callback. Validates the notified object first,
 * then dispatches REPLACE/DEL under the router lock. The chain is invoked
 * with RTNL held, hence the ASSERT_RTNL(). Errors are translated to
 * notifier return values.
 */
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct nh_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
	if (err)
		goto out;

	mutex_lock(&router->lock);

	ASSERT_RTNL();

	switch (event) {
	case NEXTHOP_EVENT_REPLACE:
		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_DEL:
		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
		break;
	default:
		break;
	}

	mutex_unlock(&router->lock);

out:
	return notifier_from_errno(err);
}
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
struct fib_info *fi)
{
@ -4208,12 +4648,21 @@ static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group *nh_grp;
if (fi->nh) {
nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
fi->nh->id);
if (WARN_ON_ONCE(!nh_grp))
return -EINVAL;
goto out;
}
nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
if (!nh_grp) {
nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
if (IS_ERR(nh_grp))
return PTR_ERR(nh_grp);
}
out:
list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
fib_entry->nh_group = nh_grp;
return 0;
@ -4227,6 +4676,12 @@ static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
list_del(&fib_entry->nexthop_group_node);
if (!list_empty(&nh_grp->fib_list))
return;
if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
return;
}
mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
@ -5517,8 +5972,17 @@ mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
struct mlxsw_sp_nexthop_group *nh_grp;
if (rt->nh) {
nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
rt->nh->id);
if (WARN_ON_ONCE(!nh_grp))
return -EINVAL;
goto out;
}
nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
if (!nh_grp) {
nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
@ -5531,6 +5995,7 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
*/
__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
out:
list_add_tail(&fib6_entry->common.nexthop_group_node,
&nh_grp->fib_list);
fib6_entry->common.nh_group = nh_grp;
@ -5546,6 +6011,12 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
list_del(&fib_entry->nexthop_group_node);
if (!list_empty(&nh_grp->fib_list))
return;
if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
return;
}
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
@ -6597,20 +7068,6 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
return notifier_from_errno(-EINVAL);
}
if (fen_info->fi->nh) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
return notifier_from_errno(-EINVAL);
}
} else if (info->family == AF_INET6) {
struct fib6_entry_notifier_info *fen6_info;
fen6_info = container_of(info,
struct fib6_entry_notifier_info,
info);
if (fen6_info->rt->nh) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
return notifier_from_errno(-EINVAL);
}
}
break;
}
@ -8549,6 +9006,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_netevent_notifier;
mlxsw_sp->router->nexthop_nb.notifier_call =
mlxsw_sp_nexthop_obj_event;
err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->nexthop_nb,
extack);
if (err)
goto err_register_nexthop_notifier;
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@ -8559,6 +9024,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
unregister_inet6addr_notifier(&router->inet6addr_nb);
@ -8598,6 +9066,8 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->nexthop_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);

View File

@ -58,6 +58,7 @@ struct mlxsw_sp_router {
struct list_head nexthop_neighs_list;
struct list_head ipip_list;
bool aborted;
struct notifier_block nexthop_nb;
struct notifier_block fib_nb;
struct notifier_block netevent_nb;
struct notifier_block inetaddr_nb;

View File

@ -29,6 +29,10 @@ ALL_TESTS="
bridge_extern_learn_test
neigh_offload_test
nexthop_offload_test
nexthop_obj_invalid_test
nexthop_obj_offload_test
nexthop_obj_group_offload_test
nexthop_obj_route_offload_test
devlink_reload_test
"
NUM_NETIFS=2
@ -674,6 +678,191 @@ nexthop_offload_test()
sysctl_restore net.ipv6.conf.$swp2.keep_addr_on_down
}
nexthop_obj_invalid_test()
{
	# Test that invalid nexthop object configurations are rejected
	RET=0

	simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
	simple_if_init $swp2 192.0.2.2/24 2001:db8:1::2/64
	setup_wait

	# FDB nexthops are rejected by the driver.
	ip nexthop add id 1 via 192.0.2.3 fdb
	check_fail $? "managed to configure an FDB nexthop when should not"

	# Encapsulating nexthops are rejected by the driver.
	ip nexthop add id 1 encap mpls 200/300 via 192.0.2.3 dev $swp1
	check_fail $? "managed to configure a nexthop with MPLS encap when should not"

	# Blackhole nexthops are rejected by the driver.
	ip nexthop add id 1 blackhole
	check_fail $? "managed to configure a blackhole nexthop when should not"

	# Device-only nexthops (no gateway) are only allowed with IPIP
	# devices, so a group of such nexthops must be rejected.
	ip nexthop add id 1 dev $swp1
	ip nexthop add id 2 dev $swp1
	ip nexthop add id 10 group 1/2
	check_fail $? "managed to configure a nexthop group with device-only nexthops when should not"

	log_test "nexthop objects - invalid configurations"

	ip nexthop del id 2
	ip nexthop del id 1

	simple_if_fini $swp2 192.0.2.2/24 2001:db8:1::2/64
	simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
}
nexthop_obj_offload_test()
{
	# Test offload indication of nexthop objects
	RET=0

	simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
	simple_if_init $swp2
	setup_wait

	# A nexthop whose neighbour is resolved should be offloaded.
	ip nexthop add id 1 via 192.0.2.2 dev $swp1
	ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \
		dev $swp1

	busywait "$TIMEOUT" wait_for_offload \
		ip nexthop show id 1
	check_err $? "nexthop not marked as offloaded when should"

	# Failing the neighbour should clear the offload indication.
	ip neigh replace 192.0.2.2 nud failed dev $swp1
	busywait "$TIMEOUT" not wait_for_offload \
		ip nexthop show id 1
	check_err $? "nexthop marked as offloaded after setting neigh to failed state"

	# Restoring the neighbour should restore the offload indication.
	ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \
		dev $swp1
	busywait "$TIMEOUT" wait_for_offload \
		ip nexthop show id 1
	check_err $? "nexthop not marked as offloaded after neigh replace"

	# Replacing the nexthop with an unresolvable gateway should clear
	# the offload indication.
	ip nexthop replace id 1 via 192.0.2.3 dev $swp1
	busywait "$TIMEOUT" not wait_for_offload \
		ip nexthop show id 1
	check_err $? "nexthop marked as offloaded after replacing to use an invalid address"

	# Replacing back to the resolvable gateway should restore it.
	ip nexthop replace id 1 via 192.0.2.2 dev $swp1
	busywait "$TIMEOUT" wait_for_offload \
		ip nexthop show id 1
	check_err $? "nexthop not marked as offloaded after replacing to use a valid address"

	log_test "nexthop objects offload indication"

	ip neigh del 192.0.2.2 dev $swp1
	ip nexthop del id 1

	simple_if_fini $swp2
	simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
}
nexthop_obj_group_offload_test()
{
	# Test offload indication of nexthop group objects
	RET=0

	simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
	simple_if_init $swp2
	setup_wait

	# Mixed IPv4/IPv6 group with both member neighbours resolved.
	ip nexthop add id 1 via 192.0.2.2 dev $swp1
	ip nexthop add id 2 via 2001:db8:1::2 dev $swp1
	ip nexthop add id 10 group 1/2
	ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \
		dev $swp1
	ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud reachable \
		dev $swp1
	ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud reachable \
		dev $swp1

	busywait "$TIMEOUT" wait_for_offload \
		ip nexthop show id 1
	check_err $? "IPv4 nexthop not marked as offloaded when should"
	busywait "$TIMEOUT" wait_for_offload \
		ip nexthop show id 2
	check_err $? "IPv6 nexthop not marked as offloaded when should"
	busywait "$TIMEOUT" wait_for_offload \
		ip nexthop show id 10
	check_err $? "nexthop group not marked as offloaded when should"

	# Invalidate nexthop id 1
	# The group should remain offloaded while one member is still valid,
	# so waiting for the offload flag to clear is expected to fail.
	ip neigh replace 192.0.2.2 nud failed dev $swp1
	busywait "$TIMEOUT" not wait_for_offload \
		ip nexthop show id 10
	check_fail $? "nexthop group not marked as offloaded with one valid nexthop"

	# Invalidate nexthop id 2
	# With no valid members left, the group should lose its offload flag.
	ip neigh replace 2001:db8:1::2 nud failed dev $swp1
	busywait "$TIMEOUT" not wait_for_offload \
		ip nexthop show id 10
	check_err $? "nexthop group marked as offloaded when should not"

	# Revalidate nexthop id 1
	# 192.0.2.3 has a reachable neighbour entry, so the group should
	# become offloaded again.
	ip nexthop replace id 1 via 192.0.2.3 dev $swp1
	busywait "$TIMEOUT" wait_for_offload \
		ip nexthop show id 10
	check_err $? "nexthop group not marked as offloaded after revalidating nexthop"

	log_test "nexthop group objects offload indication"

	ip neigh del 2001:db8:1::2 dev $swp1
	ip neigh del 192.0.2.3 dev $swp1
	ip neigh del 192.0.2.2 dev $swp1
	ip nexthop del id 10
	ip nexthop del id 2
	ip nexthop del id 1

	simple_if_fini $swp2
	simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
}
nexthop_obj_route_offload_test()
{
	# Test offload indication of routes using nexthop objects
	RET=0

	simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
	simple_if_init $swp2
	setup_wait

	ip nexthop add id 1 via 192.0.2.2 dev $swp1
	ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \
		dev $swp1
	ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud reachable \
		dev $swp1

	# A route using a valid nexthop object should be offloaded.
	ip route replace 198.51.100.0/24 nhid 1
	busywait "$TIMEOUT" wait_for_offload \
		ip route show 198.51.100.0/24
	check_err $? "route not marked as offloaded when using valid nexthop"

	# Replacing the nexthop with another valid one keeps the route
	# offloaded.
	ip nexthop replace id 1 via 192.0.2.3 dev $swp1
	busywait "$TIMEOUT" wait_for_offload \
		ip route show 198.51.100.0/24
	check_err $? "route not marked as offloaded after replacing valid nexthop with a valid one"

	# 192.0.2.4 has no neighbour entry, so the route should lose its
	# offload indication.
	ip nexthop replace id 1 via 192.0.2.4 dev $swp1
	busywait "$TIMEOUT" not wait_for_offload \
		ip route show 198.51.100.0/24
	check_err $? "route marked as offloaded after replacing valid nexthop with an invalid one"

	# Replacing back to a valid nexthop restores the offload indication.
	ip nexthop replace id 1 via 192.0.2.2 dev $swp1
	busywait "$TIMEOUT" wait_for_offload \
		ip route show 198.51.100.0/24
	check_err $? "route not marked as offloaded after replacing invalid nexthop with a valid one"

	log_test "routes using nexthop objects offload indication"

	ip route del 198.51.100.0/24
	ip neigh del 192.0.2.3 dev $swp1
	ip neigh del 192.0.2.2 dev $swp1
	ip nexthop del id 1

	simple_if_fini $swp2
	simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
}
devlink_reload_test()
{
# Test that after executing all the above configuration tests, a

View File

@ -0,0 +1,356 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Test traffic distribution when a wECMP route forwards traffic to two GRE
# tunnels.
#
# +-------------------------+
# | H1 |
# | $h1 + |
# | 192.0.2.1/28 | |
# | 2001:db8:1::1/64 | |
# +-------------------|-----+
# |
# +-------------------|------------------------+
# | SW1 | |
# | $ol1 + |
# | 192.0.2.2/28 |
# | 2001:db8:1::2/64 |
# | |
# | + g1a (gre) + g1b (gre) |
# | loc=192.0.2.65 loc=192.0.2.81 |
# | rem=192.0.2.66 --. rem=192.0.2.82 --. |
# | tos=inherit | tos=inherit | |
# | .------------------' | |
# | | .------------------' |
# | v v |
# | + $ul1.111 (vlan) + $ul1.222 (vlan) |
# | | 192.0.2.129/28 | 192.0.2.145/28 |
# | \ / |
# | \________________/ |
# | | |
# | + $ul1 |
# +------------|-------------------------------+
# |
# +------------|-------------------------------+
# | SW2 + $ul2 |
# | _______|________ |
# | / \ |
# | / \ |
# | + $ul2.111 (vlan) + $ul2.222 (vlan) |
# | ^ 192.0.2.130/28 ^ 192.0.2.146/28 |
# | | | |
# | | '------------------. |
# | '------------------. | |
# | + g2a (gre) | + g2b (gre) | |
# | loc=192.0.2.66 | loc=192.0.2.82 | |
# | rem=192.0.2.65 --' rem=192.0.2.81 --' |
# | tos=inherit tos=inherit |
# | |
# | $ol2 + |
# | 192.0.2.17/28 | |
# | 2001:db8:2::1/64 | |
# +-------------------|------------------------+
# |
# +-------------------|-----+
# | H2 | |
# | $h2 + |
# | 192.0.2.18/28 |
# | 2001:db8:2::2/64 |
# +-------------------------+
ALL_TESTS="
ping_ipv4
ping_ipv6
multipath_ipv4
multipath_ipv6
multipath_ipv6_l4
"
NUM_NETIFS=6
source lib.sh
# Configure host H1: interface in its own VRF with IPv4/IPv6 addresses and
# static routes towards H2's networks via SW1.
h1_create()
{
	simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
	ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
	ip route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
}
# Tear down host H1, mirroring h1_create() in reverse order.
h1_destroy()
{
	ip route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
	ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
	# Pass the same addresses that h1_create() configured so both the
	# IPv4 and the IPv6 address are removed (the original omitted the
	# IPv6 address).
	simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
}
# Configure switch SW1: overlay interface, two VLAN underlay interfaces, two
# GRE tunnels, and a two-member nexthop group (IDs 101/102, group 103) used
# by both the IPv4 and IPv6 routes towards H2.
sw1_create()
{
	simple_if_init $ol1 192.0.2.2/28 2001:db8:1::2/64
	__simple_if_init $ul1 v$ol1
	vlan_create $ul1 111 v$ol1 192.0.2.129/28
	vlan_create $ul1 222 v$ol1 192.0.2.145/28

	tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit dev v$ol1
	__simple_if_init g1a v$ol1 192.0.2.65/32
	ip route add vrf v$ol1 192.0.2.66/32 via 192.0.2.130

	tunnel_create g1b gre 192.0.2.81 192.0.2.82 tos inherit dev v$ol1
	__simple_if_init g1b v$ol1 192.0.2.81/32
	ip route add vrf v$ol1 192.0.2.82/32 via 192.0.2.146

	# Device-only nexthops are created as AF_INET6 so both IPv4 and IPv6
	# routes can use the same group.
	ip -6 nexthop add id 101 dev g1a
	ip -6 nexthop add id 102 dev g1b
	ip nexthop add id 103 group 101/102

	ip route add vrf v$ol1 192.0.2.16/28 nhid 103
	ip route add vrf v$ol1 2001:db8:2::/64 nhid 103
}
# Tear down switch SW1, mirroring sw1_create() in reverse order.
sw1_destroy()
{
	ip route del vrf v$ol1 2001:db8:2::/64
	ip route del vrf v$ol1 192.0.2.16/28

	ip nexthop del id 103
	ip -6 nexthop del id 102
	ip -6 nexthop del id 101

	ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
	__simple_if_fini g1b 192.0.2.81/32
	tunnel_destroy g1b

	ip route del vrf v$ol1 192.0.2.66/32 via 192.0.2.130
	__simple_if_fini g1a 192.0.2.65/32
	tunnel_destroy g1a

	vlan_destroy $ul1 222
	vlan_destroy $ul1 111
	__simple_if_fini $ul1
	simple_if_fini $ol1 192.0.2.2/28 2001:db8:1::2/64
}
# Configure switch SW2: mirror of SW1 on the other side of the tunnels, plus
# tc counters on the underlay VLANs used to measure traffic distribution.
sw2_create()
{
	simple_if_init $ol2 192.0.2.17/28 2001:db8:2::1/64
	__simple_if_init $ul2 v$ol2
	vlan_create $ul2 111 v$ol2 192.0.2.130/28
	vlan_create $ul2 222 v$ol2 192.0.2.146/28

	tunnel_create g2a gre 192.0.2.66 192.0.2.65 tos inherit dev v$ol2
	__simple_if_init g2a v$ol2 192.0.2.66/32
	ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129

	tunnel_create g2b gre 192.0.2.82 192.0.2.81 tos inherit dev v$ol2
	__simple_if_init g2b v$ol2 192.0.2.82/32
	ip route add vrf v$ol2 192.0.2.81/32 via 192.0.2.145

	ip -6 nexthop add id 201 dev g2a
	ip -6 nexthop add id 202 dev g2b
	ip nexthop add id 203 group 201/202

	ip route add vrf v$ol2 192.0.2.0/28 nhid 203
	ip route add vrf v$ol2 2001:db8:1::/64 nhid 203

	# Per-VLAN counters used by multipath*_test() to measure how traffic
	# was distributed between the two tunnels.
	tc qdisc add dev $ul2 clsact
	tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
	   flower vlan_id 111 action pass
	tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
	   flower vlan_id 222 action pass
}
# Tear down switch SW2, mirroring sw2_create() in reverse order.
sw2_destroy()
{
	tc qdisc del dev $ul2 clsact

	ip route del vrf v$ol2 2001:db8:1::/64
	ip route del vrf v$ol2 192.0.2.0/28

	ip nexthop del id 203
	ip -6 nexthop del id 202
	ip -6 nexthop del id 201

	ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
	__simple_if_fini g2b 192.0.2.82/32
	tunnel_destroy g2b

	ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
	__simple_if_fini g2a 192.0.2.66/32
	tunnel_destroy g2a

	vlan_destroy $ul2 222
	vlan_destroy $ul2 111
	__simple_if_fini $ul2
	simple_if_fini $ol2 192.0.2.17/28 2001:db8:2::1/64
}
# Configure host H2: interface in its own VRF with static routes towards
# H1's networks via SW2.
h2_create()
{
	simple_if_init $h2 192.0.2.18/28 2001:db8:2::2/64
	ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
	ip route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1
}
# Tear down host H2, mirroring h2_create() in reverse order.
h2_destroy()
{
	ip route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1
	ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
	simple_if_fini $h2 192.0.2.18/28 2001:db8:2::2/64
}
# Map the six netdevs from the topology diagram to roles and build the whole
# H1 - SW1 - SW2 - H2 setup.
setup_prepare()
{
	h1=${NETIFS[p1]}
	ol1=${NETIFS[p2]}

	ul1=${NETIFS[p3]}
	ul2=${NETIFS[p4]}

	ol2=${NETIFS[p5]}
	h2=${NETIFS[p6]}

	vrf_prepare
	h1_create
	sw1_create
	sw2_create
	h2_create

	forwarding_enable
}
# Undo setup_prepare() in reverse order; invoked from the EXIT trap.
cleanup()
{
	pre_cleanup

	forwarding_restore

	h2_destroy
	sw2_destroy
	sw1_destroy
	h1_destroy

	vrf_cleanup
}
# Send a burst of IPv4 UDP flows from H1 and verify the traffic split across
# the two tunnels matches the given nexthop weights.
# $1 - test description, $2/$3 - weights for nexthops 101/102.
multipath4_test()
{
	local what=$1; shift
	local weight1=$1; shift
	local weight2=$1; shift

	# L4 hash policy so varying UDP ports spread flows across members.
	sysctl_set net.ipv4.fib_multipath_hash_policy 1
	ip nexthop replace id 103 group 101,$weight1/102,$weight2

	local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
	local t0_222=$(tc_rule_stats_get $ul2 222 ingress)

	ip vrf exec v$h1 \
	   $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
	       -d 1msec -t udp "sp=1024,dp=0-32768"

	local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
	local t1_222=$(tc_rule_stats_get $ul2 222 ingress)

	local d111=$((t1_111 - t0_111))
	local d222=$((t1_222 - t0_222))
	multipath_eval "$what" $weight1 $weight2 $d111 $d222

	# Restore equal weights and the original hash policy.
	ip nexthop replace id 103 group 101/102
	sysctl_restore net.ipv4.fib_multipath_hash_policy
}
# Send IPv6 echo requests with random flow labels from H1 and verify the
# traffic split across the two tunnels matches the given nexthop weights.
# $1 - test description, $2/$3 - weights for nexthops 101/102.
multipath6_test()
{
	local what=$1; shift
	local weight1=$1; shift
	local weight2=$1; shift

	# L3 hash policy; distribution comes from the random flow labels.
	sysctl_set net.ipv6.fib_multipath_hash_policy 0
	ip nexthop replace id 103 group 101,$weight1/102,$weight2

	local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
	local t0_222=$(tc_rule_stats_get $ul2 222 ingress)

	# Generate 16384 echo requests, each with a random flow label.
	for ((i=0; i < 16384; ++i)); do
		ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
	done

	local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
	local t1_222=$(tc_rule_stats_get $ul2 222 ingress)

	local d111=$((t1_111 - t0_111))
	local d222=$((t1_222 - t0_222))
	multipath_eval "$what" $weight1 $weight2 $d111 $d222

	# Restore equal weights and the original hash policy.
	ip nexthop replace id 103 group 101/102
	sysctl_restore net.ipv6.fib_multipath_hash_policy
}
# Send a burst of IPv6 UDP flows from H1 with an L4 hash policy and verify
# the traffic split across the two tunnels matches the given nexthop weights.
# $1 - test description, $2/$3 - weights for nexthops 101/102.
multipath6_l4_test()
{
	local what=$1; shift
	local weight1=$1; shift
	local weight2=$1; shift

	# L4 hash policy so varying UDP ports spread flows across members.
	sysctl_set net.ipv6.fib_multipath_hash_policy 1
	ip nexthop replace id 103 group 101,$weight1/102,$weight2

	local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
	local t0_222=$(tc_rule_stats_get $ul2 222 ingress)

	ip vrf exec v$h1 \
		$MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \
		    -d 1msec -t udp "sp=1024,dp=0-32768"

	local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
	local t1_222=$(tc_rule_stats_get $ul2 222 ingress)

	local d111=$((t1_111 - t0_111))
	local d222=$((t1_222 - t0_222))
	multipath_eval "$what" $weight1 $weight2 $d111 $d222

	# Restore equal weights and the original hash policy.
	ip nexthop replace id 103 group 101/102
	sysctl_restore net.ipv6.fib_multipath_hash_policy
}
# Basic IPv4 connectivity check from H1 to H2.
ping_ipv4()
{
	ping_test $h1 192.0.2.18
}
# Basic IPv6 connectivity check from H1 to H2.
ping_ipv6()
{
	ping6_test $h1 2001:db8:2::2
}
# Run the IPv4 multipath tests with equal and unequal nexthop weights.
multipath_ipv4()
{
	log_info "Running IPv4 multipath tests"
	multipath4_test "ECMP" 1 1
	multipath4_test "Weighted MP 2:1" 2 1
	multipath4_test "Weighted MP 11:45" 11 45
}
# Run the IPv6 (flow-label hash) multipath tests with equal and unequal
# nexthop weights.
multipath_ipv6()
{
	log_info "Running IPv6 multipath tests"
	multipath6_test "ECMP" 1 1
	multipath6_test "Weighted MP 2:1" 2 1
	multipath6_test "Weighted MP 11:45" 11 45
}
# Run the IPv6 L4-hash multipath tests with equal and unequal nexthop
# weights.
multipath_ipv6_l4()
{
	log_info "Running IPv6 L4 hash multipath tests"
	multipath6_l4_test "ECMP" 1 1
	multipath6_l4_test "Weighted MP 2:1" 2 1
	multipath6_l4_test "Weighted MP 11:45" 11 45
}
# Script entry point: ensure cleanup on exit, build the topology, then run
# every test listed in ALL_TESTS.
trap cleanup EXIT

setup_prepare
setup_wait
tests_run

exit $EXIT_STATUS

View File

@ -280,6 +280,17 @@ multipath_test()
multipath4_test "Weighted MP 2:1" 2 1
multipath4_test "Weighted MP 11:45" 11 45
log_info "Running IPv4 multipath tests with IPv6 link-local nexthops"
ip nexthop replace id 101 via fe80:2::22 dev $rp12
ip nexthop replace id 102 via fe80:3::23 dev $rp13
multipath4_test "ECMP" 1 1
multipath4_test "Weighted MP 2:1" 2 1
multipath4_test "Weighted MP 11:45" 11 45
ip nexthop replace id 102 via 169.254.3.23 dev $rp13
ip nexthop replace id 101 via 169.254.2.22 dev $rp12
log_info "Running IPv6 multipath tests"
multipath6_test "ECMP" 1 1
multipath6_test "Weighted MP 2:1" 2 1
@ -312,7 +323,6 @@ setup_prepare()
router1_create
router2_create
routing_nh_obj
forwarding_enable
}

View File

@ -0,0 +1,160 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
ALL_TESTS="
ping_ipv4
ping_ipv6
"
NUM_NETIFS=4
source lib.sh
source tc_common.sh
# Configure host H1 in its own VRF with addresses and routes towards H2's
# network via the router.
h1_create()
{
	vrf_create "vrf-h1"
	ip link set dev $h1 master vrf-h1

	ip link set dev vrf-h1 up
	ip link set dev $h1 up

	ip address add 192.0.2.2/24 dev $h1
	ip address add 2001:db8:1::2/64 dev $h1

	ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1
	ip route add 2001:db8:2::/64 vrf vrf-h1 nexthop via 2001:db8:1::1
}
# Tear down host H1, mirroring h1_create() in reverse order.
h1_destroy()
{
	ip route del 2001:db8:2::/64 vrf vrf-h1
	ip route del 198.51.100.0/24 vrf vrf-h1

	ip address del 2001:db8:1::2/64 dev $h1
	ip address del 192.0.2.2/24 dev $h1

	ip link set dev $h1 down
	vrf_destroy "vrf-h1"
}
# Configure host H2 in its own VRF with addresses and routes towards H1's
# network via the router.
h2_create()
{
	vrf_create "vrf-h2"
	ip link set dev $h2 master vrf-h2

	ip link set dev vrf-h2 up
	ip link set dev $h2 up

	ip address add 198.51.100.2/24 dev $h2
	ip address add 2001:db8:2::2/64 dev $h2

	ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1
	ip route add 2001:db8:1::/64 vrf vrf-h2 nexthop via 2001:db8:2::1
}
# Tear down host H2, mirroring h2_create() in reverse order.
h2_destroy()
{
	ip route del 2001:db8:1::/64 vrf vrf-h2
	ip route del 192.0.2.0/24 vrf vrf-h2

	ip address del 2001:db8:2::2/64 dev $h2
	ip address del 198.51.100.2/24 dev $h2

	ip link set dev $h2 down
	vrf_destroy "vrf-h2"
}
# Configure the router: bring both ports up, attach a clsact qdisc on the
# egress-side port for tc helpers, and assign addresses on both links.
router_create()
{
	ip link set dev $rp1 up
	ip link set dev $rp2 up

	tc qdisc add dev $rp2 clsact

	ip address add 192.0.2.1/24 dev $rp1
	ip address add 2001:db8:1::1/64 dev $rp1

	ip address add 198.51.100.1/24 dev $rp2
	ip address add 2001:db8:2::1/64 dev $rp2
}
# Tear down the router, mirroring router_create() in reverse order.
router_destroy()
{
	ip address del 2001:db8:2::1/64 dev $rp2
	ip address del 198.51.100.1/24 dev $rp2

	ip address del 2001:db8:1::1/64 dev $rp1
	ip address del 192.0.2.1/24 dev $rp1

	tc qdisc del dev $rp2 clsact

	ip link set dev $rp2 down
	ip link set dev $rp1 down
}
# Install device-only nexthop objects and point both the IPv4 and IPv6
# routes at them.
routing_nh_obj()
{
	# Create the nexthops as AF_INET6, so that IPv4 and IPv6 routes could
	# use them.
	ip -6 nexthop add id 101 dev $rp1
	ip -6 nexthop add id 102 dev $rp2

	ip route replace 192.0.2.0/24 nhid 101
	ip route replace 2001:db8:1::/64 nhid 101
	ip route replace 198.51.100.0/24 nhid 102
	ip route replace 2001:db8:2::/64 nhid 102
}
# Map the four netdevs to roles and build the H1 - router - H2 topology.
setup_prepare()
{
	h1=${NETIFS[p1]}
	rp1=${NETIFS[p2]}

	rp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	rp1mac=$(mac_get $rp1)

	vrf_prepare

	h1_create
	h2_create

	router_create

	forwarding_enable
}
# Undo setup_prepare() in reverse order; invoked from the EXIT trap.
cleanup()
{
	pre_cleanup

	forwarding_restore

	router_destroy

	h2_destroy
	h1_destroy

	vrf_cleanup
}
# Verify IPv4 forwarding through the nexthop-object routes.
ping_ipv4()
{
	ping_test $h1 198.51.100.2
}
# Verify IPv6 forwarding through the nexthop-object routes.
ping_ipv6()
{
	ping6_test $h1 2001:db8:2::2
}
# Script entry point: ensure cleanup on exit, build the topology, install
# the nexthop objects, then run every test listed in ALL_TESTS.
trap cleanup EXIT

setup_prepare
setup_wait
routing_nh_obj

tests_run

exit $EXIT_STATUS