selftests: mlxsw: Use shapers in QOS RED tests instead of forcing speed

QOS tests create congestion and verify the switch behavior. To create
congestion, they need to send more traffic than the port can handle, so
some of them force a 1Gbps port speed.

The tests assume that 1Gbps speed is supported; otherwise, they fail.
The Spectrum-4 ASIC will not support this speed on all ports, so to be
able to run the tests there, some adjustments are required: use shapers
to limit the traffic instead of forcing the speed. Note that on several
ports the speed is forced only to work around autoneg issues, so no
replacement shaper is needed there.
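
On a shaped switch port such as $swp3, the change boils down to replacing
the forced speed with an egress TBF shaper of the same rate; a minimal
sketch using the rate, burst and limit values from the diff below:

  # Before: force the port speed to create the bottleneck.
  ethtool -s $swp3 speed 1000 autoneg off

  # After: leave the negotiated speed alone and shape egress traffic
  # to 1Gbps instead.
  tc qdisc replace dev $swp3 root handle 1: tbf rate 1gbit \
      burst 128K limit 1G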

The tests already use an ETS qdisc as the root and RED qdiscs as its
children. Add a new TBF shaper to limit the rate of the traffic, use it as
the root qdisc, and move the existing hierarchy of qdiscs under the new
TBF root.
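
The resulting hierarchy on the egress port then looks roughly as follows
(a sketch based on the hunks below; $QDISC is the scheduler under test,
e.g. ets):

  # New TBF root shaper limiting the port to 1Gbps.
  tc qdisc replace dev $swp3 root handle 1: tbf rate 1gbit \
      burst 128K limit 1G
  # The former root qdisc is re-parented under the TBF shaper.
  tc qdisc add dev $swp3 parent 1: handle 10: $QDISC \
      bands 8 priomap 7 6 5 4 3 2 1 0
  # The RED child qdiscs stay attached under 10: as before and are not
  # touched by this patch.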

On some ASICs, the shapers do not limit the traffic as accurately as
forcing the speed does. To keep the tests stable, allow the backlog size to
deviate from the threshold by up to +-10%. The aim of the tests is to make
sure that with backlog << threshold there are no drops, and that packets
are dropped somewhere in the vicinity of the configured threshold.
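
For example, assuming a hypothetical threshold of 300000 bytes, any
measured backlog between 270000 and 330000 bytes now passes the check:

  limit=300000    # hypothetical RED limit, for illustration only
  backlog=285000  # example measured backlog, 5% below the limit
  pct=$((100 * (limit - backlog) / limit))    # = 5
  ((-10 <= pct && pct <= 10)) && echo "within tolerance"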

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 61a00b196a (parent 9e7aaa7c65)
Author: Amit Cohen <amcohen@nvidia.com>
Date: 2022-09-14 13:21:49 +02:00
Committer: Jakub Kicinski <kuba@kernel.org>
3 changed files with 15 additions and 16 deletions


@@ -135,14 +135,16 @@ h2_create()
 	# cause packets to fail to queue up at $swp3 due to shared buffer
 	# quotas, and the test to spuriously fail.
 	#
-	# Prevent this by setting the speed of $h2 to 1Gbps.
+	# Prevent this by adding a shaper which limits the traffic in $h2 to
+	# 1Gbps.
-	ethtool -s $h2 speed 1000 autoneg off
+	tc qdisc replace dev $h2 root handle 10: tbf rate 1gbit \
+		burst 128K limit 1G
 }
 h2_destroy()
 {
-	ethtool -s $h2 autoneg on
+	tc qdisc del dev $h2 root handle 10:
 	tc qdisc del dev $h2 clsact
 	host_destroy $h2
 }
@@ -150,12 +152,10 @@ h2_destroy()
 h3_create()
 {
 	host_create $h3 3
-	ethtool -s $h3 speed 1000 autoneg off
 }
 h3_destroy()
 {
-	ethtool -s $h3 autoneg on
 	host_destroy $h3
 }
@@ -199,8 +199,9 @@ switch_create()
 		done
 	done
-	for intf in $swp2 $swp3 $swp4 $swp5; do
-		ethtool -s $intf speed 1000 autoneg off
+	for intf in $swp3 $swp4; do
+		tc qdisc replace dev $intf root handle 1: tbf rate 1gbit \
+			burst 128K limit 1G
 	done
 	ip link set dev br1_10 up
@@ -220,15 +221,13 @@ switch_destroy()
 	devlink_port_pool_th_restore $swp3 8
 	tc qdisc del dev $swp3 root 2>/dev/null
 	ip link set dev br2_11 down
 	ip link set dev br2_10 down
 	ip link set dev br1_11 down
 	ip link set dev br1_10 down
-	for intf in $swp5 $swp4 $swp3 $swp2; do
-		ethtool -s $intf autoneg on
+	for intf in $swp4 $swp3; do
+		tc qdisc del dev $intf root handle 1:
 	done
 	for intf in $swp5 $swp3 $swp2 $swp4 $swp1; do
@@ -536,7 +535,7 @@ do_red_test()
 	check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
 	local diff=$((limit - backlog))
 	pct=$((100 * diff / limit))
-	((0 <= pct && pct <= 10))
+	((-10 <= pct && pct <= 10))
 	check_err $? "backlog $backlog / $limit expected <= 10% distance"
 	log_test "TC $((vlan - 10)): RED backlog > limit"


@@ -25,7 +25,7 @@ BACKLOG2=500000
 install_root_qdisc()
 {
-	tc qdisc add dev $swp3 root handle 10: $QDISC \
+	tc qdisc add dev $swp3 parent 1: handle 10: $QDISC \
 		bands 8 priomap 7 6 5 4 3 2 1 0
 }
@@ -67,7 +67,7 @@ uninstall_qdisc_tc1()
 uninstall_root_qdisc()
 {
-	tc qdisc del dev $swp3 root
+	tc qdisc del dev $swp3 parent 1:
 }
 uninstall_qdisc()


@@ -18,7 +18,7 @@ install_qdisc()
 {
 	local -a args=("$@")
-	tc qdisc add dev $swp3 root handle 108: red \
+	tc qdisc add dev $swp3 parent 1: handle 108: red \
 		limit 1000000 min $BACKLOG max $((BACKLOG + 1)) \
 		probability 1.0 avpkt 8000 burst 38 "${args[@]}"
 	sleep 1
@@ -26,7 +26,7 @@ install_qdisc()
 uninstall_qdisc()
 {
-	tc qdisc del dev $swp3 root
+	tc qdisc del dev $swp3 parent 1:
 }
 ecn_test()