net: sched: red: don't reset the backlog on every stat dump

Commit 0dfb33a0d7 ("sch_red: report backlog information") copied
child's backlog into RED's backlog.  Back then RED did not maintain
its own backlog counts.  This has changed after commit 2ccccf5fb4
("net_sched: update hierarchical backlog too") and commit d7f4f332f0
("sch_red: update backlog as well").  Copying is no longer necessary.

Tested:

$ tc -s qdisc show dev veth0
qdisc red 1: root refcnt 2 limit 400000b min 30000b max 30000b ecn
 Sent 20942 bytes 221 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 1260b 14p requeues 14
  marked 0 early 0 pdrop 0 other 0
qdisc tbf 2: parent 1: rate 1Kbit burst 15000b lat 3585.0s
 Sent 20942 bytes 221 pkt (dropped 0, overlimits 138 requeues 0)
 backlog 1260b 14p requeues 14

Recently RED offload was added.  We need to make sure drivers don't
depend on resetting the stats.  This means backlog should be treated
like any other statistic:

  total_stat = new_hw_stat - prev_hw_stat;

Adjust mlxsw.
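
For illustration, a minimal sketch of that delta pattern (hypothetical names
such as example_stats_base and example_report_backlog; this is not the mlxsw
code, which follows below):

  #include <linux/types.h>

  /* Last hardware counter value that has already been reported. */
  struct example_stats_base {
          u64 backlog;
  };

  /* Add only the change since the previous dump and remember the new
   * hardware value, so the stats never have to be reset.
   */
  static void example_report_backlog(struct example_stats_base *base,
                                     u64 new_hw_backlog,
                                     u64 *qstats_backlog)
  {
          /* total_stat = new_hw_stat - prev_hw_stat */
          *qstats_backlog += new_hw_backlog - base->backlog;
          base->backlog = new_hw_backlog;
  }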

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Nogah Frankel <nogahf@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 416ef9b15c (parent 2d83619de8)
Jakub Kicinski, 2018-01-14 20:01:26 -08:00; committed by David S. Miller
3 changed files with 25 additions and 4 deletions

--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c

@@ -247,6 +247,8 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 
         stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
         stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+        stats_base->backlog = 0;
 }
 
 static int
@@ -306,6 +308,19 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                                                  max, prob, p->is_ecn);
 }
 
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+                             void *params)
+{
+        struct tc_red_qopt_offload_params *p = params;
+        u64 backlog;
+
+        backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+                                       mlxsw_sp_qdisc->stats_base.backlog);
+        p->qstats->backlog -= backlog;
+}
+
 static int
 mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                               struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -338,7 +353,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              struct tc_qopt_offload_stats *stats_ptr)
 {
-        u64 tx_bytes, tx_packets, overlimits, drops;
+        u64 tx_bytes, tx_packets, overlimits, drops, backlog;
         u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
         struct mlxsw_sp_qdisc_stats *stats_base;
         struct mlxsw_sp_port_xstats *xstats;
@@ -354,14 +369,18 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                      stats_base->overlimits;
         drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
                 stats_base->drops;
+        backlog = xstats->backlog[tclass_num];
 
         _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
         stats_ptr->qstats->overlimits += overlimits;
         stats_ptr->qstats->drops += drops;
         stats_ptr->qstats->backlog +=
-                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-                                             xstats->backlog[tclass_num]);
+                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+                                             backlog) -
+                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+                                             stats_base->backlog);
+        stats_base->backlog = backlog;
 
         stats_base->drops += drops;
         stats_base->overlimits += overlimits;
         stats_base->tx_bytes += tx_bytes;
@@ -375,6 +394,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
         .type = MLXSW_SP_QDISC_RED,
         .check_params = mlxsw_sp_qdisc_red_check_params,
         .replace = mlxsw_sp_qdisc_red_replace,
+        .unoffload = mlxsw_sp_qdisc_red_unoffload,
         .destroy = mlxsw_sp_qdisc_red_destroy,
         .get_stats = mlxsw_sp_qdisc_get_red_stats,
         .get_xstats = mlxsw_sp_qdisc_get_red_xstats,

--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h

@@ -748,6 +748,7 @@ struct tc_red_qopt_offload_params {
         u32 max;
         u32 probability;
         bool is_ecn;
+        struct gnet_stats_queue *qstats;
 };
 
 struct tc_red_qopt_offload {

--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c

@@ -167,6 +167,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
                 opt.set.max = q->parms.qth_max >> q->parms.Wlog;
                 opt.set.probability = q->parms.max_P;
                 opt.set.is_ecn = red_use_ecn(q);
+                opt.set.qstats = &sch->qstats;
         } else {
                 opt.command = TC_RED_DESTROY;
         }
@@ -322,7 +323,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
         };
         int err;
 
-        sch->qstats.backlog = q->qdisc->qstats.backlog;
         err = red_dump_offload_stats(sch, &opt);
         if (err)
                 goto nla_put_failure;