Merge branch 'sparx5-qos'

Daniel Machon says:

====================
net: Add QoS offload support for sparx5

This patch series adds support for offloading QoS features to the sparx5
switch through the tc command suite. The new offloadable QoS features
introduced in this series are:

  - tc-mqprio for mapping traffic classes to hardware queues. Queues are
    mapped 1:1 in hardware by default; as such, the mqprio qdisc is used
    as an attachment point for the tbf and ets qdiscs.

    $ tc qdisc add dev eth0 root handle 1:0 mqprio

  - tc-tbf for setting up shaping on scheduler elements of the HSCH
    (Hierarchical Scheduler) block. Shaping on either port output or
    queue output is supported.

    Port shaper: $ tc qdisc add dev eth0 root handle 1:0 tbf rate \
    10000000 burst 8192 limit 1m

    Queue shaper: $ tc qdisc replace dev eth0 parent 1:5 handle 2:0 tbf \
    rate 10000000 burst 8192 limit 1m

  - tc-ets for setting up strict and/or bandwidth-sharing bands on one
    through eight priority queues.

    Configure a mix of strict and bw-sharing bands:
    $ tc qdisc add dev eth0 handle 1: root ets bands 8 strict 5 \
    quanta 1000 1000 1000 priomap 7 6 5 4 3 2 1 0
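
In all of the above, the parent handle selects what gets shaped: a root
qdisc targets the port's scheduler element, while a parent such as 1:5
targets the layer 0 element of queue 4 (the class minor number is the
queue index plus one, cf. sparx5_tc_get_layer_and_idx()). A complete
per-queue setup therefore combines the commands above:

    $ tc qdisc add dev eth0 root handle 1:0 mqprio
    $ tc qdisc replace dev eth0 parent 1:5 handle 2:0 tbf \
    rate 10000000 burst 8192 limit 1m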

Patch #1 Sets up the tc hook.
Patch #2 Adds support for offloading the tc-mqprio qdisc.
Patch #3 Adds support for offloading the tc-tbf qdisc.
Patch #4 Adds support for offloading the tc-ets qdisc.
Patch #5 Updates the maintainers of the sparx5 driver.

========================================================================

v1:
https://lore.kernel.org/netdev/20220919120215.3815696-1-daniel.machon@microchip.com/

v1 -> v2:
  - Fix compiler warning in patch #2
  - Fix comment style in patch #4
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed on 2022-09-23 09:53:11 +01:00
commit d56f9ddf97
9 changed files with 916 additions and 2 deletions

MAINTAINERS

@@ -2401,6 +2401,7 @@ N: atmel
ARM/Microchip Sparx5 SoC support
M: Lars Povlsen <lars.povlsen@microchip.com>
M: Steen Hegelund <Steen.Hegelund@microchip.com>
M: Daniel Machon <daniel.machon@microchip.com>
M: UNGLinuxDriver@microchip.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported

drivers/net/ethernet/microchip/sparx5/Makefile

@@ -8,4 +8,4 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o

drivers/net/ethernet/microchip/sparx5/sparx5_main.c

@@ -27,6 +27,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
#include "sparx5_qos.h"
#define QLIM_WM(fraction) \
((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
@@ -868,6 +869,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
goto cleanup_ports;
}
err = sparx5_qos_init(sparx5);
if (err) {
dev_err(sparx5->dev, "Failed to initialize QoS\n");
goto cleanup_ports;
}
err = sparx5_ptp_init(sparx5);
if (err) {
dev_err(sparx5->dev, "PTP failed\n");

drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h

@@ -2993,6 +2993,147 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
/* HSCH:HSCH_CFG:CIR_CFG */
#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x)
#define HSCH_CIR_CFG_CIR_RATE_GET(x)\
FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x)
#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0)
#define HSCH_CIR_CFG_CIR_BURST_SET(x)\
FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x)
#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
/* HSCH:HSCH_CFG:EIR_CFG */
#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x)
#define HSCH_EIR_CFG_EIR_RATE_GET(x)\
FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x)
#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0)
#define HSCH_EIR_CFG_EIR_BURST_SET(x)\
FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x)
#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
/* HSCH:HSCH_CFG:SE_CFG */
#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x)
#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x)
#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3)
#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x)
#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x)
#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1)
#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
#define HSCH_SE_CFG_SE_STOP BIT(0)
#define HSCH_SE_CFG_SE_STOP_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_STOP, x)
#define HSCH_SE_CFG_SE_STOP_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
/* HSCH:HSCH_CFG:SE_CONNECT */
#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
/* HSCH:HSCH_CFG:SE_DLB_SENSE */
#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
/* HSCH:HSCH_DWRR:DWRR_ENTRY */
#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x)
#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\
FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x)
#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0)
#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\
FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0)
#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\
FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
/* HSCH:HSCH_MISC:SYS_CLK_PER */
#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
@@ -3002,6 +3143,30 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
/* HSCH:SYSTEM:FLUSH_CTRL */
#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)

drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c

@@ -7,6 +7,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
#include "sparx5_tc.h"
/* The IFH bit position of the first VSTAX bit. This is because the
* VSTAX bit positions in Data sheet is starting from zero.
@@ -228,6 +229,7 @@ static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
.ndo_eth_ioctl = sparx5_port_ioctl,
.ndo_setup_tc = sparx5_port_setup_tc,
};
bool sparx5_netdevice_check(const struct net_device *dev)
@@ -240,10 +242,14 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
struct sparx5_port *spx5_port;
struct net_device *ndev;
ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port),
SPX5_PRIOS, 1);
if (!ndev)
return ERR_PTR(-ENOMEM);
ndev->hw_features |= NETIF_F_HW_TC;
ndev->features |= NETIF_F_HW_TC;
SET_NETDEV_DEV(ndev, sparx5->dev);
spx5_port = netdev_priv(ndev);
spx5_port->ndev = ndev;

drivers/net/ethernet/microchip/sparx5/sparx5_qos.c

@@ -0,0 +1,513 @@
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
*
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
#include <net/pkt_cls.h>
#include "sparx5_main.h"
#include "sparx5_qos.h"
/* Max rates for leak groups */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
1048568, /* 1.049 Gbps */
2621420, /* 2.621 Gbps */
10485680, /* 10.486 Gbps */
26214200 /* 26.214 Gbps */
};
static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
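/* A leak group is kept as a linked list of scheduler elements, as the
* helpers below read and write it: the index of the first element lives
* in HSCH_HSCH_LEAK_CFG, each element's next pointer in HSCH_SE_CONNECT,
* the last element points to itself, and a leak time of zero marks the
* group as empty/disabled.
*/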
static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
{
u32 value;
value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
}
static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
u32 leak_time)
{
spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
HSCH_HSCH_TIMER_CFG(layer, group));
}
static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
{
u32 value;
value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
}
static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
u32 idx)
{
u32 value;
value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
}
static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
{
u32 itr, next;
itr = sparx5_lg_get_first(sparx5, layer, group);
for (;;) {
next = sparx5_lg_get_next(sparx5, layer, group, itr);
if (itr == next)
return itr;
itr = next;
}
}
static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
u32 idx)
{
return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
}
static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
u32 idx)
{
return idx == sparx5_lg_get_first(sparx5, layer, group);
}
static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
{
return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
}
static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
{
if (sparx5_lg_is_empty(sparx5, layer, group))
return false;
return sparx5_lg_get_first(sparx5, layer, group) ==
sparx5_lg_get_last(sparx5, layer, group);
}
static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
u32 leak_time)
{
sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
}
static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
{
sparx5_lg_set_leak_time(sparx5, layer, group, 0);
}
static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
u32 idx, u32 *group)
{
u32 itr, next;
int i;
for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
if (sparx5_lg_is_empty(sparx5, layer, i))
continue;
itr = sparx5_lg_get_first(sparx5, layer, i);
for (;;) {
next = sparx5_lg_get_next(sparx5, layer, i, itr);
if (itr == idx) {
*group = i;
return 0; /* Found it */
}
if (itr == next)
break; /* Was not found */
itr = next;
}
}
return -1;
}
static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
{
struct sparx5_layer *l = &layers[layer];
struct sparx5_lg *lg;
u32 i;
for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
lg = &l->leak_groups[i];
if (rate <= lg->max_rate) {
*group = i;
return 0;
}
}
return -1;
}
static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
u32 idx, u32 *prev, u32 *next, u32 *first)
{
u32 itr;
*first = sparx5_lg_get_first(sparx5, layer, group);
*prev = *first;
*next = *first;
itr = *first;
for (;;) {
*next = sparx5_lg_get_next(sparx5, layer, group, itr);
if (itr == idx)
return 0; /* Found it */
if (itr == *next)
return -1; /* Was not found */
*prev = itr;
itr = *next;
}
return -1;
}
static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
u32 se_first, u32 idx, u32 idx_next, bool empty)
{
u32 leak_time = layers[layer].leak_groups[group].leak_time;
/* Stop leaking */
sparx5_lg_disable(sparx5, layer, group);
if (empty)
return 0;
/* Select layer */
spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
/* Link elements */
spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
HSCH_SE_CONNECT(idx));
/* Set the first element. */
spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
HSCH_HSCH_LEAK_CFG(layer, group));
/* Start leaking */
sparx5_lg_enable(sparx5, layer, group, leak_time);
return 0;
}
static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
{
u32 first, next, prev;
bool empty = false;
/* idx *must* be present in the leak group */
WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
&first) < 0);
if (sparx5_lg_is_singular(sparx5, layer, group)) {
empty = true;
} else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
/* idx is removed, prev is now last */
idx = prev;
next = prev;
} else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
/* idx is removed and points to itself, first is next */
first = next;
next = idx;
} else {
/* Next is not touched */
idx = prev;
}
return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
empty);
}
static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
u32 idx)
{
u32 first, next, old_group;
pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
idx);
/* Is this SE already shaping ? */
if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
if (old_group != new_group) {
/* Delete from old group */
sparx5_lg_del(sparx5, layer, old_group, idx);
} else {
/* Nothing to do here */
return 0;
}
}
/* We always add to head of the list */
first = idx;
if (sparx5_lg_is_empty(sparx5, layer, new_group))
next = idx;
else
next = sparx5_lg_get_first(sparx5, layer, new_group);
return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
false);
}
static int sparx5_shaper_conf_set(struct sparx5_port *port,
const struct sparx5_shaper *sh, u32 layer,
u32 idx, u32 group)
{
int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
struct sparx5 *sparx5 = port->sparx5;
if (!sh->rate && !sh->burst)
sparx5_lg_action = &sparx5_lg_del;
else
sparx5_lg_action = &sparx5_lg_add;
/* Select layer */
spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
/* Set frame mode */
spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
sparx5, HSCH_SE_CFG(idx));
/* Set committed rate and burst */
spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
sparx5, HSCH_CIR_CFG(idx));
/* This has to be done after the shaper configuration has been set */
sparx5_lg_action(sparx5, layer, group, idx);
return 0;
}
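/* Convert a relative DWRR weight into a hardware cost, roughly
* (SPX5_DWRR_COST_MAX * weight_min / weight) - 1 rounded to nearest, so
* the smallest weight gets the largest cost. For example, with
* weight_min = 100: weight 100 gives (((63 << 4) * 100 / 100 + 8) >> 4) - 1
* = 62, and weight 400 gives (((63 << 4) * 100 / 400 + 8) >> 4) - 1 = 15.
*/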
static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
{
return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
1;
}
static int sparx5_dwrr_conf_set(struct sparx5_port *port,
struct sparx5_dwrr *dwrr)
{
int i;
spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
port->sparx5, HSCH_HSCH_CFG_CFG);
/* Number of *lower* indexes that are arbitrated dwrr */
spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
HSCH_SE_CFG(port->portno));
for (i = 0; i < dwrr->count; i++) {
spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
HSCH_DWRR_ENTRY(i));
}
return 0;
}
static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
struct sparx5_layer *layer;
u32 sys_clk_per_100ps;
struct sparx5_lg *lg;
u32 leak_time_us;
int i, ii;
sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
layer = &layers[i];
for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
lg = &layer->leak_groups[ii];
lg->max_rate = spx5_hsch_max_group_rate[ii];
/* Calculate the leak time in us, to serve a maximum
* rate of 'max_rate' for this group
*/
leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
/* Hardware wants leak time in ns */
lg->leak_time = 1000 * leak_time_us;
/* Calculate resolution */
lg->resolution = 1000 / leak_time_us;
/* Maximum number of shapers that can be served by
* this leak group
*/
lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
/* Example:
* Wanted bandwidth is 100Mbit:
*
* 100 mbps can be served by leak group zero.
*
* leak_time is 125000 ns.
* resolution is: 8
*
* cir = 100000 / 8 = 12500
* leaks_pr_sec = 125000 / 10^9 = 8000
* bw = 12500 * 8000 = 10^8 (100 Mbit)
*/
/* Disable by default - this also indicates an empty
* leak group
*/
sparx5_lg_disable(sparx5, i, ii);
}
}
return 0;
}
int sparx5_qos_init(struct sparx5 *sparx5)
{
int ret;
ret = sparx5_leak_groups_init(sparx5);
if (ret < 0)
return ret;
return 0;
}
int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
{
int i;
if (num_tc != SPX5_PRIOS) {
netdev_err(ndev, "Only %d traffic classes supported\n",
SPX5_PRIOS);
return -EINVAL;
}
netdev_set_num_tc(ndev, num_tc);
for (i = 0; i < num_tc; i++)
netdev_set_tc_queue(ndev, i, 1, i);
netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
ndev->num_tc, ndev->real_num_tx_queues);
return 0;
}
int sparx5_tc_mqprio_del(struct net_device *ndev)
{
netdev_reset_tc(ndev);
netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
ndev->num_tc, ndev->real_num_tx_queues);
return 0;
}
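/* tc hands over the tbf rate in bytes per second and the burst in bytes;
* below, the rate is first converted to kbit/s and both values are then
* quantized to the selected leak group's rate resolution and to burst
* units of SPX5_SE_BURST_UNIT bytes.
*/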
int sparx5_tc_tbf_add(struct sparx5_port *port,
struct tc_tbf_qopt_offload_replace_params *params,
u32 layer, u32 idx)
{
struct sparx5_shaper sh = {
.mode = SPX5_SE_MODE_DATARATE,
.rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
.burst = params->max_size,
};
struct sparx5_lg *lg;
u32 group;
/* Find suitable group for this se */
if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
pr_debug("Could not find leak group for se with rate: %d",
sh.rate);
return -EINVAL;
}
lg = &layers[layer].leak_groups[group];
pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
return -EINVAL;
/* Calculate committed rate and burst */
sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
return -EINVAL;
return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
{
struct sparx5_shaper sh = {0};
u32 group;
sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
int sparx5_tc_ets_add(struct sparx5_port *port,
struct tc_ets_qopt_offload_replace_params *params)
{
struct sparx5_dwrr dwrr = {0};
/* Minimum weight for each iteration */
unsigned int w_min = 100;
int i;
/* Find minimum weight for all dwrr bands */
for (i = 0; i < SPX5_PRIOS; i++) {
if (params->quanta[i] == 0)
continue;
w_min = min(w_min, params->weights[i]);
}
for (i = 0; i < SPX5_PRIOS; i++) {
/* Strict band; skip */
if (params->quanta[i] == 0)
continue;
dwrr.count++;
/* On the sparx5, bands with higher indexes are preferred and
* arbitrated strict. Strict bands are put in the lower indexes,
* by tc, so we reverse the bands here.
*
* Also convert the weight to something the hardware
* understands.
*/
dwrr.cost[SPX5_PRIOS - i - 1] =
sparx5_weight_to_hw_cost(w_min, params->weights[i]);
}
return sparx5_dwrr_conf_set(port, &dwrr);
}
int sparx5_tc_ets_del(struct sparx5_port *port)
{
struct sparx5_dwrr dwrr = {0};
return sparx5_dwrr_conf_set(port, &dwrr);
}

drivers/net/ethernet/microchip/sparx5/sparx5_qos.h

@@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Microchip Sparx5 Switch driver
*
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
#ifndef __SPARX5_QOS_H__
#define __SPARX5_QOS_H__
#include <linux/netdevice.h>
/* Number of Layers */
#define SPX5_HSCH_LAYER_CNT 3
/* Scheduling elements per layer */
#define SPX5_HSCH_L0_SE_CNT 5040
#define SPX5_HSCH_L1_SE_CNT 64
#define SPX5_HSCH_L2_SE_CNT 64
/* Calculate Layer 0 Scheduler Element when using normal hierarchy */
#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
/* Number of leak groups */
#define SPX5_HSCH_LEAK_GRP_CNT 4
/* Scheduler modes */
#define SPX5_SE_MODE_LINERATE 0
#define SPX5_SE_MODE_DATARATE 1
/* Rate and burst */
#define SPX5_SE_RATE_MAX 262143
#define SPX5_SE_BURST_MAX 127
#define SPX5_SE_RATE_MIN 1
#define SPX5_SE_BURST_MIN 1
#define SPX5_SE_BURST_UNIT 4096
/* Dwrr */
#define SPX5_DWRR_COST_MAX 63
struct sparx5_shaper {
u32 mode;
u32 rate;
u32 burst;
};
struct sparx5_lg {
u32 max_rate;
u32 resolution;
u32 leak_time;
u32 max_ses;
};
struct sparx5_layer {
struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
};
struct sparx5_dwrr {
u32 count; /* Number of inputs running dwrr */
u8 cost[SPX5_PRIOS];
};
int sparx5_qos_init(struct sparx5 *sparx5);
/* Multi-Queue Priority */
int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc);
int sparx5_tc_mqprio_del(struct net_device *ndev);
/* Token Bucket Filter */
struct tc_tbf_qopt_offload_replace_params;
int sparx5_tc_tbf_add(struct sparx5_port *port,
struct tc_tbf_qopt_offload_replace_params *params,
u32 layer, u32 idx);
int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
/* Enhanced Transmission Selection */
struct tc_ets_qopt_offload_replace_params;
int sparx5_tc_ets_add(struct sparx5_port *port,
struct tc_ets_qopt_offload_replace_params *params);
int sparx5_tc_ets_del(struct sparx5_port *port);
#endif /* __SPARX5_QOS_H__ */

drivers/net/ethernet/microchip/sparx5/sparx5_tc.c

@@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
*
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
#include <net/pkt_cls.h>
#include "sparx5_tc.h"
#include "sparx5_main.h"
#include "sparx5_qos.h"
static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
u32 *idx)
{
if (parent == TC_H_ROOT) {
*layer = 2;
*idx = portno;
} else {
u32 queue = TC_H_MIN(parent) - 1;
*layer = 0;
*idx = SPX5_HSCH_L0_GET_IDX(portno, queue);
}
}
static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
struct tc_mqprio_qopt_offload *m)
{
m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
if (m->qopt.num_tc == 0)
return sparx5_tc_mqprio_del(ndev);
else
return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc);
}
static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
struct tc_tbf_qopt_offload *qopt)
{
struct sparx5_port *port = netdev_priv(ndev);
u32 layer, se_idx;
sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
&se_idx);
switch (qopt->command) {
case TC_TBF_REPLACE:
return sparx5_tc_tbf_add(port, &qopt->replace_params, layer,
se_idx);
case TC_TBF_DESTROY:
return sparx5_tc_tbf_del(port, layer, se_idx);
case TC_TBF_STATS:
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
struct tc_ets_qopt_offload *qopt)
{
struct tc_ets_qopt_offload_replace_params *params =
&qopt->replace_params;
struct sparx5_port *port = netdev_priv(ndev);
int i;
/* Only allow ets on ports */
if (qopt->parent != TC_H_ROOT)
return -EOPNOTSUPP;
switch (qopt->command) {
case TC_ETS_REPLACE:
/* We support eight priorities */
if (params->bands != SPX5_PRIOS)
return -EOPNOTSUPP;
/* Sanity checks */
for (i = 0; i < SPX5_PRIOS; ++i) {
/* Priority map is *always* reverse e.g: 7 6 5 .. 0 */
if (params->priomap[i] != (7 - i))
return -EOPNOTSUPP;
/* Throw an error if we receive zero weights from tc */
if (params->quanta[i] && params->weights[i] == 0) {
pr_err("Invalid ets configuration; band %d has weight zero",
i);
return -EINVAL;
}
}
sparx5_tc_ets_add(port, params);
break;
case TC_ETS_DESTROY:
sparx5_tc_ets_del(port);
break;
case TC_ETS_GRAFT:
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
case TC_SETUP_QDISC_TBF:
return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
case TC_SETUP_QDISC_ETS:
return sparx5_tc_setup_qdisc_ets(ndev, type_data);
default:
return -EOPNOTSUPP;
}
return 0;
}

drivers/net/ethernet/microchip/sparx5/sparx5_tc.h

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Microchip Sparx5 Switch driver
*
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
#ifndef __SPARX5_TC_H__
#define __SPARX5_TC_H__
#include <linux/netdevice.h>
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
#endif /* __SPARX5_TC_H__ */