Merge branch 'DSA-mtu'

Vladimir Oltean says:

====================
Configure the MTU on DSA switches

This series adds support for configuring the MTU on front-panel switch
ports, while seamlessly adapting the CPU port and the DSA master to the
largest value plus the tagger overhead.

It also implements bridge MTU auto-normalization within the DSA core, as a
result of the feedback received on v2, where this feature had been
implemented inside the bridge driver.

Support was added for quite a number of switches, in the hope that this
series would gain some traction:
 - sja1105
 - felix
 - vsc73xx
 - b53 and the rest of the platforms based on it

V3 of this series was submitted here:
https://patchwork.ozlabs.org/cover/1262394/

V2 of this series was submitted here:
https://patchwork.ozlabs.org/cover/1261471/

V1 of this series was submitted here:
https://patchwork.ozlabs.org/cover/1199868/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2020-03-27 16:07:25 -07:00
commit 1a147b74c2
20 changed files with 500 additions and 48 deletions
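
To make the driver-facing contract concrete, here is a minimal sketch of the
two new dsa_switch_ops callbacks for a hypothetical switch driver. The foo_*
names, FOO_MAX_FRAME_SIZE and the register-write stub are made up for
illustration only; the real drivers converted below (b53, felix, vsc73xx,
sja1105) program their own hardware length and policer registers instead:

#include <linux/if_ether.h>
#include <net/dsa.h>

/* Hypothetical example driver, not part of this series */
#define FOO_MAX_FRAME_SIZE	9600

struct foo_priv {
	/* driver-private state would live here */
	void __iomem *base;
};

/* Stand-in for whatever per-port MRU register write the hardware needs */
static int foo_write_maxlen(struct foo_priv *priv, int port, int maxlen)
{
	/* e.g. writel(maxlen, priv->base + PORT_MAXLEN_REG(port)); */
	return 0;
}

static int foo_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct foo_priv *priv = ds->priv;

	/* The DSA core passes the SDU (L2 payload) length; convert it to the
	 * frame length programmed into the per-port MRU register.
	 */
	return foo_write_maxlen(priv, port, new_mtu + ETH_HLEN + ETH_FCS_LEN);
}

static int foo_port_max_mtu(struct dsa_switch *ds, int port)
{
	/* Largest SDU the port supports; the core uses it to clamp
	 * slave_dev->max_mtu at slave creation time.
	 */
	return FOO_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN;
}

static const struct dsa_switch_ops foo_switch_ops = {
	/* ... */
	.port_change_mtu	= foo_port_change_mtu,
	.port_max_mtu		= foo_port_max_mtu,
};

With such ops in place, the user changes the MTU of a front-panel port with
the usual "ip link set dev swpN mtu N" command, and the DSA core raises the
DSA master and CPU port MTU by the tagger overhead as needed.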


@@ -699,9 +699,6 @@ int b53_configure_vlan(struct dsa_switch *ds)
 		b53_write16(dev, B53_VLAN_PAGE,
 			    B53_VLAN_PORT_DEF_TAG(i), def_vid);
 
-	if (!is5325(dev) && !is5365(dev))
-		b53_set_jumbo(dev, dev->enable_jumbo, false);
-
 	return 0;
 }
 EXPORT_SYMBOL(b53_configure_vlan);
@@ -807,8 +804,6 @@ static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
 static int b53_reset_switch(struct b53_device *priv)
 {
 	/* reset vlans */
-	priv->enable_jumbo = false;
-
 	memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
 	memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
 
@@ -2065,6 +2060,26 @@ int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
 }
 EXPORT_SYMBOL(b53_set_mac_eee);
 
+static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+	struct b53_device *dev = ds->priv;
+	bool enable_jumbo;
+	bool allow_10_100;
+
+	if (is5325(dev) || is5365(dev))
+		return -EOPNOTSUPP;
+
+	enable_jumbo = (mtu >= JMS_MIN_SIZE);
+	allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+
+	return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
+}
+
+static int b53_get_max_mtu(struct dsa_switch *ds, int port)
+{
+	return JMS_MAX_SIZE;
+}
+
 static const struct dsa_switch_ops b53_switch_ops = {
 	.get_tag_protocol	= b53_get_tag_protocol,
 	.setup			= b53_setup,
@@ -2102,6 +2117,8 @@ static const struct dsa_switch_ops b53_switch_ops = {
 	.port_mdb_prepare	= b53_mdb_prepare,
 	.port_mdb_add		= b53_mdb_add,
 	.port_mdb_del		= b53_mdb_del,
+	.port_max_mtu		= b53_get_max_mtu,
+	.port_change_mtu	= b53_change_mtu,
 };
 
 struct b53_chip_data {


@@ -532,6 +532,7 @@ static int felix_setup(struct dsa_switch *ds)
 			 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
 			 ANA_PGID_PGID, PGID_UC);
 
+	ds->mtu_enforcement_ingress = true;
 	/* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
 	 * isn't instantiated for the Felix PF.
 	 * In-band AN may take a few ms to complete, so we need to poll.
@@ -609,6 +610,22 @@ static bool felix_txtstamp(struct dsa_switch *ds, int port,
 	return false;
 }
 
+static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+	struct ocelot *ocelot = ds->priv;
+
+	ocelot_port_set_maxlen(ocelot, port, new_mtu);
+
+	return 0;
+}
+
+static int felix_get_max_mtu(struct dsa_switch *ds, int port)
+{
+	struct ocelot *ocelot = ds->priv;
+
+	return ocelot_get_max_mtu(ocelot, port);
+}
+
 static int felix_cls_flower_add(struct dsa_switch *ds, int port,
 				struct flow_cls_offload *cls, bool ingress)
 {
@@ -664,6 +681,8 @@ static const struct dsa_switch_ops felix_switch_ops = {
 	.port_hwtstamp_set	= felix_hwtstamp_set,
 	.port_rxtstamp		= felix_rxtstamp,
 	.port_txtstamp		= felix_txtstamp,
+	.port_change_mtu	= felix_change_mtu,
+	.port_max_mtu		= felix_get_max_mtu,
 	.cls_flower_add		= felix_cls_flower_add,
 	.cls_flower_del		= felix_cls_flower_del,
 	.cls_flower_stats	= felix_cls_flower_stats,


@@ -126,6 +126,7 @@ enum sja1105_reset_reason {
 	SJA1105_RX_HWTSTAMPING,
 	SJA1105_AGEING_TIME,
 	SJA1105_SCHEDULING,
+	SJA1105_BEST_EFFORT_POLICING,
 };
 
 int sja1105_static_config_reload(struct sja1105_private *priv,


@@ -519,12 +519,12 @@ static int sja1105_init_avb_params(struct sja1105_private *priv)
 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
 
 static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
-				  int index)
+				  int index, int mtu)
 {
 	policing[index].sharindx = index;
 	policing[index].smax = 65535; /* Burst size in bytes */
 	policing[index].rate = SJA1105_RATE_MBPS(1000);
-	policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+	policing[index].maxlen = mtu;
 	policing[index].partition = 0;
 }
 
@@ -556,12 +556,16 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
 	 */
 	for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
 		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
+		int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+
+		if (dsa_is_cpu_port(priv->ds, i))
+			mtu += VLAN_HLEN;
 
 		for (j = 0; j < SJA1105_NUM_TC; j++, k++)
-			sja1105_setup_policer(policing, k);
+			sja1105_setup_policer(policing, k, mtu);
 
 		/* Set up this port's policer for broadcast traffic */
-		sja1105_setup_policer(policing, bcast);
+		sja1105_setup_policer(policing, bcast, mtu);
 	}
 
 	return 0;
 }
@@ -1544,6 +1548,7 @@ static const char * const sja1105_reset_reasons[] = {
 	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
 	[SJA1105_AGEING_TIME] = "Ageing time",
 	[SJA1105_SCHEDULING] = "Time-aware scheduling",
+	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
 };
 
 /* For situations where we need to change a setting at runtime that is only
@@ -1952,6 +1957,8 @@ static int sja1105_setup(struct dsa_switch *ds)
 	/* Advertise the 8 egress queues */
 	ds->num_tx_queues = SJA1105_NUM_TC;
 
+	ds->mtu_enforcement_ingress = true;
+
 	/* The DSA/switchdev model brings up switch ports in standalone mode by
 	 * default, and that means vlan_filtering is 0 since they're not under
 	 * a bridge, so it's safe to set up switch tagging at this time.
@@ -2120,6 +2127,39 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
 	return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
 }
 
+static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+	int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+	struct sja1105_l2_policing_entry *policing;
+	struct sja1105_private *priv = ds->priv;
+	int tc;
+
+	new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+	if (dsa_is_cpu_port(ds, port))
+		new_mtu += VLAN_HLEN;
+
+	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+	/* We set all 9 port policers to the same value, so just checking the
+	 * broadcast one is fine.
+	 */
+	if (policing[bcast].maxlen == new_mtu)
+		return 0;
+
+	for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+		policing[port * SJA1105_NUM_TC + tc].maxlen = new_mtu;
+
+	policing[bcast].maxlen = new_mtu;
+
+	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
+{
+	return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
 				 enum tc_setup_type type,
 				 void *type_data)
@@ -2215,6 +2255,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
 	.setup			= sja1105_setup,
 	.teardown		= sja1105_teardown,
 	.set_ageing_time	= sja1105_set_ageing_time,
+	.port_change_mtu	= sja1105_change_mtu,
+	.port_max_mtu		= sja1105_get_max_mtu,
 	.phylink_validate	= sja1105_phylink_validate,
 	.phylink_mac_link_state	= sja1105_mac_pcs_get_state,
 	.phylink_mac_config	= sja1105_mac_config,


@@ -664,16 +664,6 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
 		      VSC73XX_MAC_CFG_TX_EN |
 		      VSC73XX_MAC_CFG_RX_EN);
 
-	/* Max length, we can do up to 9.6 KiB, so allow that.
-	 * According to application not "VSC7398 Jumbo Frames" setting
-	 * up the MTU to 9.6 KB does not affect the performance on standard
-	 * frames, so just enable it. It is clear from the application note
-	 * that "9.6 kilobytes" == 9600 bytes.
-	 */
-	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
-		      port,
-		      VSC73XX_MAXLEN, 9600);
-
 	/* Flow control for the CPU port:
 	 * Use a zero delay pause frame when pause condition is left
 	 * Obey pause control frames
@@ -1030,6 +1020,24 @@ static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
 	}
 }
 
+static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+	struct vsc73xx *vsc = ds->priv;
+
+	return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+			     VSC73XX_MAXLEN, new_mtu);
+}
+
+/* According to application not "VSC7398 Jumbo Frames" setting
+ * up the MTU to 9.6 KB does not affect the performance on standard
+ * frames. It is clear from the application note that
+ * "9.6 kilobytes" == 9600 bytes.
+ */
+static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
+{
+	return 9600;
+}
+
 static const struct dsa_switch_ops vsc73xx_ds_ops = {
 	.get_tag_protocol = vsc73xx_get_tag_protocol,
 	.setup = vsc73xx_setup,
@@ -1041,6 +1049,8 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = {
 	.get_sset_count = vsc73xx_get_sset_count,
 	.port_enable = vsc73xx_port_enable,
 	.port_disable = vsc73xx_port_disable,
+	.port_change_mtu = vsc73xx_change_mtu,
+	.port_max_mtu = vsc73xx_get_max_mtu,
 };
 
 static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)


@@ -1248,6 +1248,14 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
 	return 0;
 }
 
+static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+
+	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+	return 0;
+}
+
 static const struct net_device_ops bgmac_netdev_ops = {
 	.ndo_open		= bgmac_open,
 	.ndo_stop		= bgmac_stop,
@@ -1256,6 +1264,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
 	.ndo_set_mac_address	= bgmac_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= phy_do_ioctl_running,
+	.ndo_change_mtu		= bgmac_change_mtu,
 };
 
 /**************************************************
@@ -1530,6 +1539,9 @@ int bgmac_enet_probe(struct bgmac *bgmac)
 	net_dev->hw_features = net_dev->features;
 	net_dev->vlan_features = net_dev->features;
 
+	/* Omit FCS from max MTU size */
+	net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+
 	err = register_netdev(bgmac->net_dev);
 	if (err) {
 		dev_err(bgmac->dev, "Cannot register net device\n");


@@ -351,7 +351,7 @@
 #define BGMAC_DESC_CTL0_IOC		0x20000000	/* IRQ on complete */
 #define BGMAC_DESC_CTL0_EOF		0x40000000	/* End of frame */
 #define BGMAC_DESC_CTL0_SOF		0x80000000	/* Start of frame */
-#define BGMAC_DESC_CTL1_LEN		0x00001FFF
+#define BGMAC_DESC_CTL1_LEN		0x00003FFF
 
 #define BGMAC_PHY_NOREGS		BRCM_PSEUDO_PHY_ADDR
 #define BGMAC_PHY_MASK			0x1F
@@ -366,7 +366,8 @@
 #define BGMAC_RX_FRAME_OFFSET		30	/* There are 2 unused bytes between header and real data */
 #define BGMAC_RX_BUF_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN - \
					 BGMAC_RX_FRAME_OFFSET)
-#define BGMAC_RX_MAX_FRAME_SIZE		1536	/* Copied from b44/tg3 */
+/* Jumbo frame size with FCS */
+#define BGMAC_RX_MAX_FRAME_SIZE		9724
 #define BGMAC_RX_BUF_SIZE		(BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
 #define BGMAC_RX_ALLOC_SIZE		(SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
					 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))


@@ -2185,13 +2185,25 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
  * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ * In the special case that it's the NPI port that we're configuring, the
+ * length of the tag and optional prefix needs to be accounted for privately,
+ * in order to be able to sustain communication at the requested @sdu.
  */
-static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
+void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
 {
 	struct ocelot_port *ocelot_port = ocelot->ports[port];
 	int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
 	int atop_wm;
 
+	if (port == ocelot->npi) {
+		maxlen += OCELOT_TAG_LEN;
+
+		if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+			maxlen += OCELOT_SHORT_PREFIX_LEN;
+		else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+			maxlen += OCELOT_LONG_PREFIX_LEN;
+	}
+
 	ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
 
 	/* Set Pause WM hysteresis
@@ -2209,6 +2221,24 @@ static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
 			    SYS_ATOP, port);
 	ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
 }
+EXPORT_SYMBOL(ocelot_port_set_maxlen);
+
+int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
+{
+	int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
+
+	if (port == ocelot->npi) {
+		max_mtu -= OCELOT_TAG_LEN;
+
+		if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+			max_mtu -= OCELOT_SHORT_PREFIX_LEN;
+		else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+			max_mtu -= OCELOT_LONG_PREFIX_LEN;
+	}
+
+	return max_mtu;
+}
+EXPORT_SYMBOL(ocelot_get_max_mtu);
 
 void ocelot_init_port(struct ocelot *ocelot, int port)
 {
@@ -2318,6 +2348,10 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
 {
 	int cpu = ocelot->num_phys_ports;
 
+	ocelot->npi = npi;
+	ocelot->inj_prefix = injection;
+	ocelot->xtr_prefix = extraction;
+
 	/* The unicast destination PGID for the CPU port module is unused */
 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
 	/* Instead set up a multicast destination PGID for traffic copied to
@@ -2330,19 +2364,10 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
 			 ANA_PORT_PORT_CFG, cpu);
 
 	if (npi >= 0 && npi < ocelot->num_phys_ports) {
-		int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
-
 		ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
 			     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi),
 			     QSYS_EXT_CPU_CFG);
 
-		if (injection == OCELOT_TAG_PREFIX_SHORT)
-			sdu += OCELOT_SHORT_PREFIX_LEN;
-		else if (injection == OCELOT_TAG_PREFIX_LONG)
-			sdu += OCELOT_LONG_PREFIX_LEN;
-
-		ocelot_port_set_maxlen(ocelot, npi, sdu);
-
 		/* Enable NPI port */
 		ocelot_write_rix(ocelot,
 				 QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |


@@ -423,6 +423,28 @@ int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);
 
+int bcm_phy_enable_jumbo(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL);
+	if (ret < 0)
+		return ret;
+
+	/* Enable extended length packet reception */
+	ret = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
+				   ret | MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN);
+	if (ret < 0)
+		return ret;
+
+	/* Enable the elastic FIFO for raising the transmission limit from
+	 * 4.5KB to 10KB, at the expense of an additional 16 ns in propagation
+	 * latency.
+	 */
+	return phy_set_bits(phydev, MII_BCM54XX_ECR, MII_BCM54XX_ECR_FIFOE);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_enable_jumbo);
+
 MODULE_DESCRIPTION("Broadcom PHY Library");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Broadcom Corporation");


@@ -65,5 +65,6 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
 		       struct ethtool_stats *stats, u64 *data);
 void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
 int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
+int bcm_phy_enable_jumbo(struct phy_device *phydev);
 
 #endif /* _LINUX_BCM_PHY_LIB_H */


@@ -178,6 +178,10 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
 		break;
 	}
 
+	if (ret)
+		return ret;
+
+	ret = bcm_phy_enable_jumbo(phydev);
 	if (ret)
 		return ret;


@@ -79,6 +79,7 @@
 #define MII_BCM54XX_ECR		0x10	/* BCM54xx extended control register */
 #define MII_BCM54XX_ECR_IM	0x1000	/* Interrupt mask */
 #define MII_BCM54XX_ECR_IF	0x0800	/* Interrupt force */
+#define MII_BCM54XX_ECR_FIFOE	0x0001	/* FIFO elasticity */
 
 #define MII_BCM54XX_ESR		0x11	/* BCM54xx extended status register */
 #define MII_BCM54XX_ESR_IS	0x1000	/* Interrupt status */
@@ -119,6 +120,7 @@
 #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL	0x00
 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB		0x0400
 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA	0x0800
+#define MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN	0x4000
 
 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC			0x07
 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN	0x0010


@@ -284,6 +284,12 @@ struct dsa_switch {
 	 */
 	bool			pcs_poll;
 
+	/* For switches that only have the MRU configurable. To ensure the
+	 * configured MTU is not exceeded, normalization of MRU on all bridged
+	 * interfaces is needed.
+	 */
+	bool			mtu_enforcement_ingress;
+
 	size_t num_ports;
 };
 
@@ -579,6 +585,16 @@ struct dsa_switch_ops {
 				    struct devlink_param_gset_ctx *ctx);
 	int	(*devlink_param_set)(struct dsa_switch *ds, u32 id,
 				     struct devlink_param_gset_ctx *ctx);
+
+	/*
+	 * MTU change functionality. Switches can also adjust their MRU through
+	 * this method. By MTU, one understands the SDU (L2 payload) length.
+	 * If the switch needs to account for the DSA tag on the CPU port, this
+	 * method needs to do so privately.
+	 */
+	int	(*port_change_mtu)(struct dsa_switch *ds, int port,
+				   int new_mtu);
+	int	(*port_max_mtu)(struct dsa_switch *ds, int port);
 };
 
 #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)		\


@@ -517,6 +517,11 @@ struct ocelot {
 	 */
 	u8				num_phys_ports;
 
+	int				npi;
+
+	enum ocelot_tag_prefix		inj_prefix;
+	enum ocelot_tag_prefix		xtr_prefix;
+
 	u32				*lags;
 
 	struct list_head		multicast;
@@ -611,6 +616,8 @@ int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
 int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
 				 struct sk_buff *skb);
 void ocelot_get_txtstamp(struct ocelot *ocelot);
+void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu);
+int ocelot_get_max_mtu(struct ocelot *ocelot, int port);
 int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
 			      struct flow_cls_offload *f, bool ingress);
 int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,


@@ -18,8 +18,8 @@
 
 #include "dsa_priv.h"
 
-static LIST_HEAD(dsa_tree_list);
 static DEFINE_MUTEX(dsa2_mutex);
+LIST_HEAD(dsa_tree_list);
 
 static const struct devlink_ops dsa_devlink_ops = {
 };


@@ -22,6 +22,7 @@ enum {
 	DSA_NOTIFIER_MDB_DEL,
 	DSA_NOTIFIER_VLAN_ADD,
 	DSA_NOTIFIER_VLAN_DEL,
+	DSA_NOTIFIER_MTU,
 };
 
 /* DSA_NOTIFIER_AGEING_TIME */
@@ -61,6 +62,14 @@ struct dsa_notifier_vlan_info {
 	int port;
 };
 
+/* DSA_NOTIFIER_MTU */
+struct dsa_notifier_mtu_info {
+	bool propagate_upstream;
+	int sw_index;
+	int port;
+	int mtu;
+};
+
 struct dsa_slave_priv {
 	/* Copy of CPU port xmit for faster access in slave transmit hot path */
 	struct sk_buff *	(*xmit)(struct sk_buff *skb,
@@ -127,6 +136,8 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
 			    struct switchdev_trans *trans);
 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
 			 struct switchdev_trans *trans);
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
+			bool propagate_upstream);
 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
 		     u16 vid);
 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
@@ -183,4 +194,8 @@ dsa_slave_to_master(const struct net_device *dev)
 /* switch.c */
 int dsa_switch_register_notifier(struct dsa_switch *ds);
 void dsa_switch_unregister_notifier(struct dsa_switch *ds);
+
+/* dsa2.c */
+extern struct list_head dsa_tree_list;
+
 #endif


@@ -314,20 +314,6 @@ static const struct attribute_group dsa_group = {
 	.attrs	= dsa_slave_attrs,
 };
 
-static void dsa_master_set_mtu(struct net_device *dev, struct dsa_port *cpu_dp)
-{
-	unsigned int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
-	int err;
-
-	rtnl_lock();
-	if (mtu <= dev->max_mtu) {
-		err = dev_set_mtu(dev, mtu);
-		if (err)
-			netdev_dbg(dev, "Unable to set MTU to include for DSA overheads\n");
-	}
-	rtnl_unlock();
-}
-
 static void dsa_master_reset_mtu(struct net_device *dev)
 {
 	int err;
@@ -344,7 +330,12 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
 	int ret;
 
-	dsa_master_set_mtu(dev, cpu_dp);
+	rtnl_lock();
+	ret = dev_set_mtu(dev, ETH_DATA_LEN + cpu_dp->tag_ops->overhead);
+	rtnl_unlock();
+	if (ret)
+		netdev_warn(dev, "error %d setting MTU to include DSA overhead\n",
+			    ret);
 
 	/* If we use a tagging format that doesn't have an ethertype
 	 * field, make sure that all packets from this point on get


@@ -297,6 +297,19 @@ int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
 	return ds->ops->port_egress_floods(ds, port, true, mrouter);
 }
 
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
+			bool propagate_upstream)
+{
+	struct dsa_notifier_mtu_info info = {
+		.sw_index = dp->ds->index,
+		.propagate_upstream = propagate_upstream,
+		.port = dp->index,
+		.mtu = new_mtu,
+	};
+
+	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
+}
+
 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
 		     u16 vid)
 {


@@ -1218,6 +1218,208 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
 	return dsa_port_vid_del(dp, vid);
 }
 
+struct dsa_hw_port {
+	struct list_head list;
+	struct net_device *dev;
+	int old_mtu;
+};
+
+static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
+{
+	const struct dsa_hw_port *p;
+	int err;
+
+	list_for_each_entry(p, hw_port_list, list) {
+		if (p->dev->mtu == mtu)
+			continue;
+
+		err = dev_set_mtu(p->dev, mtu);
+		if (err)
+			goto rollback;
+	}
+
+	return 0;
+
+rollback:
+	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
+		if (p->dev->mtu == p->old_mtu)
+			continue;
+
+		if (dev_set_mtu(p->dev, p->old_mtu))
+			netdev_err(p->dev, "Failed to restore MTU\n");
+	}
+
+	return err;
+}
+
+static void dsa_hw_port_list_free(struct list_head *hw_port_list)
+{
+	struct dsa_hw_port *p, *n;
+
+	list_for_each_entry_safe(p, n, hw_port_list, list)
+		kfree(p);
+}
+
+/* Make the hardware datapath to/from @dev limited to a common MTU */
+void dsa_bridge_mtu_normalization(struct dsa_port *dp)
+{
+	struct list_head hw_port_list;
+	struct dsa_switch_tree *dst;
+	int min_mtu = ETH_MAX_MTU;
+	struct dsa_port *other_dp;
+	int err;
+
+	if (!dp->ds->mtu_enforcement_ingress)
+		return;
+
+	if (!dp->bridge_dev)
+		return;
+
+	INIT_LIST_HEAD(&hw_port_list);
+
+	/* Populate the list of ports that are part of the same bridge
+	 * as the newly added/modified port
+	 */
+	list_for_each_entry(dst, &dsa_tree_list, list) {
+		list_for_each_entry(other_dp, &dst->ports, list) {
+			struct dsa_hw_port *hw_port;
+			struct net_device *slave;
+
+			if (other_dp->type != DSA_PORT_TYPE_USER)
+				continue;
+
+			if (other_dp->bridge_dev != dp->bridge_dev)
+				continue;
+
+			if (!other_dp->ds->mtu_enforcement_ingress)
+				continue;
+
+			slave = other_dp->slave;
+
+			if (min_mtu > slave->mtu)
+				min_mtu = slave->mtu;
+
+			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
+			if (!hw_port)
+				goto out;
+
+			hw_port->dev = slave;
+			hw_port->old_mtu = slave->mtu;
+
+			list_add(&hw_port->list, &hw_port_list);
+		}
+	}
+
+	/* Attempt to configure the entire hardware bridge to the newly added
+	 * interface's MTU first, regardless of whether the intention of the
+	 * user was to raise or lower it.
+	 */
+	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
+	if (!err)
+		goto out;
+
+	/* Clearly that didn't work out so well, so just set the minimum MTU on
+	 * all hardware bridge ports now. If this fails too, then all ports will
+	 * still have their old MTU rolled back anyway.
+	 */
+	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
+
+out:
+	dsa_hw_port_list_free(&hw_port_list);
+}
+
+static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct net_device *master = dsa_slave_to_master(dev);
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->dp->ds;
+	struct dsa_port *cpu_dp;
+	int port = p->dp->index;
+	int largest_mtu = 0;
+	int new_master_mtu;
+	int old_master_mtu;
+	int mtu_limit;
+	int cpu_mtu;
+	int err, i;
+
+	if (!ds->ops->port_change_mtu)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < ds->num_ports; i++) {
+		int slave_mtu;
+
+		if (!dsa_is_user_port(ds, i))
+			continue;
+
+		/* During probe, this function will be called for each slave
+		 * device, while not all of them have been allocated. That's
+		 * ok, it doesn't change what the maximum is, so ignore it.
+		 */
+		if (!dsa_to_port(ds, i)->slave)
+			continue;
+
+		/* Pretend that we already applied the setting, which we
+		 * actually haven't (still haven't done all integrity checks)
+		 */
+		if (i == port)
+			slave_mtu = new_mtu;
+		else
+			slave_mtu = dsa_to_port(ds, i)->slave->mtu;
+
+		if (largest_mtu < slave_mtu)
+			largest_mtu = slave_mtu;
+	}
+
+	cpu_dp = dsa_to_port(ds, port)->cpu_dp;
+
+	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
+	old_master_mtu = master->mtu;
+	new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
+	if (new_master_mtu > mtu_limit)
+		return -ERANGE;
+
+	/* If the master MTU isn't over limit, there's no need to check the CPU
+	 * MTU, since that surely isn't either.
	 */
+	cpu_mtu = largest_mtu;
+
+	/* Start applying stuff */
+	if (new_master_mtu != old_master_mtu) {
+		err = dev_set_mtu(master, new_master_mtu);
+		if (err < 0)
+			goto out_master_failed;
+
+		/* We only need to propagate the MTU of the CPU port to
+		 * upstream switches.
+		 */
+		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
+		if (err)
+			goto out_cpu_failed;
+	}
+
+	err = dsa_port_mtu_change(dp, new_mtu, false);
+	if (err)
+		goto out_port_failed;
+
+	dev->mtu = new_mtu;
+
+	dsa_bridge_mtu_normalization(dp);
+
+	return 0;
+
+out_port_failed:
+	if (new_master_mtu != old_master_mtu)
+		dsa_port_mtu_change(cpu_dp, old_master_mtu -
+				    cpu_dp->tag_ops->overhead,
+				    true);
+out_cpu_failed:
+	if (new_master_mtu != old_master_mtu)
+		dev_set_mtu(master, old_master_mtu);
+out_master_failed:
+	return err;
+}
+
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
 	.get_drvinfo		= dsa_slave_get_drvinfo,
 	.get_regs_len		= dsa_slave_get_regs_len,
@@ -1295,6 +1497,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
 	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
+	.ndo_change_mtu		= dsa_slave_change_mtu,
 };
 
 static struct device_type dsa_type = {
@@ -1465,7 +1668,10 @@ int dsa_slave_create(struct dsa_port *port)
 	slave_dev->priv_flags |= IFF_NO_QUEUE;
 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
 	slave_dev->min_mtu = 0;
-	slave_dev->max_mtu = ETH_MAX_MTU;
+	if (ds->ops->port_max_mtu)
+		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
+	else
+		slave_dev->max_mtu = ETH_MAX_MTU;
 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
 
 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
@@ -1483,6 +1689,15 @@ int dsa_slave_create(struct dsa_port *port)
 	p->xmit = cpu_dp->tag_ops->xmit;
 	port->slave = slave_dev;
 
+	rtnl_lock();
+	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+	rtnl_unlock();
+	if (ret && ret != -EOPNOTSUPP) {
+		dev_err(ds->dev, "error %d setting MTU on port %d\n",
+			ret, port->index);
+		goto out_free;
+	}
+
 	netif_carrier_off(slave_dev);
 
 	ret = dsa_slave_phy_setup(slave_dev);
@@ -1545,6 +1760,8 @@ static int dsa_slave_changeupper(struct net_device *dev,
 	if (netif_is_bridge_master(info->upper_dev)) {
 		if (info->linking) {
 			err = dsa_port_bridge_join(dp, info->upper_dev);
+			if (!err)
+				dsa_bridge_mtu_normalization(dp);
 			err = notifier_from_errno(err);
 		} else {
 			dsa_port_bridge_leave(dp, info->upper_dev);


@@ -52,6 +52,40 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds,
 	return 0;
 }
 
+static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
+				 struct dsa_notifier_mtu_info *info)
+{
+	if (ds->index == info->sw_index)
+		return (port == info->port) || dsa_is_dsa_port(ds, port);
+
+	if (!info->propagate_upstream)
+		return false;
+
+	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+		return true;
+
+	return false;
+}
+
+static int dsa_switch_mtu(struct dsa_switch *ds,
+			  struct dsa_notifier_mtu_info *info)
+{
+	int port, ret;
+
+	if (!ds->ops->port_change_mtu)
+		return -EOPNOTSUPP;
+
+	for (port = 0; port < ds->num_ports; port++) {
+		if (dsa_switch_mtu_match(ds, port, info)) {
+			ret = ds->ops->port_change_mtu(ds, port, info->mtu);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int dsa_switch_bridge_join(struct dsa_switch *ds,
 				  struct dsa_notifier_bridge_info *info)
 {
@@ -328,6 +362,9 @@ static int dsa_switch_event(struct notifier_block *nb,
 	case DSA_NOTIFIER_VLAN_DEL:
 		err = dsa_switch_vlan_del(ds, info);
 		break;
+	case DSA_NOTIFIER_MTU:
+		err = dsa_switch_mtu(ds, info);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;