commit d371b7c92d
When ports are standalone (after they left the bridge), they should have no VLAN filtering semantics (they should pass all traffic to the CPU). Currently this is not true for switchdev drivers, because the bridge "forgets" to unset that.

Normally one would think that doing this at the bridge layer would be a better idea, i.e. call br_vlan_filter_toggle() from br_del_if(), similar to how nbp_vlan_init() is called from br_add_if().

However what complicates that approach, and makes this one preferable, is the fact that for the bridge core, vlan_filtering is a per-bridge setting, whereas for switchdev/DSA it is per-port. Also there are switches where the setting is per the entire device, and unsetting vlan_filtering one by one, for each leaving port, would not be possible from the bridge core without a certain level of awareness. So do this in DSA and let drivers be unaware of it.

Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
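For a switch where VLAN filtering is a device-global setting, the driver side of this might look roughly like the sketch below. The "foo" driver, its register layout and its foo_rmw() accessor are invented for illustration; only the .setup and .port_vlan_filtering ops and the ds->vlan_filtering_is_global flag checked by dsa_switch_bridge_leave() are taken from this kernel version's DSA API.

#include <linux/bits.h>
#include <linux/types.h>
#include <net/dsa.h>

#define FOO_GLOBAL_CTRL		0x04	/* hypothetical register */
#define FOO_VLAN_FILTERING	BIT(7)	/* hypothetical enable bit */

/* hypothetical read-modify-write accessor provided elsewhere by the driver */
int foo_rmw(void *priv, u32 reg, u32 mask, u32 val);

static int foo_setup(struct dsa_switch *ds)
{
	/* Tell DSA the setting cannot be toggled per port, so it only asks
	 * us to clear it once the last port has left a VLAN-aware bridge.
	 */
	ds->vlan_filtering_is_global = true;

	return 0;
}

static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
				   bool vlan_filtering)
{
	/* Chip-wide setting, so the port argument is not used here */
	return foo_rmw(ds->priv, FOO_GLOBAL_CTRL, FOO_VLAN_FILTERING,
		       vlan_filtering ? FOO_VLAN_FILTERING : 0);
}

static const struct dsa_switch_ops foo_switch_ops = {
	.setup			= foo_setup,
	.port_vlan_filtering	= foo_port_vlan_filtering,
	/* other ops omitted */
};

With something like this in place, removing the last bridged port (e.g. "ip link set swp0 nomaster") ends up clearing the filtering bit, so the standalone ports go back to passing all traffic to the CPU.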
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	int i;

	for (i = 0; i < ds->num_ports; ++i) {
		struct dsa_port *dp = &ds->ports[i];

		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;
	}

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;
	struct switchdev_trans *trans = info->trans;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
			return -ERANGE;
		if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
			return -ERANGE;
		return 0;
	}

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}
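
/* A driver-side .set_ageing_time implementation might look roughly like the
 * sketch below; the register name and the 15-second hardware granularity are
 * invented, but DSA does pass the ageing time in milliseconds here:
 *
 *	static int foo_set_ageing_time(struct dsa_switch *ds,
 *				       unsigned int msecs)
 *	{
 *		u32 val = DIV_ROUND_UP(msecs, 15 * 1000);
 *
 *		return foo_write(ds->priv, FOO_AGEING_REG, val);
 *	}
 *
 * Drivers with hardware limits can also set ds->ageing_time_min/max so that
 * the prepare phase above rejects out-of-range values with -ERANGE.
 */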

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_bridge_join)
		return ds->ops->port_bridge_join(ds, info->port, info->br);

	if (ds->index != info->sw_index && ds->ops->crosschip_bridge_join)
		return ds->ops->crosschip_bridge_join(ds, info->sw_index,
						      info->port, info->br);

	return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	bool unset_vlan_filtering = br_vlan_enabled(info->br);
	int err, i;

	if (ds->index == info->sw_index && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if (ds->index != info->sw_index && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->sw_index, info->port,
						info->br);

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port left this bridge.
	 */
	if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
		for (i = 0; i < ds->num_ports; i++) {
			if (i == info->port)
				continue;
			if (dsa_to_port(ds, i)->bridge_dev == info->br) {
				unset_vlan_filtering = false;
				break;
			}
		}
	}
	if (unset_vlan_filtering) {
		struct switchdev_trans trans = {0};

		err = dsa_port_vlan_filtering(&ds->ports[info->port],
					      false, &trans);
		if (err && err != -EOPNOTSUPP)
			return err;
	}
	return 0;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
}

static int
dsa_switch_mdb_prepare_bitmap(struct dsa_switch *ds,
			      const struct switchdev_obj_port_mdb *mdb,
			      const unsigned long *bitmap)
{
	int port, err;

	if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	for_each_set_bit(port, bitmap, ds->num_ports) {
		err = ds->ops->port_mdb_prepare(ds, port, mdb);
		if (err)
			return err;
	}

	return 0;
}

static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
				      const struct switchdev_obj_port_mdb *mdb,
				      const unsigned long *bitmap)
{
	int port;

	for_each_set_bit(port, bitmap, ds->num_ports)
		ds->ops->port_mdb_add(ds, port, mdb);
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	const struct switchdev_obj_port_mdb *mdb = info->mdb;
	struct switchdev_trans *trans = info->trans;
	int port;

	/* Build a mask of Multicast group members */
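	/* For example, in a two-switch fabric an MDB entry for port 2 of
	 * switch 1 makes switch 1 set bit 2 plus its DSA link ports, while
	 * switch 0 only sets its DSA link ports, so the group's traffic can
	 * cross the fabric towards the target port.
	 */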
	bitmap_zero(ds->bitmap, ds->num_ports);
	if (ds->index == info->sw_index)
		set_bit(info->port, ds->bitmap);
	for (port = 0; port < ds->num_ports; port++)
		if (dsa_is_dsa_port(ds, port))
			set_bit(port, ds->bitmap);

	if (switchdev_trans_ph_prepare(trans))
		return dsa_switch_mdb_prepare_bitmap(ds, mdb, ds->bitmap);

	dsa_switch_mdb_add_bitmap(ds, mdb, ds->bitmap);

	return 0;
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	const struct switchdev_obj_port_mdb *mdb = info->mdb;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mdb_del(ds, info->port, mdb);

	return 0;
}

static int dsa_port_vlan_device_check(struct net_device *vlan_dev,
				      int vlan_dev_vid,
				      void *arg)
{
	struct switchdev_obj_port_vlan *vlan = arg;
	u16 vid;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		if (vid == vlan_dev_vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_port_vlan_check(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	int err = 0;

	/* Device is not bridged, let it proceed with the VLAN device
	 * creation.
	 */
	if (!dp->bridge_dev)
		return err;

	/* dsa_slave_vlan_rx_{add,kill}_vid() cannot use the prepare phase and
	 * already checks whether there is an overlapping bridge VLAN entry
	 * with the same VID, so here we only need to check that if we are
	 * adding a bridge VLAN entry there is not an overlapping VLAN device
	 * claiming that VID.
	 */
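	/* For example, if an 8021q upper such as swp0.100 already exists on
	 * this port, adding bridge VLAN 100 here is rejected with -EBUSY by
	 * dsa_port_vlan_device_check().
	 */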
	return vlan_for_each(dp->slave, dsa_port_vlan_device_check,
			     (void *)vlan);
}

static int
dsa_switch_vlan_prepare_bitmap(struct dsa_switch *ds,
			       const struct switchdev_obj_port_vlan *vlan,
			       const unsigned long *bitmap)
{
	int port, err;

	if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	for_each_set_bit(port, bitmap, ds->num_ports) {
		err = dsa_port_vlan_check(ds, port, vlan);
		if (err)
			return err;

		err = ds->ops->port_vlan_prepare(ds, port, vlan);
		if (err)
			return err;
	}

	return 0;
}

static void
dsa_switch_vlan_add_bitmap(struct dsa_switch *ds,
			   const struct switchdev_obj_port_vlan *vlan,
			   const unsigned long *bitmap)
{
	int port;

	for_each_set_bit(port, bitmap, ds->num_ports)
		ds->ops->port_vlan_add(ds, port, vlan);
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	const struct switchdev_obj_port_vlan *vlan = info->vlan;
	struct switchdev_trans *trans = info->trans;
	int port;

	/* Build a mask of VLAN members */
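	/* For example, a VLAN added on one user port is also installed on
	 * the CPU and DSA ports below, so tagged traffic keeps flowing to
	 * the host and across the fabric.
	 */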
	bitmap_zero(ds->bitmap, ds->num_ports);
	if (ds->index == info->sw_index)
		set_bit(info->port, ds->bitmap);
	for (port = 0; port < ds->num_ports; port++)
		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
			set_bit(port, ds->bitmap);

	if (switchdev_trans_ph_prepare(trans))
		return dsa_switch_vlan_prepare_bitmap(ds, vlan, ds->bitmap);

	dsa_switch_vlan_add_bitmap(ds, vlan, ds->bitmap);

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	const struct switchdev_obj_port_vlan *vlan = info->vlan;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, vlan);

	return 0;
}

static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Non-switchdev operations cannot be rolled back. If a DSA driver
	 * returns an error during the chained call, switch chips may be in an
	 * inconsistent state.
	 */
	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
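
/* The emitting half of this mechanism lives in the port layer
 * (net/dsa/port.c): events are raised by a helper roughly along these lines,
 * which walks the tree-wide notifier chain so that every switch in the
 * fabric, not only the one owning the port, gets to handle the event:
 *
 *	static int dsa_port_notify(const struct dsa_port *dp,
 *				   unsigned long e, void *v)
 *	{
 *		int err = raw_notifier_call_chain(&dp->ds->dst->nh, e, v);
 *
 *		return notifier_to_errno(err);
 *	}
 */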

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}