// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include <trace/events/bridge.h>

#include "br_private.h"
#include "br_private_mcast_eht.h"

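/* MDB entries are indexed by group address in br->mdb_hash_tbl, and S,G
 * port group entries by their (port, address) key in br->sg_port_tbl;
 * both tables shrink automatically as entries are removed.
 */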
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

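/* RCU-only lookup variant for the data path; br_mdb_ip_get() below is the
 * variant for callers that already hold br->multicast_lock.
 */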
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

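/* Data-path lookup for an incoming skb: with IGMPv3/MLDv2 an S,G match is
 * tried first and the lookup then falls back to the *,G entry; frames of
 * other protocols are matched by destination MAC address.
 */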
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();

out:
	return pmctx;
}

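/* Resolve the per-port-VLAN multicast context for @vid; returns NULL when
 * VLAN snooping is disabled or the VLAN's multicast context is inactive.
 */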
static struct net_bridge_mcast_port *
br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx = NULL;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return NULL;

	/* Take RCU to access the vlan. */
	rcu_read_lock();

	vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;

	rcu_read_unlock();

	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}

static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

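/* Remove an S,G entry's automatically installed (STAR_EXCL) ports once only
 * such ports, or user-added permanent ones, remain on it.
 */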
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

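/* Install the S,G entry corresponding to this source entry and, unless it
 * was added by user-space as permanent, let the kernel manage its removal.
 */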
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT) &&
		    !(src->flags & BR_SGRP_F_USER_ADDED))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

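/* MDB entries and group sources are unlinked under multicast_lock and freed
 * via the deferred mcast_gc_work item, after their timers are shut down.
 */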
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	timer_shutdown_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

2010-02-27 19:41:45 +00:00
|
|
|
|
2020-09-07 09:56:19 +00:00
|
|
|
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
|
|
|
|
{
|
|
|
|
struct net_bridge_group_src *src;
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2020-09-07 09:56:19 +00:00
|
|
|
src = container_of(gc, struct net_bridge_group_src, mcast_gc);
|
|
|
|
WARN_ON(!hlist_unhashed(&src->node));
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2022-12-20 18:45:19 +00:00
|
|
|
timer_shutdown_sync(&src->timer);
|
2020-09-07 09:56:19 +00:00
|
|
|
kfree_rcu(src, rcu);
|
2010-02-27 19:41:45 +00:00
|
|
|
}
|
|
|
|
|
2022-12-10 14:56:25 +00:00
|
|
|
void __br_multicast_del_group_src(struct net_bridge_group_src *src)
|
2020-09-07 09:56:07 +00:00
|
|
|
{
|
2020-09-22 07:30:22 +00:00
|
|
|
struct net_bridge *br = src->pg->key.port->br;
|
2020-09-07 09:56:07 +00:00
|
|
|
|
|
|
|
hlist_del_init_rcu(&src->node);
|
|
|
|
src->pg->src_ents--;
|
2020-09-07 09:56:19 +00:00
|
|
|
hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
|
|
|
|
queue_work(system_long_wq, &br->mcast_gc_work);
|
|
|
|
}
|
|
|
|
|
2022-12-10 14:56:25 +00:00
|
|
|
void br_multicast_del_group_src(struct net_bridge_group_src *src,
|
|
|
|
bool fastleave)
|
|
|
|
{
|
|
|
|
br_multicast_fwd_src_remove(src, fastleave);
|
|
|
|
__br_multicast_del_group_src(src);
|
|
|
|
}
|
|
|
|
|
2023-02-02 17:59:25 +00:00
|
|
|
static int
|
|
|
|
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
|
|
|
|
struct netlink_ext_ack *extack,
|
|
|
|
const char *what)
|
|
|
|
{
|
|
|
|
u32 max = READ_ONCE(pmctx->mdb_max_entries);
|
|
|
|
u32 n = READ_ONCE(pmctx->mdb_n_entries);
|
|
|
|
|
|
|
|
if (max && n >= max) {
|
|
|
|
NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
|
|
|
|
what, n, max);
|
|
|
|
return -E2BIG;
|
|
|
|
}
|
|
|
|
|
|
|
|
WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
|
|
|
|
{
|
|
|
|
u32 n = READ_ONCE(pmctx->mdb_n_entries);
|
|
|
|
|
|
|
|
WARN_ON_ONCE(n == 0);
|
|
|
|
WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
|
|
|
|
}
|
|
|
|
|
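Both helpers above run with br->multicast_lock held (their callers assert it), so the read-modify-write is single-writer; the READ_ONCE()/WRITE_ONCE() pairs exist for lockless readers such as br_multicast_ngroups_get() below. The idiom in isolation, with a hypothetical counter:

/* Writer, serialized by a lock elsewhere: publish with WRITE_ONCE(). */
static void example_counter_inc(u32 *n)
{
	WRITE_ONCE(*n, READ_ONCE(*n) + 1);
}

/* Lockless reader: READ_ONCE() prevents torn or re-fetched loads. */
static u32 example_counter_read(const u32 *n)
{
	return READ_ONCE(*n);
}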
|
|
|
static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
|
|
|
|
const struct br_ip *group,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
struct net_bridge_mcast_port *pmctx;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
lockdep_assert_held_once(&port->br->multicast_lock);
|
|
|
|
|
|
|
|
/* Always count on the port context. */
|
|
|
|
err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
|
|
|
|
"Port");
|
|
|
|
if (err) {
|
|
|
|
trace_br_mdb_full(port->dev, group);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Only count on the VLAN context if VID is given, and if snooping on
|
|
|
|
* that VLAN is enabled.
|
|
|
|
*/
|
|
|
|
if (!group->vid)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
|
|
|
|
if (!pmctx)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
|
|
|
|
if (err) {
|
|
|
|
trace_br_mdb_full(port->dev, group);
|
|
|
|
goto dec_one_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
dec_one_out:
|
|
|
|
br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
|
|
|
|
{
|
|
|
|
struct net_bridge_mcast_port *pmctx;
|
|
|
|
|
|
|
|
lockdep_assert_held_once(&port->br->multicast_lock);
|
|
|
|
|
|
|
|
if (vid) {
|
|
|
|
pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
|
|
|
|
if (pmctx)
|
|
|
|
br_multicast_port_ngroups_dec_one(pmctx);
|
|
|
|
}
|
|
|
|
br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
|
|
|
|
}
|
|
|
|
|
2023-02-02 17:59:26 +00:00
|
|
|
u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
|
|
|
|
{
|
|
|
|
return READ_ONCE(pmctx->mdb_n_entries);
|
|
|
|
}
|
|
|
|
|
|
|
|
void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
|
|
|
|
{
|
|
|
|
WRITE_ONCE(pmctx->mdb_max_entries, max);
|
|
|
|
}
|
|
|
|
|
|
|
|
u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
|
|
|
|
{
|
|
|
|
return READ_ONCE(pmctx->mdb_max_entries);
|
|
|
|
}
|
|
|
|
|
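These accessors back the user-visible limit (the mcast_max_groups knob named in the extack message further up) and its companion counter; the netlink plumbing lands in the follow-up patch. A hedged sketch of a consumer-side check, with a hypothetical caller:

/* Hypothetical consumer: is there room for one more MDB entry? */
static bool example_room_for_group(const struct net_bridge_mcast_port *pmctx)
{
	u32 max = br_multicast_ngroups_get_max(pmctx);

	return !max || br_multicast_ngroups_get(pmctx) < max;	/* 0 = no limit */
}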
2020-09-07 09:56:19 +00:00
|
|
|
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
|
|
|
|
{
|
|
|
|
struct net_bridge_port_group *pg;
|
|
|
|
|
|
|
|
pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
|
|
|
|
WARN_ON(!hlist_unhashed(&pg->mglist));
|
|
|
|
WARN_ON(!hlist_empty(&pg->src_list));
|
|
|
|
|
2022-12-20 18:45:19 +00:00
|
|
|
timer_shutdown_sync(&pg->rexmit_timer);
|
|
|
|
timer_shutdown_sync(&pg->timer);
|
2020-09-07 09:56:19 +00:00
|
|
|
kfree_rcu(pg, rcu);
|
2020-09-07 09:56:07 +00:00
|
|
|
}
|
|
|
|
|
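/* Unlink @pg from @mp's port list (*pp must point at pg) and queue it for
 * deferred destruction; caller holds br->multicast_lock. */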
2020-09-07 09:56:06 +00:00
|
|
|
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
|
|
|
|
struct net_bridge_port_group *pg,
|
|
|
|
struct net_bridge_port_group __rcu **pp)
|
|
|
|
{
|
2020-09-22 07:30:22 +00:00
|
|
|
struct net_bridge *br = pg->key.port->br;
|
2020-09-07 09:56:07 +00:00
|
|
|
struct net_bridge_group_src *ent;
|
|
|
|
struct hlist_node *tmp;
|
2020-09-07 09:56:06 +00:00
|
|
|
|
|
|
|
rcu_assign_pointer(*pp, pg->next);
|
|
|
|
hlist_del_init(&pg->mglist);
|
2021-01-20 14:51:56 +00:00
|
|
|
br_multicast_eht_clean_sets(pg);
|
2020-09-07 09:56:07 +00:00
|
|
|
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
|
2021-01-20 14:52:03 +00:00
|
|
|
br_multicast_del_group_src(ent, false);
|
2020-09-07 09:56:12 +00:00
|
|
|
br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
|
2020-09-25 10:25:49 +00:00
|
|
|
if (!br_multicast_is_star_g(&mp->addr)) {
|
|
|
|
rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
|
|
|
|
br_sg_port_rht_params);
|
net: bridge: mcast: handle port group filter modes
We need to handle group filter mode transitions and initial state.
To change a port group's INCLUDE -> EXCLUDE mode (or when we have added
a new port group in EXCLUDE mode) we need to add that port to all of
*,G ports' S,G entries for proper replication. When the EXCLUDE state is
changed from IGMPv3 report, br_multicast_fwd_filter_exclude() must be
called after the source list processing because the assumption is that
all of the group's S,G entries will be created before transitioning to
EXCLUDE mode, i.e. most importantly its blocked entries will already be
added so it will not get automatically added to them.
The transition EXCLUDE -> INCLUDE happens only when a port group timer
expires; it requires us to remove that port from all of *,G ports' S,G
entries where it was automatically added previously.
Finally when we are adding a new S,G entry we must add all of *,G's
EXCLUDE ports to it.
In order to distinguish automatically added *,G EXCLUDE ports we have a
new port group flag - MDB_PG_FLAGS_STAR_EXCL.
Signed-off-by: Nikolay Aleksandrov <nikolay@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 07:30:24 +00:00
|
|
|
br_multicast_sg_del_exclude_ports(mp);
|
2020-09-25 10:25:49 +00:00
|
|
|
} else {
|
2020-09-22 07:30:24 +00:00
|
|
|
br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
|
2020-09-25 10:25:49 +00:00
|
|
|
}
|
2023-02-02 17:59:25 +00:00
|
|
|
br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
|
2020-09-07 09:56:19 +00:00
|
|
|
hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
|
|
|
|
queue_work(system_long_wq, &br->mcast_gc_work);
|
2020-09-07 09:56:06 +00:00
|
|
|
|
|
|
|
if (!mp->ports && !mp->host_joined && netif_running(br->dev))
|
|
|
|
mod_timer(&mp->timer, jiffies);
|
|
|
|
}
|
|
|
|
|
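/* Note the asymmetry above, per the filter-mode rules described in the
 * commit message: removing an S,G entry also cleans up the *,G EXCLUDE
 * ports auto-added to it (br_multicast_sg_del_exclude_ports()), while a
 * removed *,G entry is flipped to INCLUDE so the ports it auto-added to
 * S,G entries are withdrawn (br_multicast_star_g_handle_mode()). */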
|
|
|
static void br_multicast_find_del_pg(struct net_bridge *br,
|
|
|
|
struct net_bridge_port_group *pg)
|
2010-02-27 19:41:45 +00:00
|
|
|
{
|
2020-09-07 09:56:07 +00:00
|
|
|
struct net_bridge_port_group __rcu **pp;
|
2010-02-27 19:41:45 +00:00
|
|
|
struct net_bridge_mdb_entry *mp;
|
|
|
|
struct net_bridge_port_group *p;
|
2010-11-15 06:38:10 +00:00
|
|
|
|
2020-09-22 07:30:22 +00:00
|
|
|
mp = br_mdb_ip_get(br, &pg->key.addr);
|
2010-02-27 19:41:45 +00:00
|
|
|
if (WARN_ON(!mp))
|
|
|
|
return;
|
|
|
|
|
2010-11-15 06:38:10 +00:00
|
|
|
for (pp = &mp->ports;
|
|
|
|
(p = mlock_dereference(*pp, br)) != NULL;
|
|
|
|
pp = &p->next) {
|
2010-02-27 19:41:45 +00:00
|
|
|
if (p != pg)
|
|
|
|
continue;
|
|
|
|
|
2020-09-07 09:56:06 +00:00
|
|
|
br_multicast_del_pg(mp, pg, pp);
|
2010-02-27 19:41:45 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
WARN_ON(1);
|
|
|
|
}
|
|
|
|
|
2017-11-03 06:21:10 +00:00
|
|
|
static void br_multicast_port_group_expired(struct timer_list *t)
|
2010-02-27 19:41:45 +00:00
|
|
|
{
|
2017-11-03 06:21:10 +00:00
|
|
|
struct net_bridge_port_group *pg = from_timer(pg, t, timer);
|
2020-09-07 09:56:13 +00:00
|
|
|
struct net_bridge_group_src *src_ent;
|
2020-09-22 07:30:22 +00:00
|
|
|
struct net_bridge *br = pg->key.port->br;
|
2020-09-07 09:56:13 +00:00
|
|
|
struct hlist_node *tmp;
|
|
|
|
bool changed;
|
2010-02-27 19:41:45 +00:00
|
|
|
|
|
|
|
spin_lock(&br->multicast_lock);
|
|
|
|
if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
|
2016-02-03 08:57:05 +00:00
|
|
|
hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
|
2010-02-27 19:41:45 +00:00
|
|
|
goto out;
|
|
|
|
|
2020-09-07 09:56:13 +00:00
|
|
|
changed = !!(pg->filter_mode == MCAST_EXCLUDE);
|
|
|
|
pg->filter_mode = MCAST_INCLUDE;
|
|
|
|
hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
|
|
|
|
if (!timer_pending(&src_ent->timer)) {
|
2021-01-20 14:52:03 +00:00
|
|
|
br_multicast_del_group_src(src_ent, false);
|
2020-09-07 09:56:13 +00:00
|
|
|
changed = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hlist_empty(&pg->src_list)) {
|
|
|
|
br_multicast_find_del_pg(br, pg);
|
|
|
|
} else if (changed) {
|
2020-09-22 07:30:22 +00:00
|
|
|
struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2020-09-22 07:30:24 +00:00
|
|
|
if (changed && br_multicast_is_star_g(&pg->key.addr))
|
|
|
|
br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
|
|
|
|
|
2020-09-07 09:56:13 +00:00
|
|
|
if (WARN_ON(!mp))
|
|
|
|
goto out;
|
|
|
|
br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
|
|
|
|
}
|
2010-02-27 19:41:45 +00:00
|
|
|
out:
|
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
}
|
|
|
|
|
2020-09-07 09:56:19 +00:00
|
|
|
static void br_multicast_gc(struct hlist_head *head)
|
|
|
|
{
|
|
|
|
struct net_bridge_mcast_gc *gcent;
|
|
|
|
struct hlist_node *tmp;
|
|
|
|
|
|
|
|
hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
|
|
|
|
hlist_del_init(&gcent->gc_node);
|
|
|
|
gcent->destroy(gcent);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
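br_multicast_gc() consumes a private list; producers (the *_del_* helpers above) queue objects on br->mcast_gc_list and schedule br->mcast_gc_work. A sketch of the worker side, assuming a handler of this shape exists elsewhere in the file (the name below is illustrative):

static void example_mcast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	/* detach the pending list under the lock, destroy it unlocked */
	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}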
2021-07-19 17:06:32 +00:00
|
|
|
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
|
|
|
|
struct net_bridge_mcast_port *pmctx,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct net_bridge_vlan *vlan = NULL;
|
|
|
|
|
|
|
|
if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
|
|
|
|
vlan = pmctx->vlan;
|
|
|
|
else if (br_multicast_ctx_is_vlan(brmctx))
|
|
|
|
vlan = brmctx->vlan;
|
|
|
|
|
|
|
|
if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
|
|
|
|
u16 vlan_proto;
|
|
|
|
|
|
|
|
if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
|
|
|
|
return;
|
|
|
|
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
|
2021-07-19 17:06:32 +00:00
|
|
|
struct net_bridge_mcast_port *pmctx,
|
2020-09-07 09:56:09 +00:00
|
|
|
struct net_bridge_port_group *pg,
|
|
|
|
__be32 ip_dst, __be32 group,
|
|
|
|
bool with_srcs, bool over_lmqt,
|
2020-09-07 09:56:10 +00:00
|
|
|
u8 sflag, u8 *igmp_type,
|
|
|
|
bool *need_rexmit)
|
2010-02-27 19:41:45 +00:00
|
|
|
{
|
2020-09-22 07:30:22 +00:00
|
|
|
struct net_bridge_port *p = pg ? pg->key.port : NULL;
|
2020-09-07 09:56:09 +00:00
|
|
|
struct net_bridge_group_src *ent;
|
|
|
|
size_t pkt_size, igmp_hdr_size;
|
|
|
|
unsigned long now = jiffies;
|
2016-11-21 12:03:24 +00:00
|
|
|
struct igmpv3_query *ihv3;
|
2020-09-07 09:56:09 +00:00
|
|
|
void *csum_start = NULL;
|
|
|
|
__sum16 *csum = NULL;
|
2010-02-27 19:41:45 +00:00
|
|
|
struct sk_buff *skb;
|
|
|
|
struct igmphdr *ih;
|
|
|
|
struct ethhdr *eth;
|
2020-09-07 09:56:09 +00:00
|
|
|
unsigned long lmqt;
|
2010-02-27 19:41:45 +00:00
|
|
|
struct iphdr *iph;
|
2020-09-07 09:56:09 +00:00
|
|
|
u16 lmqt_srcs = 0;
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2016-11-21 12:03:24 +00:00
|
|
|
igmp_hdr_size = sizeof(*ih);
|
2021-07-19 17:06:24 +00:00
|
|
|
if (brmctx->multicast_igmp_version == 3) {
|
2016-11-21 12:03:24 +00:00
|
|
|
igmp_hdr_size = sizeof(*ihv3);
|
2020-09-07 09:56:09 +00:00
|
|
|
if (pg && with_srcs) {
|
2021-07-19 17:06:24 +00:00
|
|
|
lmqt = now + (brmctx->multicast_last_member_interval *
|
|
|
|
brmctx->multicast_last_member_count);
|
2020-09-07 09:56:09 +00:00
|
|
|
hlist_for_each_entry(ent, &pg->src_list, node) {
|
|
|
|
if (over_lmqt == time_after(ent->timer.expires,
|
|
|
|
lmqt) &&
|
|
|
|
ent->src_query_rexmit_cnt > 0)
|
|
|
|
lmqt_srcs++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!lmqt_srcs)
|
|
|
|
return NULL;
|
|
|
|
igmp_hdr_size += lmqt_srcs * sizeof(__be32);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
|
|
|
|
if ((p && pkt_size > p->dev->mtu) ||
|
2021-07-19 17:06:25 +00:00
|
|
|
pkt_size > brmctx->br->dev->mtu)
|
2020-09-07 09:56:09 +00:00
|
|
|
return NULL;
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
|
2010-02-27 19:41:45 +00:00
|
|
|
if (!skb)
|
|
|
|
goto out;
|
|
|
|
|
2021-07-19 17:06:32 +00:00
|
|
|
__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
|
2010-02-27 19:41:45 +00:00
|
|
|
skb->protocol = htons(ETH_P_IP);
|
|
|
|
|
|
|
|
skb_reset_mac_header(skb);
|
|
|
|
eth = eth_hdr(skb);
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
|
2020-09-07 09:56:09 +00:00
|
|
|
ip_eth_mc_map(ip_dst, eth->h_dest);
|
2010-02-27 19:41:45 +00:00
|
|
|
eth->h_proto = htons(ETH_P_IP);
|
|
|
|
skb_put(skb, sizeof(*eth));
|
|
|
|
|
|
|
|
skb_set_network_header(skb, skb->len);
|
|
|
|
iph = ip_hdr(skb);
|
2020-09-07 09:56:09 +00:00
|
|
|
iph->tot_len = htons(pkt_size - sizeof(*eth));
|
2010-02-27 19:41:45 +00:00
|
|
|
|
|
|
|
iph->version = 4;
|
|
|
|
iph->ihl = 6;
|
|
|
|
iph->tos = 0xc0;
|
|
|
|
iph->id = 0;
|
|
|
|
iph->frag_off = htons(IP_DF);
|
|
|
|
iph->ttl = 1;
|
|
|
|
iph->protocol = IPPROTO_IGMP;
|
2021-07-19 17:06:25 +00:00
|
|
|
iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
|
|
|
|
inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
|
2020-09-07 09:56:09 +00:00
|
|
|
iph->daddr = ip_dst;
|
2010-02-27 19:41:45 +00:00
|
|
|
((u8 *)&iph[1])[0] = IPOPT_RA;
|
|
|
|
((u8 *)&iph[1])[1] = 4;
|
|
|
|
((u8 *)&iph[1])[2] = 0;
|
|
|
|
((u8 *)&iph[1])[3] = 0;
|
|
|
|
ip_send_check(iph);
|
|
|
|
skb_put(skb, 24);
|
|
|
|
|
|
|
|
skb_set_transport_header(skb, skb->len);
|
2016-06-28 14:57:06 +00:00
|
|
|
*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2021-07-19 17:06:24 +00:00
|
|
|
switch (brmctx->multicast_igmp_version) {
|
2016-11-21 12:03:24 +00:00
|
|
|
case 2:
|
|
|
|
ih = igmp_hdr(skb);
|
|
|
|
ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
|
2021-07-19 17:06:24 +00:00
|
|
|
ih->code = (group ? brmctx->multicast_last_member_interval :
|
|
|
|
brmctx->multicast_query_response_interval) /
|
2016-11-21 12:03:24 +00:00
|
|
|
(HZ / IGMP_TIMER_SCALE);
|
|
|
|
ih->group = group;
|
|
|
|
ih->csum = 0;
|
2020-09-07 09:56:09 +00:00
|
|
|
csum = &ih->csum;
|
|
|
|
csum_start = (void *)ih;
|
2016-11-21 12:03:24 +00:00
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
ihv3 = igmpv3_query_hdr(skb);
|
|
|
|
ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
|
2021-07-19 17:06:24 +00:00
|
|
|
ihv3->code = (group ? brmctx->multicast_last_member_interval :
|
|
|
|
brmctx->multicast_query_response_interval) /
|
2016-11-21 12:03:24 +00:00
|
|
|
(HZ / IGMP_TIMER_SCALE);
|
|
|
|
ihv3->group = group;
|
2021-07-19 17:06:24 +00:00
|
|
|
ihv3->qqic = brmctx->multicast_query_interval / HZ;
|
2020-09-07 09:56:09 +00:00
|
|
|
ihv3->nsrcs = htons(lmqt_srcs);
|
2016-11-21 12:03:24 +00:00
|
|
|
ihv3->resv = 0;
|
2020-09-07 09:56:09 +00:00
|
|
|
ihv3->suppress = sflag;
|
2016-11-21 12:03:24 +00:00
|
|
|
ihv3->qrv = 2;
|
|
|
|
ihv3->csum = 0;
|
2020-09-07 09:56:09 +00:00
|
|
|
csum = &ihv3->csum;
|
|
|
|
csum_start = (void *)ihv3;
|
|
|
|
if (!pg || !with_srcs)
|
|
|
|
break;
|
|
|
|
|
|
|
|
lmqt_srcs = 0;
|
|
|
|
hlist_for_each_entry(ent, &pg->src_list, node) {
|
|
|
|
if (over_lmqt == time_after(ent->timer.expires,
|
|
|
|
lmqt) &&
|
|
|
|
ent->src_query_rexmit_cnt > 0) {
|
2020-09-22 07:30:16 +00:00
|
|
|
ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
|
2020-09-07 09:56:09 +00:00
|
|
|
ent->src_query_rexmit_cnt--;
|
2020-09-07 09:56:10 +00:00
|
|
|
if (need_rexmit && ent->src_query_rexmit_cnt)
|
|
|
|
*need_rexmit = true;
|
2020-09-07 09:56:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-11-21 12:03:24 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-09-07 09:56:09 +00:00
|
|
|
if (WARN_ON(!csum || !csum_start)) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
*csum = ip_compute_csum(csum_start, igmp_hdr_size);
|
2016-11-21 12:03:24 +00:00
|
|
|
skb_put(skb, igmp_hdr_size);
|
2010-02-27 19:41:45 +00:00
|
|
|
__skb_pull(skb, sizeof(*eth));
|
|
|
|
|
|
|
|
out:
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
|
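/* Frame assembled above: 14B Ethernet + 24B IPv4 (ihl = 6, i.e. the 20B
 * header plus a 4B Router Alert option, hence the "+ 4" in pkt_size and
 * skb_put(skb, 24)) + an IGMPv2/IGMPv3 query of igmp_hdr_size bytes. */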
2011-12-10 09:48:31 +00:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2021-07-19 17:06:25 +00:00
|
|
|
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
|
2021-07-19 17:06:32 +00:00
|
|
|
struct net_bridge_mcast_port *pmctx,
|
2020-09-07 09:56:09 +00:00
|
|
|
struct net_bridge_port_group *pg,
|
|
|
|
const struct in6_addr *ip6_dst,
|
|
|
|
const struct in6_addr *group,
|
|
|
|
bool with_srcs, bool over_llqt,
|
2020-09-07 09:56:10 +00:00
|
|
|
u8 sflag, u8 *igmp_type,
|
|
|
|
bool *need_rexmit)
|
2010-04-22 16:54:22 +00:00
|
|
|
{
|
2020-09-22 07:30:22 +00:00
|
|
|
struct net_bridge_port *p = pg ? pg->key.port : NULL;
|
2020-09-07 09:56:09 +00:00
|
|
|
struct net_bridge_group_src *ent;
|
|
|
|
size_t pkt_size, mld_hdr_size;
|
|
|
|
unsigned long now = jiffies;
|
2016-11-21 12:03:25 +00:00
|
|
|
struct mld2_query *mld2q;
|
2020-09-07 09:56:09 +00:00
|
|
|
void *csum_start = NULL;
|
2016-11-21 12:03:25 +00:00
|
|
|
unsigned long interval;
|
2020-09-07 09:56:09 +00:00
|
|
|
__sum16 *csum = NULL;
|
2010-04-22 16:54:22 +00:00
|
|
|
struct ipv6hdr *ip6h;
|
|
|
|
struct mld_msg *mldq;
|
2016-11-21 12:03:25 +00:00
|
|
|
struct sk_buff *skb;
|
2020-09-07 09:56:09 +00:00
|
|
|
unsigned long llqt;
|
2010-04-22 16:54:22 +00:00
|
|
|
struct ethhdr *eth;
|
2020-09-07 09:56:09 +00:00
|
|
|
u16 llqt_srcs = 0;
|
2010-04-22 16:54:22 +00:00
|
|
|
u8 *hopopt;
|
|
|
|
|
2016-11-21 12:03:25 +00:00
|
|
|
mld_hdr_size = sizeof(*mldq);
|
2021-07-19 17:06:24 +00:00
|
|
|
if (brmctx->multicast_mld_version == 2) {
|
2016-11-21 12:03:25 +00:00
|
|
|
mld_hdr_size = sizeof(*mld2q);
|
2020-09-07 09:56:09 +00:00
|
|
|
if (pg && with_srcs) {
|
2021-07-19 17:06:24 +00:00
|
|
|
llqt = now + (brmctx->multicast_last_member_interval *
|
|
|
|
brmctx->multicast_last_member_count);
|
2020-09-07 09:56:09 +00:00
|
|
|
hlist_for_each_entry(ent, &pg->src_list, node) {
|
|
|
|
if (over_llqt == time_after(ent->timer.expires,
|
|
|
|
llqt) &&
|
|
|
|
ent->src_query_rexmit_cnt > 0)
|
|
|
|
llqt_srcs++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!llqt_srcs)
|
|
|
|
return NULL;
|
|
|
|
mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
|
|
|
|
if ((p && pkt_size > p->dev->mtu) ||
|
2021-07-19 17:06:25 +00:00
|
|
|
pkt_size > brmctx->br->dev->mtu)
|
2020-09-07 09:56:09 +00:00
|
|
|
return NULL;
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
|
2010-04-22 16:54:22 +00:00
|
|
|
if (!skb)
|
|
|
|
goto out;
|
|
|
|
|
2021-07-19 17:06:32 +00:00
|
|
|
__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
|
2010-04-22 16:54:22 +00:00
|
|
|
skb->protocol = htons(ETH_P_IPV6);
|
|
|
|
|
|
|
|
/* Ethernet header */
|
|
|
|
skb_reset_mac_header(skb);
|
|
|
|
eth = eth_hdr(skb);
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
|
2010-04-22 16:54:22 +00:00
|
|
|
eth->h_proto = htons(ETH_P_IPV6);
|
|
|
|
skb_put(skb, sizeof(*eth));
|
|
|
|
|
|
|
|
/* IPv6 header + HbH option */
|
|
|
|
skb_set_network_header(skb, skb->len);
|
|
|
|
ip6h = ipv6_hdr(skb);
|
|
|
|
|
|
|
|
*(__force __be32 *)ip6h = htonl(0x60000000);
|
2016-11-21 12:03:25 +00:00
|
|
|
ip6h->payload_len = htons(8 + mld_hdr_size);
|
2010-04-22 16:54:22 +00:00
|
|
|
ip6h->nexthdr = IPPROTO_HOPOPTS;
|
|
|
|
ip6h->hop_limit = 1;
|
2020-09-07 09:56:09 +00:00
|
|
|
ip6h->daddr = *ip6_dst;
|
2021-07-19 17:06:25 +00:00
|
|
|
if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
|
|
|
|
&ip6h->daddr, 0, &ip6h->saddr)) {
|
2012-03-05 04:52:44 +00:00
|
|
|
kfree_skb(skb);
|
2021-07-19 17:06:25 +00:00
|
|
|
br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
|
2012-03-05 04:52:44 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2016-06-24 10:35:18 +00:00
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
|
2011-02-17 08:17:51 +00:00
|
|
|
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
|
2010-04-22 16:54:22 +00:00
|
|
|
|
|
|
|
hopopt = (u8 *)(ip6h + 1);
|
|
|
|
hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
|
|
|
|
hopopt[1] = 0; /* length of HbH */
|
|
|
|
hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
|
|
|
|
hopopt[3] = 2; /* Length of RA Option */
|
|
|
|
hopopt[4] = 0; /* Type = 0x0000 (MLD) */
|
|
|
|
hopopt[5] = 0;
|
2012-05-17 06:00:25 +00:00
|
|
|
hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
|
|
|
|
hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
|
2010-04-22 16:54:22 +00:00
|
|
|
|
|
|
|
skb_put(skb, sizeof(*ip6h) + 8);
|
|
|
|
|
|
|
|
/* ICMPv6 */
|
|
|
|
skb_set_transport_header(skb, skb->len);
|
2020-09-07 09:56:09 +00:00
|
|
|
interval = ipv6_addr_any(group) ?
|
2021-07-19 17:06:24 +00:00
|
|
|
brmctx->multicast_query_response_interval :
|
|
|
|
brmctx->multicast_last_member_interval;
|
2016-06-28 14:57:06 +00:00
|
|
|
*igmp_type = ICMPV6_MGM_QUERY;
|
2021-07-19 17:06:24 +00:00
|
|
|
switch (brmctx->multicast_mld_version) {
|
2016-11-21 12:03:25 +00:00
|
|
|
case 1:
|
|
|
|
mldq = (struct mld_msg *)icmp6_hdr(skb);
|
|
|
|
mldq->mld_type = ICMPV6_MGM_QUERY;
|
|
|
|
mldq->mld_code = 0;
|
|
|
|
mldq->mld_cksum = 0;
|
|
|
|
mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
|
|
|
|
mldq->mld_reserved = 0;
|
2020-09-07 09:56:09 +00:00
|
|
|
mldq->mld_mca = *group;
|
|
|
|
csum = &mldq->mld_cksum;
|
|
|
|
csum_start = (void *)mldq;
|
2016-11-21 12:03:25 +00:00
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
mld2q = (struct mld2_query *)icmp6_hdr(skb);
|
2017-01-16 23:11:35 +00:00
|
|
|
mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
|
2016-11-21 12:03:25 +00:00
|
|
|
mld2q->mld2q_type = ICMPV6_MGM_QUERY;
|
|
|
|
mld2q->mld2q_code = 0;
|
|
|
|
mld2q->mld2q_cksum = 0;
|
|
|
|
mld2q->mld2q_resv1 = 0;
|
|
|
|
mld2q->mld2q_resv2 = 0;
|
2020-09-07 09:56:09 +00:00
|
|
|
mld2q->mld2q_suppress = sflag;
|
2016-11-21 12:03:25 +00:00
|
|
|
mld2q->mld2q_qrv = 2;
|
2020-09-07 09:56:09 +00:00
|
|
|
mld2q->mld2q_nsrcs = htons(llqt_srcs);
|
2021-07-19 17:06:24 +00:00
|
|
|
mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
|
2020-09-07 09:56:09 +00:00
|
|
|
mld2q->mld2q_mca = *group;
|
|
|
|
csum = &mld2q->mld2q_cksum;
|
|
|
|
csum_start = (void *)mld2q;
|
|
|
|
if (!pg || !with_srcs)
|
|
|
|
break;
|
|
|
|
|
|
|
|
llqt_srcs = 0;
|
|
|
|
hlist_for_each_entry(ent, &pg->src_list, node) {
|
|
|
|
if (over_llqt == time_after(ent->timer.expires,
|
|
|
|
llqt) &&
|
|
|
|
ent->src_query_rexmit_cnt > 0) {
|
2020-09-22 07:30:16 +00:00
|
|
|
mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
|
2020-09-07 09:56:09 +00:00
|
|
|
ent->src_query_rexmit_cnt--;
|
2020-09-07 09:56:10 +00:00
|
|
|
if (need_rexmit && ent->src_query_rexmit_cnt)
|
|
|
|
*need_rexmit = true;
|
2020-09-07 09:56:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-11-21 12:03:25 +00:00
|
|
|
break;
|
|
|
|
}
|
2010-04-22 16:54:22 +00:00
|
|
|
|
2020-09-07 09:56:09 +00:00
|
|
|
if (WARN_ON(!csum || !csum_start)) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
|
|
|
|
IPPROTO_ICMPV6,
|
|
|
|
csum_partial(csum_start, mld_hdr_size, 0));
|
|
|
|
skb_put(skb, mld_hdr_size);
|
2010-04-22 16:54:22 +00:00
|
|
|
__skb_pull(skb, sizeof(*eth));
|
|
|
|
|
|
|
|
out:
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
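/* The IPv6 variant mirrors the IPv4 one: 14B Ethernet + 40B IPv6 header +
 * an 8B Hop-by-Hop extension carrying the Router Alert option (hence the
 * "+ 8" in pkt_size) + an MLDv1/MLDv2 query checksummed with
 * csum_ipv6_magic(). */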
2021-07-19 17:06:25 +00:00
|
|
|
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
|
2021-07-19 17:06:32 +00:00
|
|
|
struct net_bridge_mcast_port *pmctx,
|
2020-09-07 09:56:09 +00:00
|
|
|
struct net_bridge_port_group *pg,
|
|
|
|
struct br_ip *ip_dst,
|
|
|
|
struct br_ip *group,
|
|
|
|
bool with_srcs, bool over_lmqt,
|
2020-09-07 09:56:10 +00:00
|
|
|
u8 sflag, u8 *igmp_type,
|
|
|
|
bool *need_rexmit)
|
2010-04-18 03:42:07 +00:00
|
|
|
{
|
2020-09-07 09:56:09 +00:00
|
|
|
__be32 ip4_dst;
|
|
|
|
|
|
|
|
switch (group->proto) {
|
2010-04-18 03:42:07 +00:00
|
|
|
case htons(ETH_P_IP):
|
2020-09-22 07:30:17 +00:00
|
|
|
ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
|
2021-07-19 17:06:32 +00:00
|
|
|
return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
|
2020-09-22 07:30:17 +00:00
|
|
|
ip4_dst, group->dst.ip4,
|
2020-09-07 09:56:09 +00:00
|
|
|
with_srcs, over_lmqt,
|
2020-09-07 09:56:10 +00:00
|
|
|
sflag, igmp_type,
|
|
|
|
need_rexmit);
|
2011-12-10 09:48:31 +00:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2020-09-07 09:56:09 +00:00
|
|
|
case htons(ETH_P_IPV6): {
|
|
|
|
struct in6_addr ip6_dst;
|
|
|
|
|
|
|
|
if (ip_dst)
|
2020-09-22 07:30:17 +00:00
|
|
|
ip6_dst = ip_dst->dst.ip6;
|
2020-09-07 09:56:09 +00:00
|
|
|
else
|
|
|
|
ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
|
|
|
|
htonl(1));
|
|
|
|
|
2021-07-19 17:06:32 +00:00
|
|
|
return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
|
2020-09-22 07:30:17 +00:00
|
|
|
&ip6_dst, &group->dst.ip6,
|
2020-09-07 09:56:09 +00:00
|
|
|
with_srcs, over_lmqt,
|
2020-09-07 09:56:10 +00:00
|
|
|
sflag, igmp_type,
|
|
|
|
need_rexmit);
|
2020-09-07 09:56:09 +00:00
|
|
|
}
|
2010-04-22 16:54:22 +00:00
|
|
|
#endif
|
2010-04-18 03:42:07 +00:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-12-11 22:23:08 +00:00
|
|
|
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
|
2016-11-21 12:03:24 +00:00
|
|
|
struct br_ip *group)
|
2010-02-27 19:41:45 +00:00
|
|
|
{
|
|
|
|
struct net_bridge_mdb_entry *mp;
|
2010-12-10 03:18:04 +00:00
|
|
|
int err;
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2018-12-05 13:14:24 +00:00
|
|
|
mp = br_mdb_ip_get(br, group);
|
|
|
|
if (mp)
|
|
|
|
return mp;
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2018-12-05 13:14:24 +00:00
|
|
|
if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
|
2023-02-02 17:59:25 +00:00
|
|
|
trace_br_mdb_full(br->dev, group);
|
2022-02-15 16:53:03 +00:00
|
|
|
br_mc_disabled_update(br->dev, false, NULL);
|
2018-12-05 13:14:24 +00:00
|
|
|
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
|
|
|
|
return ERR_PTR(-E2BIG);
|
2010-02-27 19:41:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
|
|
|
|
if (unlikely(!mp))
|
2010-12-10 03:18:04 +00:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2010-02-27 19:41:45 +00:00
|
|
|
|
|
|
|
mp->br = br;
|
2010-04-18 03:42:07 +00:00
|
|
|
mp->addr = *group;
|
2020-09-07 09:56:19 +00:00
|
|
|
mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
|
2017-11-03 06:21:10 +00:00
|
|
|
timer_setup(&mp->timer, br_multicast_group_expired, 0);
|
2018-12-05 13:14:24 +00:00
|
|
|
err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
|
|
|
|
br_mdb_rht_params);
|
|
|
|
if (err) {
|
|
|
|
kfree(mp);
|
|
|
|
mp = ERR_PTR(err);
|
|
|
|
} else {
|
|
|
|
hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
|
|
|
|
}
|
2013-07-20 03:07:16 +00:00
|
|
|
|
2010-02-27 19:41:45 +00:00
|
|
|
return mp;
|
|
|
|
}
|
|
|
|
|
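/* Unlike the per-port/per-VLAN limits earlier in this file, hitting
 * hash_max above is fatal for snooping: BROPT_MULTICAST_ENABLED is cleared
 * for the whole bridge and the caller gets -E2BIG. */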
2020-09-07 09:56:07 +00:00
|
|
|
static void br_multicast_group_src_expired(struct timer_list *t)
|
|
|
|
{
|
|
|
|
struct net_bridge_group_src *src = from_timer(src, t, timer);
|
|
|
|
struct net_bridge_port_group *pg;
|
|
|
|
struct net_bridge *br = src->br;
|
|
|
|
|
|
|
|
spin_lock(&br->multicast_lock);
|
|
|
|
if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
|
|
|
|
timer_pending(&src->timer))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
pg = src->pg;
|
|
|
|
if (pg->filter_mode == MCAST_INCLUDE) {
|
2021-01-20 14:52:03 +00:00
|
|
|
br_multicast_del_group_src(src, false);
|
2020-09-07 09:56:07 +00:00
|
|
|
if (!hlist_empty(&pg->src_list))
|
|
|
|
goto out;
|
|
|
|
br_multicast_find_del_pg(br, pg);
|
2020-09-22 07:30:25 +00:00
|
|
|
} else {
|
|
|
|
br_multicast_fwd_src_handle(src);
|
2020-09-07 09:56:07 +00:00
|
|
|
}
|
2020-09-22 07:30:25 +00:00
|
|
|
|
2020-09-07 09:56:07 +00:00
|
|
|
out:
|
|
|
|
spin_unlock(&br->multicast_lock);
|
|
|
|
}
|
|
|
|
|
2021-01-20 14:51:58 +00:00
|
|
|
struct net_bridge_group_src *
|
2020-09-07 09:56:07 +00:00
|
|
|
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
|
|
|
|
{
|
|
|
|
struct net_bridge_group_src *ent;
|
|
|
|
|
|
|
|
switch (ip->proto) {
|
|
|
|
case htons(ETH_P_IP):
|
|
|
|
hlist_for_each_entry(ent, &pg->src_list, node)
|
2020-09-22 07:30:16 +00:00
|
|
|
if (ip->src.ip4 == ent->addr.src.ip4)
|
2020-09-07 09:56:07 +00:00
|
|
|
return ent;
|
|
|
|
break;
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
case htons(ETH_P_IPV6):
|
|
|
|
hlist_for_each_entry(ent, &pg->src_list, node)
|
2020-09-22 07:30:16 +00:00
|
|
|
if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
|
2020-09-07 09:56:07 +00:00
|
|
|
return ent;
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2022-12-10 14:56:24 +00:00
|
|
|
struct net_bridge_group_src *
|
2020-09-07 09:56:07 +00:00
|
|
|
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
|
|
|
|
{
|
|
|
|
struct net_bridge_group_src *grp_src;
|
|
|
|
|
|
|
|
if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
switch (src_ip->proto) {
|
|
|
|
case htons(ETH_P_IP):
|
2020-09-22 07:30:16 +00:00
|
|
|
if (ipv4_is_zeronet(src_ip->src.ip4) ||
|
|
|
|
ipv4_is_multicast(src_ip->src.ip4))
|
2020-09-07 09:56:07 +00:00
|
|
|
return NULL;
|
|
|
|
break;
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
case htons(ETH_P_IPV6):
|
2020-09-22 07:30:16 +00:00
|
|
|
if (ipv6_addr_any(&src_ip->src.ip6) ||
|
|
|
|
ipv6_addr_is_multicast(&src_ip->src.ip6))
|
2020-09-07 09:56:07 +00:00
|
|
|
return NULL;
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
|
|
|
|
if (unlikely(!grp_src))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
grp_src->pg = pg;
|
2020-09-22 07:30:22 +00:00
|
|
|
grp_src->br = pg->key.port->br;
|
2020-09-07 09:56:07 +00:00
|
|
|
grp_src->addr = *src_ip;
|
2020-09-07 09:56:19 +00:00
|
|
|
grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
|
2020-09-07 09:56:07 +00:00
|
|
|
timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
|
|
|
|
|
|
|
|
hlist_add_head_rcu(&grp_src->node, &pg->src_list);
|
|
|
|
pg->src_ents++;
|
|
|
|
|
|
|
|
return grp_src;
|
|
|
|
}
|
|
|
|
|
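/* Per-group source entries are capped at PG_SRC_ENT_LIMIT, and clearly
 * invalid sources (zeronet/multicast IPv4, any/multicast IPv6) are refused,
 * so one IGMPv3/MLDv2 sender cannot grow a group's source list without
 * bound. */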
2012-12-11 22:23:08 +00:00
|
|
|
struct net_bridge_port_group *br_multicast_new_port_group(
|
|
|
|
struct net_bridge_port *port,
|
2022-12-06 10:58:09 +00:00
|
|
|
const struct br_ip *group,
|
2012-12-14 22:09:51 +00:00
|
|
|
struct net_bridge_port_group __rcu *next,
|
2017-01-21 20:01:32 +00:00
|
|
|
unsigned char flags,
|
2020-09-07 09:56:07 +00:00
|
|
|
const unsigned char *src,
|
2020-09-22 07:30:21 +00:00
|
|
|
u8 filter_mode,
|
2023-02-02 17:59:20 +00:00
|
|
|
u8 rt_protocol,
|
|
|
|
struct netlink_ext_ack *extack)
|
2012-12-11 22:23:08 +00:00
|
|
|
{
|
|
|
|
struct net_bridge_port_group *p;
|
2023-02-02 17:59:25 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
err = br_multicast_port_ngroups_inc(port, group, extack);
|
|
|
|
if (err)
|
|
|
|
return NULL;
|
2012-12-11 22:23:08 +00:00
|
|
|
|
|
|
|
p = kzalloc(sizeof(*p), GFP_ATOMIC);
|
2023-02-02 17:59:21 +00:00
|
|
|
if (unlikely(!p)) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
|
2023-02-02 17:59:25 +00:00
|
|
|
goto dec_out;
|
2023-02-02 17:59:21 +00:00
|
|
|
}
|
2012-12-11 22:23:08 +00:00
|
|
|
|
2020-09-22 07:30:22 +00:00
|
|
|
p->key.addr = *group;
|
|
|
|
p->key.port = port;
|
2016-02-03 08:57:05 +00:00
|
|
|
p->flags = flags;
|
2020-09-07 09:56:07 +00:00
|
|
|
p->filter_mode = filter_mode;
|
2020-09-22 07:30:21 +00:00
|
|
|
p->rt_protocol = rt_protocol;
|
2021-01-20 14:51:55 +00:00
|
|
|
p->eht_host_tree = RB_ROOT;
|
2021-01-20 14:51:56 +00:00
|
|
|
p->eht_set_tree = RB_ROOT;
|
2020-09-07 09:56:19 +00:00
|
|
|
p->mcast_gc.destroy = br_multicast_destroy_port_group;
|
2020-09-07 09:56:07 +00:00
|
|
|
INIT_HLIST_HEAD(&p->src_list);
|
2020-09-22 07:30:22 +00:00
|
|
|
|
|
|
|
if (!br_multicast_is_star_g(group) &&
|
|
|
|
rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
|
|
|
|
br_sg_port_rht_params)) {
|
2023-02-02 17:59:21 +00:00
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
|
2023-02-02 17:59:23 +00:00
|
|
|
goto free_out;
|
2020-09-22 07:30:22 +00:00
|
|
|
}
|
|
|
|
|
2012-12-13 06:51:28 +00:00
|
|
|
rcu_assign_pointer(p->next, next);
|
2017-11-03 06:21:10 +00:00
|
|
|
timer_setup(&p->timer, br_multicast_port_group_expired, 0);
|
2020-09-07 09:56:10 +00:00
|
|
|
timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
|
|
|
|
hlist_add_head(&p->mglist, &port->mglist);
|
2017-01-21 20:01:32 +00:00
|
|
|
|
|
|
|
if (src)
|
|
|
|
memcpy(p->eth_addr, src, ETH_ALEN);
|
|
|
|
else
|
2019-03-20 02:06:57 +00:00
|
|
|
eth_broadcast_addr(p->eth_addr);
|
2017-01-21 20:01:32 +00:00
|
|
|
|
2012-12-11 22:23:08 +00:00
|
|
|
return p;
|
2023-02-02 17:59:23 +00:00
|
|
|
|
|
|
|
free_out:
|
|
|
|
kfree(p);
|
2023-02-02 17:59:25 +00:00
|
|
|
dec_out:
|
|
|
|
br_multicast_port_ngroups_dec(port, group->vid);
|
2023-02-02 17:59:23 +00:00
|
|
|
return NULL;
|
2012-12-11 22:23:08 +00:00
|
|
|
}
|
|
|
|
|
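/* The error unwinding above mirrors setup in reverse: free_out drops the
 * allocation, then dec_out returns the ngroups reservation taken by
 * br_multicast_port_ngroups_inc() on entry. */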
2023-02-02 17:59:22 +00:00
|
|
|
void br_multicast_del_port_group(struct net_bridge_port_group *p)
|
|
|
|
{
|
|
|
|
struct net_bridge_port *port = p->key.port;
|
2023-02-02 17:59:25 +00:00
|
|
|
__u16 vid = p->key.addr.vid;
|
2023-02-02 17:59:22 +00:00
|
|
|
|
|
|
|
hlist_del_init(&p->mglist);
|
|
|
|
if (!br_multicast_is_star_g(&p->key.addr))
|
|
|
|
rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
|
|
|
|
br_sg_port_rht_params);
|
|
|
|
kfree(p);
|
2023-02-02 17:59:25 +00:00
|
|
|
br_multicast_port_ngroups_dec(port, vid);
|
2023-02-02 17:59:22 +00:00
|
|
|
}
|
|
|
|
|
2021-07-21 14:01:27 +00:00
|
|
|
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
|
|
|
|
struct net_bridge_mdb_entry *mp, bool notify)
|
2019-08-17 11:22:13 +00:00
|
|
|
{
|
|
|
|
if (!mp->host_joined) {
|
|
|
|
mp->host_joined = true;
|
2020-09-22 07:30:26 +00:00
|
|
|
if (br_multicast_is_star_g(&mp->addr))
|
|
|
|
br_multicast_star_g_host_state(mp);
|
2019-08-17 11:22:13 +00:00
|
|
|
if (notify)
|
2020-09-07 09:56:12 +00:00
|
|
|
br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
|
2019-08-17 11:22:13 +00:00
|
|
|
}
|
2020-10-28 23:38:31 +00:00
|
|
|
|
|
|
|
if (br_group_is_l2(&mp->addr))
|
|
|
|
return;
|
|
|
|
|
2021-07-21 14:01:27 +00:00
|
|
|
mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
|
2019-08-17 11:22:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
|
|
|
|
{
|
|
|
|
if (!mp->host_joined)
|
|
|
|
return;
|
|
|
|
|
|
|
|
mp->host_joined = false;
|
2020-09-22 07:30:26 +00:00
|
|
|
if (br_multicast_is_star_g(&mp->addr))
|
|
|
|
br_multicast_star_g_host_state(mp);
|
2019-08-17 11:22:13 +00:00
|
|
|
if (notify)
|
2020-09-07 09:56:12 +00:00
|
|
|
br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
|
2019-08-17 11:22:13 +00:00
|
|
|
}
|
|
|
|
|
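/* host_joined models the bridge device itself as a group member; join and
 * leave notify user space (RTM_NEWMDB/RTM_DELMDB) only on a real state
 * change, and L2 (non-IP) groups never arm the membership timer. */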
2020-09-22 07:30:23 +00:00
|
|
|
static struct net_bridge_port_group *
|
2021-07-19 17:06:25 +00:00
|
|
|
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
|
|
|
|
struct net_bridge_mcast_port *pmctx,
|
2020-09-22 07:30:23 +00:00
|
|
|
struct br_ip *group,
|
|
|
|
const unsigned char *src,
|
|
|
|
u8 filter_mode,
|
2020-09-22 07:30:25 +00:00
|
|
|
bool igmpv2_mldv1,
|
|
|
|
bool blocked)
|
2010-02-27 19:41:45 +00:00
|
|
|
{
|
2010-11-15 06:38:10 +00:00
|
|
|
struct net_bridge_port_group __rcu **pp;
|
2020-09-22 07:30:23 +00:00
|
|
|
struct net_bridge_port_group *p = NULL;
|
2016-11-21 12:03:24 +00:00
|
|
|
struct net_bridge_mdb_entry *mp;
|
2013-10-19 22:58:57 +00:00
|
|
|
unsigned long now = jiffies;
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2021-07-19 17:06:31 +00:00
|
|
|
if (!br_multicast_ctx_should_use(brmctx, pmctx))
|
2010-02-27 19:41:45 +00:00
|
|
|
goto out;
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
mp = br_multicast_new_group(brmctx->br, group);
|
2010-12-10 03:18:04 +00:00
|
|
|
if (IS_ERR(mp))
|
2021-02-04 07:05:49 +00:00
|
|
|
return ERR_CAST(mp);
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
if (!pmctx) {
|
2021-07-21 14:01:27 +00:00
|
|
|
br_multicast_host_join(brmctx, mp, true);
|
2010-02-27 19:41:45 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2010-11-15 06:38:10 +00:00
|
|
|
for (pp = &mp->ports;
|
2021-07-19 17:06:25 +00:00
|
|
|
(p = mlock_dereference(*pp, brmctx->br)) != NULL;
|
2010-11-15 06:38:10 +00:00
|
|
|
pp = &p->next) {
|
2021-07-19 17:06:25 +00:00
|
|
|
if (br_port_group_equal(p, pmctx->port, src))
|
2013-10-19 22:58:57 +00:00
|
|
|
goto found;
|
2021-07-19 17:06:25 +00:00
|
|
|
if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
|
2010-02-27 19:41:45 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
|
2023-02-02 17:59:20 +00:00
|
|
|
filter_mode, RTPROT_KERNEL, NULL);
|
2020-09-22 07:30:23 +00:00
|
|
|
if (unlikely(!p)) {
|
|
|
|
p = ERR_PTR(-ENOMEM);
|
|
|
|
goto out;
|
|
|
|
}
|
2010-02-27 19:41:45 +00:00
|
|
|
rcu_assign_pointer(*pp, p);
|
2020-09-22 07:30:25 +00:00
|
|
|
if (blocked)
|
|
|
|
p->flags |= MDB_PG_FLAGS_BLOCKED;
|
2021-07-19 17:06:25 +00:00
|
|
|
br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);
|
2010-02-27 19:41:45 +00:00
|
|
|
|
2013-10-19 22:58:57 +00:00
|
|
|
found:
|
2020-09-07 09:56:14 +00:00
|
|
|
if (igmpv2_mldv1)
|
2021-07-19 17:06:24 +00:00
|
|
|
mod_timer(&p->timer,
|
2021-07-19 17:06:25 +00:00
|
|
|
now + brmctx->multicast_membership_interval);
|
2020-09-07 09:56:12 +00:00
|
|
|
|
2010-02-27 19:41:45 +00:00
|
|
|
out:
|
2020-09-22 07:30:23 +00:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|

static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif
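
/* Worked example for the filter_mode mapping above: an IGMPv2 or MLDv1
 * join carries no source list, so it is entered as MCAST_EXCLUDE with an
 * empty exclude set, i.e. a classic *,G entry refreshed by the group
 * timer. An IGMPv3/MLDv2 join instead starts in MCAST_INCLUDE and its
 * per-source state is managed by the record handlers further below.
 */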

static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
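
/* Note on the helper above: the mrouter state change is pushed to
 * offloading hardware through switchdev with SWITCHDEV_F_DEFER, since
 * callers may hold the bridge multicast_lock (a spinlock) or run from
 * timer context, where a blocking attribute set would not be safe.
 */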

static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
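
/* Note on the two-pass send above: a group-and-source query with the
 * suppress-router-side-processing flag (sflag) set first covers only the
 * sources whose timers are above LMQT; the goto then takes a second pass
 * with over_lmqt cleared so the sources at or below LMQT are queried as
 * well, this time without the suppress flag, matching the split that
 * RFC 3376/RFC 3810 require for lowered source timers.
 */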

static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}

static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
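
/* The pair above implements a standard seqcount protocol so readers such
 * as the netlink querier-state dump can take a snapshot of the elected
 * querier without holding the multicast_lock. A minimal reader sketch,
 * assuming a caller that already keeps brmctx alive (illustrative only):
 *
 *	struct bridge_mcast_querier q;
 *
 *	br_multicast_read_querier(&brmctx->ip4_querier, &q);
 *	// q now holds a consistent (port_ifidx, addr) pair; the do/while
 *	// retry guarantees no torn read against a concurrent
 *	// br_multicast_update_querier().
 */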

static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
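
/* Worked example for the rearm arithmetic above, assuming the usual
 * bridge defaults (startup_query_count = 2, startup_query_interval =
 * 31.25s, query_interval = 125s): the first couple of own queries after
 * the querier starts are spaced ~31s apart while startup_sent is still
 * below the startup count, and every query after that is rearmed at the
 * full 125s general query interval.
 */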

static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif

static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
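
/* Note on the rexmit timer above: each expiry sends at most one group
 * query (while grp_query_rexmit_cnt is non-zero) plus one
 * group-and-source query for sources that still have retransmissions
 * pending, then rearms itself one last-member interval out until both
 * kinds of pending retransmission have drained.
 */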

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}

void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}

void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}

int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
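
/* Note on the timer dance above: try_to_del_timer_sync() returns >= 0
 * when the timer was inactive or could be deactivated; del_timer() is
 * the fallback when the handler is currently running elsewhere. Either
 * way, a successfully stopped own-query timer is rearmed at the current
 * jiffies so the (re)enabled context starts its startup query cycle
 * immediately.
 */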

static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */
		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}

static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}

static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
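
/* Worked example for the helper above, assuming the default robustness
 * of 2 and a 1s last-member interval (so LMQT = 2s): a BLOCK/TO_IN
 * record marks sources with BR_SGRP_F_SEND; each marked source timer
 * above now + 2s is lowered to exactly that bound, and, when this bridge
 * is the active querier, src_query_rexmit_cnt is loaded so the
 * group-and-source query is retransmitted, with the first retransmission
 * scheduled one last-member interval out.
 */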

static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
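
/* Worked example for the INCLUDE/ALLOW table above: state INCLUDE(S1)
 * receiving ALLOW(S2) becomes INCLUDE(S1,S2), with only the S2 source
 * timer (re)armed to GMI; S1 keeps aging independently, so a source that
 * is no longer reported eventually times out on its own.
 */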

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
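
/* Worked example for br_multicast_isexc(): a port group in
 * INCLUDE(S1,S2) receiving MODE_IS_EXCLUDE(S2,S3) transitions to EXCLUDE
 * mode with S2 kept, S3 added with a zero source timer (i.e. blocked)
 * and S1 deleted; the group timer is then armed to GMI so the EXCLUDE
 * state itself ages out if not refreshed.
 */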

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
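
/* Worked example for br_multicast_toin(): EXCLUDE(X={S1}, Y={S2})
 * receiving TO_IN(S3) refreshes/creates S3 with a GMI timer, queries the
 * still-active previously-reported sources (Q(G,X-A)) and also sends a
 * group query Q(G), since the reporter's switch to INCLUDE may mean the
 * last EXCLUDE-mode listener is gone.
 */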
/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2020-09-07 09:56:17 +00:00
|
|
|
/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

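/* Dispatch a BLOCK_OLD_SOURCES group record based on the port group's
 * current filter mode, then delete the port group if it was left with no
 * sources in INCLUDE mode or if EHT accounting says it should go.
 */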
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

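/* Linear walk of the mdb entry's port group list, looking for the group
 * that matches the port and, when multicast-to-unicast is enabled on it,
 * the reporting host's MAC address.
 */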
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

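/* Walk all group records in an IGMPv3 membership report. Each record is
 * bounds-checked before use; empty TO_INCLUDE/IS_INCLUDE records act as
 * leaves, everything else joins the group and is then fed to the
 * per-record source list handlers under the multicast lock.
 */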
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
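/* MLDv2 counterpart of br_ip4_multicast_igmp3_report(): parse each group
 * record, validate its length against the transport header, and run the
 * same source-list state machine with IPv6 addresses.
 */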
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

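/* Querier election: the querier with the lowest source address wins. An
 * unset (0.0.0.0) stored IPv4 querier is always replaced, and an update
 * is also accepted when neither the own-query nor the other-query timer
 * is running.
 */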
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}

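/* Resolve the elected querier's port from its saved ifindex under RCU;
 * returns NULL if the index is stale or no longer refers to a port of
 * this bridge.
 */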
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;

	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}

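/* Worst-case netlink size of the querier state nest dumped by
 * br_multicast_dump_querier_state() below.
 */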
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +		/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}

/* protected by rtnl or rcu */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

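/* (Re)arm the "other querier present" timer; delay_time is refreshed
 * only when the timer is not already running, so it keeps the earliest
 * time at which the currently elected querier may be considered gone.
 */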
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

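/* Propagate the port's multicast router state to switchdev drivers so
 * offloading hardware can treat it as a multicast router port.
 */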
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

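/* True if the port is not on the router list of the other protocol
 * family, i.e. adding rnode makes it a multicast router port for the
 * first time.
 */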
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

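/* Common helper for query/advertisement handling: mark the bridge itself
 * (!pmctx) or the given port as having a multicast router behind it and
 * refresh the corresponding router timer, honouring the configured
 * per-port router type.
 */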
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

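/* A query was received from another querier candidate: run the election,
 * and if the sender won start the "other querier" timer and mark the
 * receiving port as a router port.
 */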
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

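/* Handle an IGMP query: derive the maximum response delay from the v2/v3
 * header, hand general queries to the querier election, and for
 * group-specific queries shorten the group and port group timers so
 * membership expires unless a report is seen in time.
 */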
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

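/* Handle a leave/done message: with fast leave the port group is deleted
 * immediately; otherwise, if we are the querier, send a group-specific
 * query and shorten the relevant group/port timers to the last member
 * interval so the entry expires quickly if nobody answers.
 */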
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

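/* Count an IGMP/MLD parse error in the per-CPU stats of the port, or of
 * the bridge itself when there is no port.
 */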
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

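/* A PIMv2 hello means a multicast router is attached, so mark the
 * receiving port as a router port.
 */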
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

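/* Multicast Router Discovery advertisement (RFC 4286): treat the sender
 * as a multicast router.
 */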
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

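/* Validate and demux an IPv4 multicast control packet: non-IGMP traffic
 * only sets the forwarding hints (or feeds PIM/MRD router discovery),
 * while IGMP reports, queries and leaves are dispatched to their
 * handlers and counted in the multicast stats.
 */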
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
|
|
|
|
struct net_bridge_mcast_port *pmctx,
|
2013-10-28 19:45:07 +00:00
|
|
|
struct sk_buff *skb,
|
|
|
|
u16 vid)
|
2010-04-22 16:54:22 +00:00
|
|
|
{
|
2021-07-19 17:06:25 +00:00
|
|
|
struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
|
2017-01-21 20:01:32 +00:00
|
|
|
const unsigned char *src;
|
2015-05-02 12:01:07 +00:00
|
|
|
struct mld_msg *mld;
|
2010-04-22 16:54:22 +00:00
|
|
|
int err;
|
|
|
|
|
2019-01-21 06:26:25 +00:00
|
|
|
err = ipv6_mc_check_mld(skb);
|
2010-04-22 16:54:22 +00:00
|
|
|
|
2021-04-25 15:27:35 +00:00
|
|
|
if (err == -ENOMSG || err == -ENODATA) {
|
2015-05-02 12:01:07 +00:00
|
|
|
if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
|
|
|
|
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
|
2021-04-25 15:27:35 +00:00
|
|
|
if (err == -ENODATA &&
|
|
|
|
ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
|
2021-07-19 17:06:25 +00:00
|
|
|
br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);
|
2019-01-21 06:26:28 +00:00
|
|
|
|
2010-04-22 16:54:22 +00:00
|
|
|
return 0;
|
2015-05-02 12:01:07 +00:00
|
|
|
} else if (err < 0) {
|
2021-07-19 17:06:25 +00:00
|
|
|
br_multicast_err_count(brmctx->br, p, skb->protocol);
|
2015-05-02 12:01:07 +00:00
|
|
|
return err;
|
2010-04-22 16:54:22 +00:00
|
|
|
}
|
|
|
|
|
2015-05-02 12:01:07 +00:00
|
|
|
mld = (struct mld_msg *)skb_transport_header(skb);
|
2016-06-28 14:57:06 +00:00
|
|
|
BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
|
2010-04-22 16:54:22 +00:00
|
|
|
|
2015-05-02 12:01:07 +00:00
|
|
|
switch (mld->mld_type) {
|
2010-04-22 16:54:22 +00:00
|
|
|
case ICMPV6_MGM_REPORT:
|
2017-01-21 20:01:32 +00:00
|
|
|
src = eth_hdr(skb)->h_source;
|
2011-06-13 15:06:58 +00:00
|
|
|
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
|
2021-07-19 17:06:25 +00:00
|
|
|
err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
|
|
|
|
vid, src, true);
|
2010-04-22 16:54:22 +00:00
|
|
|
break;
|
|
|
|
case ICMPV6_MLD2_REPORT:
|
2021-07-19 17:06:25 +00:00
|
|
|
err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
|
2010-04-22 16:54:22 +00:00
|
|
|
break;
|
|
|
|
case ICMPV6_MGM_QUERY:
|
2021-07-19 17:06:25 +00:00
|
|
|
err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
|
2010-04-22 16:54:22 +00:00
|
|
|
break;
|
|
|
|
case ICMPV6_MGM_REDUCTION:
|
2017-01-21 20:01:32 +00:00
|
|
|
src = eth_hdr(skb)->h_source;
|
2021-07-19 17:06:25 +00:00
|
|
|
br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
|
|
|
|
src);
|
2015-05-02 12:01:07 +00:00
|
|
|
break;
|
2010-04-22 16:54:22 +00:00
|
|
|
}
|
|
|
|
|
2021-07-19 17:06:25 +00:00
|
|
|
br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
|
2016-06-28 14:57:06 +00:00
|
|
|
BR_MCAST_DIR_RX);
|
|
|
|
|
2010-04-22 16:54:22 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif

int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
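
/* br_multicast_rcv() takes the bridge and port multicast contexts by
 * double pointer on purpose: when per-vlan snooping is enabled it
 * redirects them to the vlan's own contexts, so the caller continues
 * to operate on whichever context actually handled the frame.
 */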

static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}
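
/* The defaults set below follow the protocol-recommended values (see
 * e.g. RFC 2236 for IGMPv2): a 125s query interval, a 10s query
 * response interval, an other-querier-present interval of
 * 2 * 125s + 10s / 2 = 255s, a membership interval of
 * 2 * 125s + 10s = 260s, and a startup query interval of a quarter
 * of the query interval.
 */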

void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
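
/* While snooping is enabled the bridge itself joins the all-snoopers
 * groups defined by Multicast Router Discovery (RFC 4286), 224.0.0.106
 * for IPv4 and ff02::6a for IPv6, so that MRD advertisements are
 * delivered to the snooping bridge.
 */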

static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
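
/* From userspace this maps to the bridge's mcast_vlan_snooping option;
 * with a reasonably recent iproute2 that would be something like the
 * following (illustrative, check your iproute2 version):
 *
 *	ip link set dev br0 type bridge mcast_vlan_snooping 1
 */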

bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}

void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}

int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
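
/* The router values above and below come from the uapi MDB_RTR_TYPE_*
 * enum: DISABLED (never treat as a router port), TEMP_QUERY (the
 * default; learn router ports from queries), PERM (always a router
 * port) and, for ports only, TEMP (temporarily marked, kept alive by
 * the router timer).
 */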

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
	int err;

	if (br_vlan_is_master(v))
		err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
	else
		err = br_multicast_set_port_router(&v->port_mcast_ctx,
						   mcast_router);

	return err;
}

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
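
/* Seeding ip4/ip6_other_query.delay_time with the query response
 * interval above gives an already-present querier that long to make
 * itself known before this bridge starts acting as the querier; at
 * least, that is the apparent intent of this call site.
 */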

int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_igmp_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif

void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}

void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}
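
/* Both setters above take the value in userspace clock_t units
 * (hundredths of a second on typical Linux configurations, as used by
 * the bridge's netlink and sysfs interfaces), convert it to jiffies
 * and clamp it to a compile-time minimum so queries cannot be forced
 * into a tight loop.
 */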

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:	The bridge port adjacent to which to retrieve addresses
 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
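
/* A minimal caller sketch (illustrative only, not an in-tree user):
 *
 *	LIST_HEAD(mc_list);
 *	struct br_ip_list *entry, *tmp;
 *	int n = br_multicast_list_adjacent(port_dev, &mc_list);
 *
 *	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
 *		... use entry->addr ...
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */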

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
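
/* br_mcast_stats_add() below classifies IGMP queries by transport
 * length and code field: anything that is not exactly
 * sizeof(struct igmphdr) bytes is counted as IGMPv3, an exact-size
 * query with a zero max-response code as IGMPv1 and the rest as
 * IGMPv2; MLD queries are split on length alone.
 */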

static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}