linux/net/bridge/br_multicast.c

/*
* Bridge multicast support.
*
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include "br_private.h"
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
unsigned int br_mdb_rehash_seq;
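/* Compare two bridge multicast addresses: protocol, VLAN id and the
 * protocol-specific group address must all match.
 */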
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
if (a->proto != b->proto)
return 0;
if (a->vid != b->vid)
return 0;
switch (a->proto) {
case htons(ETH_P_IP):
return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
}
return 0;
}
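/* Hash a group address plus VLAN id into a bucket index; mdb->max is a
 * power of two, so masking with (max - 1) selects the bucket.
 */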
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
__u16 vid)
{
return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}
#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
const struct in6_addr *ip,
__u16 vid)
{
return jhash_2words(ipv6_addr_hash(ip), vid,
mdb->secret) & (mdb->max - 1);
}
#endif
static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
struct br_ip *ip)
{
switch (ip->proto) {
case htons(ETH_P_IP):
return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
}
return 0;
}
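/* RCU lookup of a multicast group entry in the given hash bucket. */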
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
struct net_bridge_mdb_entry *mp;
hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
if (br_ip_equal(&mp->addr, dst))
return mp;
}
return NULL;
}
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
struct br_ip *dst)
{
if (!mdb)
return NULL;
return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
struct br_ip br_dst;
br_dst.u.ip4 = dst;
br_dst.proto = htons(ETH_P_IP);
br_dst.vid = vid;
return br_mdb_ip_get(mdb, &br_dst);
}
#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
__u16 vid)
{
struct br_ip br_dst;
br_dst.u.ip6 = *dst;
br_dst.proto = htons(ETH_P_IPV6);
br_dst.vid = vid;
return br_mdb_ip_get(mdb, &br_dst);
}
#endif
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb, u16 vid)
{
struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
struct br_ip ip;
if (br->multicast_disabled)
return NULL;
if (BR_INPUT_SKB_CB(skb)->igmp)
return NULL;
ip.proto = skb->protocol;
ip.vid = vid;
switch (skb->protocol) {
case htons(ETH_P_IP):
ip.u.ip4 = ip_hdr(skb)->daddr;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
ip.u.ip6 = ipv6_hdr(skb)->daddr;
break;
#endif
default:
return NULL;
}
return br_mdb_ip_get(mdb, &ip);
}
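/* RCU callback: free the previous hash table once a rehash has been
 * published and no readers can still reference it.
 */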
static void br_mdb_free(struct rcu_head *head)
{
struct net_bridge_mdb_htable *mdb =
container_of(head, struct net_bridge_mdb_htable, rcu);
struct net_bridge_mdb_htable *old = mdb->old;
mdb->old = NULL;
kfree(old->mhash);
kfree(old);
}
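/* Rehash all entries from the old table into the new one, then check that
 * no bucket chain in the new table exceeds the requested elasticity.
 */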
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
struct net_bridge_mdb_htable *old,
int elasticity)
{
struct net_bridge_mdb_entry *mp;
int maxlen;
int len;
int i;
for (i = 0; i < old->max; i++)
hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
hlist_add_head(&mp->hlist[new->ver],
&new->mhash[br_ip_hash(new, &mp->addr)]);
if (!elasticity)
return 0;
maxlen = 0;
for (i = 0; i < new->max; i++) {
len = 0;
hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
len++;
if (len > maxlen)
maxlen = len;
}
return maxlen > elasticity ? -EINVAL : 0;
}
void br_multicast_free_pg(struct rcu_head *head)
{
struct net_bridge_port_group *p =
container_of(head, struct net_bridge_port_group, rcu);
kfree(p);
}
static void br_multicast_free_group(struct rcu_head *head)
{
struct net_bridge_mdb_entry *mp =
container_of(head, struct net_bridge_mdb_entry, rcu);
kfree(mp);
}
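/* Group timer expired: clear the host membership flag and, if no port
 * groups remain, unlink the entry from the hash table and free it via RCU.
 */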
static void br_multicast_group_expired(unsigned long data)
{
struct net_bridge_mdb_entry *mp = (void *)data;
struct net_bridge *br = mp->br;
struct net_bridge_mdb_htable *mdb;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || timer_pending(&mp->timer))
goto out;
mp->mglist = false;
if (mp->ports)
goto out;
mdb = mlock_dereference(br->mdb, br);
hlist_del_rcu(&mp->hlist[mdb->ver]);
mdb->size--;
call_rcu_bh(&mp->rcu, br_multicast_free_group);
out:
spin_unlock(&br->multicast_lock);
}
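/* Unlink a port group from its mdb entry and free it via RCU.  If neither
 * ports nor host membership remain, let the group entry expire right away.
 */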
static void br_multicast_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, &pg->addr);
if (WARN_ON(!mp))
return;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p != pg)
continue;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
return;
}
WARN_ON(1);
}
static void br_multicast_port_group_expired(unsigned long data)
{
struct net_bridge_port_group *pg = (void *)data;
struct net_bridge *br = pg->port->br;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
goto out;
br_multicast_del_pg(br, pg);
out:
spin_unlock(&br->multicast_lock);
}
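/* Allocate a hash table of the requested size, copy the current entries
 * into it and publish it via RCU; the previous table is freed from an RCU
 * callback once the copy passes the elasticity check.
 */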
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
int elasticity)
{
struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
struct net_bridge_mdb_htable *mdb;
int err;
mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
if (!mdb)
return -ENOMEM;
mdb->max = max;
mdb->old = old;
mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
if (!mdb->mhash) {
kfree(mdb);
return -ENOMEM;
}
mdb->size = old ? old->size : 0;
mdb->ver = old ? old->ver ^ 1 : 0;
if (!old || elasticity)
get_random_bytes(&mdb->secret, sizeof(mdb->secret));
else
mdb->secret = old->secret;
if (!old)
goto out;
err = br_mdb_copy(mdb, old, elasticity);
if (err) {
kfree(mdb->mhash);
kfree(mdb);
return err;
}
br_mdb_rehash_seq++;
call_rcu_bh(&mdb->rcu, br_mdb_free);
out:
rcu_assign_pointer(*mdbp, mdb);
return 0;
}
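/* Build an IGMP membership query skb (a general query when group is 0),
 * including the Router Alert IP option.
 */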
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
__be32 group)
{
struct sk_buff *skb;
struct igmphdr *ih;
struct ethhdr *eth;
struct iphdr *iph;
skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
sizeof(*ih) + 4);
if (!skb)
goto out;
skb->protocol = htons(ETH_P_IP);
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
ether_addr_copy(eth->h_source, br->dev->dev_addr);
eth->h_dest[0] = 1;
eth->h_dest[1] = 0;
eth->h_dest[2] = 0x5e;
eth->h_dest[3] = 0;
eth->h_dest[4] = 0;
eth->h_dest[5] = 1;
eth->h_proto = htons(ETH_P_IP);
skb_put(skb, sizeof(*eth));
skb_set_network_header(skb, skb->len);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 6;
iph->tos = 0xc0;
iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
iph->id = 0;
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->protocol = IPPROTO_IGMP;
iph->saddr = br->multicast_query_use_ifaddr ?
inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
((u8 *)&iph[1])[0] = IPOPT_RA;
((u8 *)&iph[1])[1] = 4;
((u8 *)&iph[1])[2] = 0;
((u8 *)&iph[1])[3] = 0;
ip_send_check(iph);
skb_put(skb, 24);
skb_set_transport_header(skb, skb->len);
ih = igmp_hdr(skb);
ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
ih->code = (group ? br->multicast_last_member_interval :
br->multicast_query_response_interval) /
(HZ / IGMP_TIMER_SCALE);
ih->group = group;
ih->csum = 0;
ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
skb_put(skb, sizeof(*ih));
__skb_pull(skb, sizeof(*eth));
out:
return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
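/* Build an MLD query skb: IPv6 header with a Hop-by-Hop Router Alert
 * option followed by the ICMPv6 MLD message.
 */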
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
const struct in6_addr *group)
{
struct sk_buff *skb;
struct ipv6hdr *ip6h;
struct mld_msg *mldq;
struct ethhdr *eth;
u8 *hopopt;
unsigned long interval;
skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
8 + sizeof(*mldq));
if (!skb)
goto out;
skb->protocol = htons(ETH_P_IPV6);
/* Ethernet header */
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
ether_addr_copy(eth->h_source, br->dev->dev_addr);
eth->h_proto = htons(ETH_P_IPV6);
skb_put(skb, sizeof(*eth));
/* IPv6 header + HbH option */
skb_set_network_header(skb, skb->len);
ip6h = ipv6_hdr(skb);
*(__force __be32 *)ip6h = htonl(0x60000000);
ip6h->payload_len = htons(8 + sizeof(*mldq));
ip6h->nexthdr = IPPROTO_HOPOPTS;
ip6h->hop_limit = 1;
ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
&ip6h->saddr)) {
kfree_skb(skb);
return NULL;
}
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
hopopt[1] = 0; /* length of HbH */
hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
hopopt[3] = 2; /* Length of RA Option */
hopopt[4] = 0; /* Type = 0x0000 (MLD) */
hopopt[5] = 0;
hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
skb_put(skb, sizeof(*ip6h) + 8);
/* ICMPv6 */
skb_set_transport_header(skb, skb->len);
mldq = (struct mld_msg *) icmp6_hdr(skb);
interval = ipv6_addr_any(group) ?
br->multicast_query_response_interval :
br->multicast_last_member_interval;
mldq->mld_type = ICMPV6_MGM_QUERY;
mldq->mld_code = 0;
mldq->mld_cksum = 0;
mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
mldq->mld_reserved = 0;
mldq->mld_mca = *group;
/* checksum */
mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
sizeof(*mldq), IPPROTO_ICMPV6,
csum_partial(mldq,
sizeof(*mldq), 0));
skb_put(skb, sizeof(*mldq));
__skb_pull(skb, sizeof(*eth));
out:
return skb;
}
#endif
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
struct br_ip *addr)
{
switch (addr->proto) {
case htons(ETH_P_IP):
return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
}
return NULL;
}
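/* Look up a group in the bucket selected by hash.  When the bucket chain
 * exceeds the elasticity limit or the table has filled up, trigger a rehash
 * (returning -EAGAIN) or disable snooping once hash_max would be exceeded.
 */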
static struct net_bridge_mdb_entry *br_multicast_get_group(
struct net_bridge *br, struct net_bridge_port *port,
struct br_ip *group, int hash)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
unsigned int count = 0;
unsigned int max;
int elasticity;
int err;
mdb = rcu_dereference_protected(br->mdb, 1);
hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
count++;
if (unlikely(br_ip_equal(group, &mp->addr)))
return mp;
}
elasticity = 0;
max = mdb->max;
if (unlikely(count > br->hash_elasticity && count)) {
if (net_ratelimit())
br_info(br, "Multicast hash table "
"chain limit reached: %s\n",
port ? port->dev->name : br->dev->name);
elasticity = br->hash_elasticity;
}
if (mdb->size >= max) {
max *= 2;
if (unlikely(max > br->hash_max)) {
br_warn(br, "Multicast hash table maximum of %d "
"reached, disabling snooping: %s\n",
br->hash_max,
port ? port->dev->name : br->dev->name);
err = -E2BIG;
disable:
br->multicast_disabled = 1;
goto err;
}
}
if (max > mdb->max || elasticity) {
if (mdb->old) {
if (net_ratelimit())
br_info(br, "Multicast hash table "
"on fire: %s\n",
port ? port->dev->name : br->dev->name);
err = -EEXIST;
goto err;
}
err = br_mdb_rehash(&br->mdb, max, elasticity);
if (err) {
br_warn(br, "Cannot rehash multicast "
"hash table, disabling snooping: %s, %d, %d\n",
port ? port->dev->name : br->dev->name,
mdb->size, err);
goto disable;
}
err = -EAGAIN;
goto err;
}
return NULL;
err:
mp = ERR_PTR(err);
return mp;
}
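/* Find the mdb entry for a group or allocate and insert a new one, creating
 * the hash table on first use and retrying after a rehash if needed.
 */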
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
struct net_bridge_port *port, struct br_ip *group)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
int hash;
int err;
mdb = rcu_dereference_protected(br->mdb, 1);
if (!mdb) {
err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
if (err)
return ERR_PTR(err);
goto rehash;
}
hash = br_ip_hash(mdb, group);
mp = br_multicast_get_group(br, port, group, hash);
switch (PTR_ERR(mp)) {
case 0:
break;
case -EAGAIN:
rehash:
mdb = rcu_dereference_protected(br->mdb, 1);
hash = br_ip_hash(mdb, group);
break;
default:
goto out;
}
mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
if (unlikely(!mp))
return ERR_PTR(-ENOMEM);
mp->br = br;
mp->addr = *group;
setup_timer(&mp->timer, br_multicast_group_expired,
(unsigned long)mp);
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
mdb->size++;
out:
return mp;
}
struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
struct net_bridge_port_group __rcu *next,
unsigned char state)
{
struct net_bridge_port_group *p;
p = kzalloc(sizeof(*p), GFP_ATOMIC);
if (unlikely(!p))
return NULL;
p->addr = *group;
p->port = port;
p->state = state;
rcu_assign_pointer(p->next, next);
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
return p;
}
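/* Record a membership report: for the bridge device itself mark host
 * membership, otherwise add the port to the group's port list (kept ordered
 * by port pointer value) and (re)arm the membership timer.
 */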
static int br_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *group)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
unsigned long now = jiffies;
int err;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
goto out;
mp = br_multicast_new_group(br, port, group);
err = PTR_ERR(mp);
if (IS_ERR(mp))
goto err;
if (!port) {
mp->mglist = true;
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
mod_timer(&mp->timer, now + br->multicast_membership_interval);
goto out;
}
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->port == port)
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
goto found;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
if (unlikely(!p))
goto err;
rcu_assign_pointer(*pp, p);
br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
found:
mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
err = 0;
err:
spin_unlock(&br->multicast_lock);
return err;
}
static int br_ip4_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
__u16 vid)
{
struct br_ip br_group;
if (ipv4_is_local_multicast(group))
return 0;
br_group.u.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
return br_multicast_add_group(br, port, &br_group);
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
__u16 vid)
{
struct br_ip br_group;
if (ipv6_addr_is_ll_all_nodes(group))
return 0;
br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
return br_multicast_add_group(br, port, &br_group);
}
#endif
static void br_multicast_router_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
if (port->multicast_router != 1 ||
timer_pending(&port->multicast_router_timer) ||
hlist_unhashed(&port->rlist))
goto out;
hlist_del_init_rcu(&port->rlist);
out:
spin_unlock(&br->multicast_lock);
}
static void br_multicast_local_router_expired(unsigned long data)
{
}
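/* A querier timer ran out: resume sending our own queries, provided the
 * bridge is up and snooping has not been disabled.
 */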
static void br_multicast_querier_expired(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || br->multicast_disabled)
goto out;
br_multicast_start_querier(br, query);
out:
spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_querier_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_querier_expired(br, &br->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
static void br_multicast_select_own_querier(struct net_bridge *br,
struct br_ip *ip,
struct sk_buff *skb)
{
if (ip->proto == htons(ETH_P_IP))
br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
else
br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
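/* Transmit a query: on a port it goes out through the bridge local-out
 * netfilter hook, without a port it is fed back to the local input path.
 */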
static void __br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *ip)
{
struct sk_buff *skb;
skb = br_multicast_alloc_query(br, ip);
if (!skb)
return;
if (port) {
skb->dev = port->dev;
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
NULL, skb->dev,
br_dev_queue_push_xmit);
} else {
br_multicast_select_own_querier(br, ip, skb);
netif_rx(skb);
}
}
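/* Send a general query on one port (or on the bridge itself when port is
 * NULL), but only if querying is enabled and no other querier's timer is
 * currently running for that protocol.
 */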
static void br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
struct bridge_mcast_own_query *own_query)
{
unsigned long time;
struct br_ip br_group;
struct bridge_mcast_other_query *other_query = NULL;
if (!netif_running(br->dev) || br->multicast_disabled ||
!br->multicast_querier)
return;
memset(&br_group.u, 0, sizeof(br_group.u));
if (port ? (own_query == &port->ip4_own_query) :
(own_query == &br->ip4_own_query)) {
other_query = &br->ip4_other_query;
br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
other_query = &br->ip6_other_query;
br_group.proto = htons(ETH_P_IPV6);
#endif
}
if (!other_query || timer_pending(&other_query->timer))
return;
__br_multicast_send_query(br, port, &br_group);
time = jiffies;
time += own_query->startup_sent < br->multicast_startup_query_count ?
br->multicast_startup_query_interval :
br->multicast_query_interval;
mod_timer(&own_query->timer, time);
}
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
struct bridge_mcast_own_query *query)
{
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
goto out;
if (query->startup_sent < br->multicast_startup_query_count)
query->startup_sent++;
br_multicast_send_query(port->br, port, query);
out:
spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
br_multicast_port_query_expired(port, &port->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
void br_multicast_add_port(struct net_bridge_port *port)
{
port->multicast_router = 1;
setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
(unsigned long)port);
setup_timer(&port->ip4_own_query.timer,
br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
setup_timer(&port->ip6_own_query.timer,
br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
}
void br_multicast_del_port(struct net_bridge_port *port)
{
del_timer_sync(&port->multicast_router_timer);
}
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
query->startup_sent = 0;
if (try_to_del_timer_sync(&query->timer) >= 0 ||
del_timer(&query->timer))
mod_timer(&query->timer, jiffies);
}
void br_multicast_enable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
if (br->multicast_disabled || !netif_running(br->dev))
goto out;
br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_enable(&port->ip6_own_query);
#endif
out:
spin_unlock(&br->multicast_lock);
}
void br_multicast_disable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
struct net_bridge_port_group *pg;
struct hlist_node *n;
spin_lock(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
br_multicast_del_pg(br, pg);
if (!hlist_unhashed(&port->rlist))
hlist_del_init_rcu(&port->rlist);
del_timer(&port->multicast_router_timer);
del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer(&port->ip6_own_query.timer);
#endif
spin_unlock(&br->multicast_lock);
}
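/* Walk the group records of an IGMPv3 report and treat each record as a
 * plain IGMPv2 join for the corresponding group.
 */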
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
int i;
int len;
int num;
int type;
int err = 0;
__be32 group;
if (!pskb_may_pull(skb, sizeof(*ih)))
return -EINVAL;
ih = igmpv3_report_hdr(skb);
num = ntohs(ih->ngrec);
len = sizeof(*ih);
for (i = 0; i < num; i++) {
len += sizeof(*grec);
if (!pskb_may_pull(skb, len))
return -EINVAL;
grec = (void *)(skb->data + len - sizeof(*grec));
group = grec->grec_mca;
type = grec->grec_type;
len += ntohs(grec->grec_nsrcs) * 4;
if (!pskb_may_pull(skb, len))
return -EINVAL;
/* We treat this as an IGMPv2 report for now. */
switch (type) {
case IGMPV3_MODE_IS_INCLUDE:
case IGMPV3_MODE_IS_EXCLUDE:
case IGMPV3_CHANGE_TO_INCLUDE:
case IGMPV3_CHANGE_TO_EXCLUDE:
case IGMPV3_ALLOW_NEW_SOURCES:
case IGMPV3_BLOCK_OLD_SOURCES:
break;
default:
continue;
}
err = br_ip4_multicast_add_group(br, port, group, vid);
if (err)
break;
}
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
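/* Walk the group records of an MLDv2 report and treat each record as an
 * MLDv1 join for the corresponding group.
 */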
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
struct icmp6hdr *icmp6h;
struct mld2_grec *grec;
int i;
int len;
int num;
int err = 0;
if (!pskb_may_pull(skb, sizeof(*icmp6h)))
return -EINVAL;
icmp6h = icmp6_hdr(skb);
num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
len = sizeof(*icmp6h);
for (i = 0; i < num; i++) {
__be16 *nsrcs, _nsrcs;
nsrcs = skb_header_pointer(skb,
len + offsetof(struct mld2_grec,
grec_nsrcs),
sizeof(_nsrcs), &_nsrcs);
if (!nsrcs)
return -EINVAL;
if (!pskb_may_pull(skb,
len + sizeof(*grec) +
sizeof(struct in6_addr) * ntohs(*nsrcs)))
return -EINVAL;
grec = (struct mld2_grec *)(skb->data + len);
len += sizeof(*grec) +
sizeof(struct in6_addr) * ntohs(*nsrcs);
/* We treat these as MLDv1 reports for now. */
switch (grec->grec_type) {
case MLD2_MODE_IS_INCLUDE:
case MLD2_MODE_IS_EXCLUDE:
case MLD2_CHANGE_TO_INCLUDE:
case MLD2_CHANGE_TO_EXCLUDE:
case MLD2_ALLOW_NEW_SOURCES:
case MLD2_BLOCK_OLD_SOURCES:
break;
default:
continue;
}
err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
vid);
if (err)
break;
}
return err;
}
#endif
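/* Querier election: adopt the sender as the selected querier if no election
 * is currently running or if its source address is lower than (or equal to)
 * the one selected so far.
 */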
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
struct net_bridge_port *port,
__be32 saddr)
{
if (!timer_pending(&br->ip4_own_query.timer) &&
!timer_pending(&br->ip4_other_query.timer))
goto update;
if (!br->ip4_querier.addr.u.ip4)
goto update;
if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
goto update;
return false;
update:
br->ip4_querier.addr.u.ip4 = saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip4_querier.port, port);
return true;
}
#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
struct net_bridge_port *port,
struct in6_addr *saddr)
{
if (!timer_pending(&br->ip6_own_query.timer) &&
!timer_pending(&br->ip6_other_query.timer))
goto update;
if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
goto update;
return false;
update:
br->ip6_querier.addr.u.ip6 = *saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip6_querier.port, port);
return true;
}
#endif
static bool br_multicast_select_querier(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *saddr)
{
switch (saddr->proto) {
case htons(ETH_P_IP):
return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
}
return false;
}
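/* Note that another querier is present: remember when its max response
 * delay ends and (re)arm the other-querier-present timer.
 */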
static void
br_multicast_update_query_timer(struct net_bridge *br,
struct bridge_mcast_other_query *query,
unsigned long max_delay)
{
if (!timer_pending(&query->timer))
query->delay_time = jiffies + max_delay;
mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
/*
* Add port to router_list
* list is maintained ordered by pointer value
* and locked by br->multicast_lock and RCU
*/
static void br_multicast_add_router(struct net_bridge *br,
struct net_bridge_port *port)
{
struct net_bridge_port *p;
struct hlist_node *slot = NULL;
hlist_for_each_entry(p, &br->router_list, rlist) {
if ((unsigned long) port >= (unsigned long) p)
break;
slot = &p->rlist;
}
if (slot)
hlist_add_behind_rcu(&port->rlist, slot);
else
hlist_add_head_rcu(&port->rlist, &br->router_list);
}
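/* Note that a multicast router was seen.  Without a port this refreshes
 * the bridge's own router timer (only while multicast_router is in the
 * temporary mode 1); for a port in mode 1 it adds the port to the
 * router list, unless it is already there, and re-arms the port's
 * router timer.
 */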
static void br_multicast_mark_router(struct net_bridge *br,
struct net_bridge_port *port)
{
unsigned long now = jiffies;
if (!port) {
if (br->multicast_router == 1)
mod_timer(&br->multicast_router_timer,
now + br->multicast_querier_interval);
return;
}
if (port->multicast_router != 1)
return;
if (!hlist_unhashed(&port->rlist))
goto timer;
br_multicast_add_router(br, port);
timer:
mod_timer(&port->multicast_router_timer,
now + br->multicast_querier_interval);
}
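/* Common handling for a query from another querier: if the querier
 * election accepts the source address, refresh the other-querier timer
 * with the advertised maximum delay and mark the receiving port (or
 * the bridge itself) as a router.
 */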
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
struct bridge_mcast_other_query *query,
struct br_ip *saddr,
unsigned long max_delay)
{
if (!br_multicast_select_querier(br, port, saddr))
return;
br_multicast_update_query_timer(br, query, max_delay);
br_multicast_mark_router(br, port);
}
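/* Handle an IGMP query: derive the maximum response delay from the
 * v1/v2/v3 header, feed general queries (which must be sent to
 * 224.0.0.1) into the querier handling above, and for group-specific
 * queries shorten the timers of the matching mdb entry and its port
 * groups to last_member_count times the advertised delay.
 */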
static int br_ip4_multicast_query(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
const struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
struct net_bridge_mdb_entry *mp;
struct igmpv3_query *ih3;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
__be32 group;
int err = 0;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
goto out;
group = ih->group;
if (skb->len == sizeof(*ih)) {
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
if (!max_delay) {
max_delay = 10 * HZ;
group = 0;
}
} else {
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
err = -EINVAL;
goto out;
}
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs)
goto out;
max_delay = ih3->code ?
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
/* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
all-systems destination address (224.0.0.1) for general queries
*/
if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
err = -EINVAL;
goto out;
}
if (!group) {
saddr.proto = htons(ETH_P_IP);
saddr.u.ip4 = iph->saddr;
br_multicast_query_received(br, port, &br->ip4_other_query,
&saddr, max_delay);
goto out;
}
mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
if (!mp)
goto out;
max_delay *= br->multicast_last_member_count;
if (mp->mglist &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, now + max_delay) :
try_to_del_timer_sync(&mp->timer) >= 0))
mod_timer(&mp->timer, now + max_delay);
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
try_to_del_timer_sync(&p->timer) >= 0)
mod_timer(&p->timer, now + max_delay);
}
out:
spin_unlock(&br->multicast_lock);
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
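/* Handle an MLD query: the source must be link-local, the MLDv1/v2
 * header gives the maximum response delay, general queries (unspecified
 * group, sent to ff02::1) feed the querier handling, and group-specific
 * queries shorten the timers of the matching mdb entry and its port
 * groups.
 */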
static int br_ip6_multicast_query(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld_msg *mld;
struct net_bridge_mdb_entry *mp;
struct mld2_query *mld2q;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
const struct in6_addr *group = NULL;
bool is_general_query;
int err = 0;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
goto out;
/* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
err = -EINVAL;
goto out;
}
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
goto out;
}
mld = (struct mld_msg *) icmp6_hdr(skb);
max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
} else {
if (!pskb_may_pull(skb, sizeof(*mld2q))) {
err = -EINVAL;
goto out;
}
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
is_general_query = group && ipv6_addr_any(group);
/* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
* all-nodes destination address (ff02::1) for general queries
*/
if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
err = -EINVAL;
goto out;
}
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
saddr.u.ip6 = ip6h->saddr;
br_multicast_query_received(br, port, &br->ip6_other_query,
&saddr, max_delay);
goto out;
} else if (!group) {
goto out;
}
mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
if (!mp)
goto out;
max_delay *= br->multicast_last_member_count;
if (mp->mglist &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, now + max_delay) :
try_to_del_timer_sync(&mp->timer) >= 0))
mod_timer(&mp->timer, now + max_delay);
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
try_to_del_timer_sync(&p->timer) >= 0)
mod_timer(&p->timer, now + max_delay);
}
out:
spin_unlock(&br->multicast_lock);
return err;
}
#endif
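/* Handle a leave for a group arriving on a port (or on the bridge
 * itself when port is NULL).  Nothing happens while another querier is
 * active.  If this bridge is the querier it first sends a
 * group-specific query; a port with fast-leave set drops its group
 * entry immediately, otherwise the membership timers are merely
 * lowered to the last-member window.
 */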
static void
br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *group,
struct bridge_mcast_other_query *other_query,
struct bridge_mcast_own_query *own_query)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
unsigned long now;
unsigned long time;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) ||
timer_pending(&other_query->timer))
goto out;
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, group);
if (!mp)
goto out;
if (br->multicast_querier) {
__br_multicast_send_query(br, port, &mp->addr);
time = jiffies + br->multicast_last_member_count *
br->multicast_last_member_interval;
mod_timer(&own_query->timer, time);
for (p = mlock_dereference(mp->ports, br);
p != NULL;
p = mlock_dereference(p->next, br)) {
if (p->port != port)
continue;
if (!hlist_unhashed(&p->mglist) &&
(timer_pending(&p->timer) ?
time_after(p->timer.expires, time) :
try_to_del_timer_sync(&p->timer) >= 0)) {
mod_timer(&p->timer, time);
}
break;
}
}
if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
struct net_bridge_port_group __rcu **pp;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->port != port)
continue;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
br_mdb_notify(br->dev, port, group, RTM_DELMDB);
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
}
goto out;
}
now = jiffies;
time = now + br->multicast_last_member_count *
br->multicast_last_member_interval;
if (!port) {
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
if (mp->mglist &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, time) :
try_to_del_timer_sync(&mp->timer) >= 0)) {
mod_timer(&mp->timer, time);
}
Revert "bridge: only expire the mdb entry when query is received" While this commit was a good attempt to fix issues occuring when no multicast querier is present, this commit still has two more issues: 1) There are cases where mdb entries do not expire even if there is a querier present. The bridge will unnecessarily continue flooding multicast packets on the according ports. 2) Never removing an mdb entry could be exploited for a Denial of Service by an attacker on the local link, slowly, but steadily eating up all memory. Actually, this commit became obsolete with "bridge: disable snooping if there is no querier" (b00589af3b) which included fixes for a few more cases. Therefore reverting the following commits (the commit stated in the commit message plus three of its follow up fixes): ==================== Revert "bridge: update mdb expiration timer upon reports." This reverts commit f144febd93d5ee534fdf23505ab091b2b9088edc. Revert "bridge: do not call setup_timer() multiple times" This reverts commit 1faabf2aab1fdaa1ace4e8c829d1b9cf7bfec2f1. Revert "bridge: fix some kernel warning in multicast timer" This reverts commit c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1. Revert "bridge: only expire the mdb entry when query is received" This reverts commit 9f00b2e7cf241fa389733d41b615efdaa2cb0f5b. ==================== CC: Cong Wang <amwang@redhat.com> Signed-off-by: Linus Lüssing <linus.luessing@web.de> Reviewed-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-19 22:58:57 +00:00
goto out;
}
for (p = mlock_dereference(mp->ports, br);
p != NULL;
p = mlock_dereference(p->next, br)) {
if (p->port != port)
continue;
if (!hlist_unhashed(&p->mglist) &&
(timer_pending(&p->timer) ?
time_after(p->timer.expires, time) :
try_to_del_timer_sync(&p->timer) >= 0)) {
mod_timer(&p->timer, time);
}
break;
}
out:
spin_unlock(&br->multicast_lock);
}
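/* IPv4 wrapper: leaves for link-local groups (224.0.0.0/24) are
 * ignored, everything else goes to br_multicast_leave_group().
 */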
static void br_ip4_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
__u16 vid)
{
struct br_ip br_group;
struct bridge_mcast_own_query *own_query;
if (ipv4_is_local_multicast(group))
return;
own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
br_group.u.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
__u16 vid)
{
struct br_ip br_group;
struct bridge_mcast_own_query *own_query;
if (ipv6_addr_is_ll_all_nodes(group))
return;
own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
own_query);
}
#endif
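/* Snoop an IPv4 frame: validate the IP header and checksum, set the
 * mrouters_only hint for non-IGMP multicast outside 224.0.0.0/24, then
 * verify the IGMP checksum on a trimmed clone and dispatch membership
 * reports, queries and leave messages to the handlers above.
 */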
static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
struct sk_buff *skb2 = skb;
const struct iphdr *iph;
struct igmphdr *ih;
unsigned int len;
unsigned int offset;
int err;
/* We treat OOM as packet loss for now. */
if (!pskb_may_pull(skb, sizeof(*iph)))
return -EINVAL;
iph = ip_hdr(skb);
if (iph->ihl < 5 || iph->version != 4)
return -EINVAL;
if (!pskb_may_pull(skb, ip_hdrlen(skb)))
return -EINVAL;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
return -EINVAL;
if (iph->protocol != IPPROTO_IGMP) {
if (!ipv4_is_local_multicast(iph->daddr))
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
return 0;
}
len = ntohs(iph->tot_len);
if (skb->len < len || len < ip_hdrlen(skb))
return -EINVAL;
if (skb->len > len) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
err = pskb_trim_rcsum(skb2, len);
if (err)
goto err_out;
}
len -= ip_hdrlen(skb2);
offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
__skb_pull(skb2, offset);
skb_reset_transport_header(skb2);
err = -EINVAL;
if (!pskb_may_pull(skb2, sizeof(*ih)))
goto out;
switch (skb2->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_fold(skb2->csum))
break;
/* fall through */
case CHECKSUM_NONE:
skb2->csum = 0;
if (skb_checksum_complete(skb2))
goto out;
}
err = 0;
BR_INPUT_SKB_CB(skb)->igmp = 1;
ih = igmp_hdr(skb2);
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip4_multicast_add_group(br, port, ih->group, vid);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
err = br_ip4_multicast_query(br, port, skb2, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
br_ip4_multicast_leave_group(br, port, ih->group, vid);
break;
}
out:
__skb_push(skb2, offset);
err_out:
if (skb2 != skb)
kfree_skb(skb2);
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
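/* Snoop an IPv6 frame: set the mrouters_only hint unless the
 * destination is the all-nodes address, and inspect only packets that
 * carry a hop-by-hop header followed by ICMPv6.  MLD reports, queries
 * and done messages are checksum-verified on a clone and dispatched to
 * the handlers above.
 */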
static int br_multicast_ipv6_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
struct sk_buff *skb2;
const struct ipv6hdr *ip6h;
u8 icmp6_type;
u8 nexthdr;
__be16 frag_off;
unsigned int len;
int offset;
int err;
if (!pskb_may_pull(skb, sizeof(*ip6h)))
return -EINVAL;
ip6h = ipv6_hdr(skb);
/*
* We're interested in MLD messages only.
* - Version is 6
* - MLD always carries the Router Alert hop-by-hop option
* - But we do not support jumbograms.
*/
if (ip6h->version != 6)
return 0;
/* Prevent flooding this packet if there is no listener present */
if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr))
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
ip6h->payload_len == 0)
return 0;
len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
if (skb->len < len)
return -EINVAL;
nexthdr = ip6h->nexthdr;
offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
return 0;
/* Okay, we found an ICMPv6 header */
skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
err = -EINVAL;
if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
goto out;
len -= offset - skb_network_offset(skb2);
__skb_pull(skb2, offset);
skb_reset_transport_header(skb2);
skb_postpull_rcsum(skb2, skb_network_header(skb2),
skb_network_header_len(skb2));
icmp6_type = icmp6_hdr(skb2)->icmp6_type;
switch (icmp6_type) {
case ICMPV6_MGM_QUERY:
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
case ICMPV6_MLD2_REPORT:
break;
default:
err = 0;
goto out;
}
/* Okay, we found an MLD message. Check further. */
if (skb2->len > len) {
err = pskb_trim_rcsum(skb2, len);
if (err)
goto out;
err = -EINVAL;
}
ip6h = ipv6_hdr(skb2);
switch (skb2->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
IPPROTO_ICMPV6, skb2->csum))
break;
/*FALLTHROUGH*/
case CHECKSUM_NONE:
skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
&ip6h->daddr,
skb2->len,
IPPROTO_ICMPV6, 0));
if (__skb_checksum_complete(skb2))
goto out;
}
err = 0;
BR_INPUT_SKB_CB(skb)->igmp = 1;
switch (icmp6_type) {
case ICMPV6_MGM_REPORT:
{
struct mld_msg *mld;
if (!pskb_may_pull(skb2, sizeof(*mld))) {
err = -EINVAL;
goto out;
}
mld = (struct mld_msg *)skb_transport_header(skb2);
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
break;
}
case ICMPV6_MLD2_REPORT:
err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
break;
case ICMPV6_MGM_QUERY:
err = br_ip6_multicast_query(br, port, skb2, vid);
break;
case ICMPV6_MGM_REDUCTION:
{
struct mld_msg *mld;
if (!pskb_may_pull(skb2, sizeof(*mld))) {
err = -EINVAL;
goto out;
}
mld = (struct mld_msg *)skb_transport_header(skb2);
br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
}
}
out:
kfree_skb(skb2);
return err;
}
#endif
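/* Entry point for multicast snooping: reset the per-skb snooping state,
 * do nothing when snooping is disabled, and dispatch to the IPv4 or
 * IPv6 handler based on the frame's protocol.
 */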
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb, u16 vid)
{
BR_INPUT_SKB_CB(skb)->igmp = 0;
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
if (br->multicast_disabled)
return 0;
switch (skb->protocol) {
case htons(ETH_P_IP):
return br_multicast_ipv4_rcv(br, port, skb, vid);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return br_multicast_ipv6_rcv(br, port, skb, vid);
#endif
}
return 0;
}
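/* Own-query timer expiry: count another startup query while still in
 * the startup phase, forget the remembered querier port and send the
 * next general query.
 */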
static void br_multicast_query_expired(struct net_bridge *br,
struct bridge_mcast_own_query *query,
struct bridge_mcast_querier *querier)
{
spin_lock(&br->multicast_lock);
if (query->startup_sent < br->multicast_startup_query_count)
query->startup_sent++;
RCU_INIT_POINTER(querier->port, NULL);
br_multicast_send_query(br, NULL, query);
spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_query_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif
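/* Install the default IGMP/MLD snooping parameters (robustness 2,
 * 125s query interval, 10s query response interval, 260s membership
 * interval) and initialise the router, other-querier and own-query
 * timers.
 */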
void br_multicast_init(struct net_bridge *br)
{
br->hash_elasticity = 4;
br->hash_max = 512;
br->multicast_router = 1;
br->multicast_querier = 0;
br->multicast_query_use_ifaddr = 0;
br->multicast_last_member_count = 2;
br->multicast_startup_query_count = 2;
br->multicast_last_member_interval = HZ;
br->multicast_query_response_interval = 10 * HZ;
br->multicast_startup_query_interval = 125 * HZ / 4;
br->multicast_query_interval = 125 * HZ;
br->multicast_querier_interval = 255 * HZ;
br->multicast_membership_interval = 260 * HZ;
br->ip4_other_query.delay_time = 0;
br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
br->ip6_other_query.delay_time = 0;
br->ip6_querier.port = NULL;
#endif
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0);
setup_timer(&br->ip4_other_query.timer,
br_ip4_multicast_querier_expired, (unsigned long)br);
setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
(unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
setup_timer(&br->ip6_other_query.timer,
br_ip6_multicast_querier_expired, (unsigned long)br);
setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
(unsigned long)br);
#endif
}
static void __br_multicast_open(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
query->startup_sent = 0;
if (br->multicast_disabled)
return;
mod_timer(&query->timer, jiffies);
}
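/* br_multicast_open()/br_multicast_stop() follow the bridge device's
 * administrative state: the own-query timers are armed when the bridge
 * comes up, and all timers plus the MDB are torn down when it goes down.
 */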
void br_multicast_open(struct net_bridge *br)
{
__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open(br, &br->ip6_own_query);
#endif
}
void br_multicast_stop(struct net_bridge *br)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct hlist_node *n;
u32 ver;
int i;
del_timer_sync(&br->multicast_router_timer);
del_timer_sync(&br->ip4_other_query.timer);
del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
if (!mdb)
goto out;
br->mdb = NULL;
ver = mdb->ver;
for (i = 0; i < mdb->max; i++) {
hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
hlist[ver]) {
del_timer(&mp->timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
}
}
if (mdb->old) {
spin_unlock_bh(&br->multicast_lock);
rcu_barrier_bh();
spin_lock_bh(&br->multicast_lock);
WARN_ON(mdb->old);
}
mdb->old = mdb;
call_rcu_bh(&mdb->rcu, br_mdb_free);
out:
spin_unlock_bh(&br->multicast_lock);
}
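/* multicast_router for the bridge itself: 0 never treats the local bridge
 * device as a multicast router port, 1 (the default) does so only while
 * multicast_router_timer is running, and 2 treats it as a permanent
 * multicast router port.
 */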
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
int err = -ENOENT;
spin_lock_bh(&br->multicast_lock);
if (!netif_running(br->dev))
goto unlock;
switch (val) {
case 0:
case 2:
del_timer(&br->multicast_router_timer);
/* fall through */
case 1:
br->multicast_router = val;
err = 0;
break;
default:
err = -EINVAL;
break;
}
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
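/* Per-port multicast_router: 0 removes the port from the router list and
 * stops its timer, 1 (the default) lets router presence be re-learned from
 * incoming queries, and 2 pins the port as a permanent router port.
 */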
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
struct net_bridge *br = p->br;
int err = -ENOENT;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
goto unlock;
switch (val) {
case 0:
case 1:
case 2:
p->multicast_router = val;
err = 0;
if (val < 2 && !hlist_unhashed(&p->rlist))
hlist_del_init_rcu(&p->rlist);
if (val == 1)
break;
del_timer(&p->multicast_router_timer);
if (val == 0)
break;
br_multicast_add_router(br, p);
break;
default:
err = -EINVAL;
break;
}
unlock:
spin_unlock(&br->multicast_lock);
return err;
}
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
struct net_bridge_port *port;
__br_multicast_open(br, query);
list_for_each_entry(port, &br->port_list, list) {
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
continue;
if (query == &br->ip4_own_query)
br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
else
br_multicast_enable(&port->ip6_own_query);
#endif
}
}
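/* Backs the "multicast_snooping" toggle: re-enabling snooping re-hashes any
 * existing MDB with the current elasticity and restarts the bridge's own
 * IGMP/MLD queriers.
 */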
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
int err = 0;
struct net_bridge_mdb_htable *mdb;
spin_lock_bh(&br->multicast_lock);
if (br->multicast_disabled == !val)
goto unlock;
br->multicast_disabled = !val;
if (br->multicast_disabled)
goto unlock;
if (!netif_running(br->dev))
goto unlock;
mdb = mlock_dereference(br->mdb, br);
if (mdb) {
if (mdb->old) {
err = -EEXIST;
rollback:
br->multicast_disabled = !!val;
goto unlock;
}
err = br_mdb_rehash(&br->mdb, mdb->max,
br->hash_elasticity);
if (err)
goto rollback;
}
br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_start_querier(br, &br->ip6_own_query);
#endif
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
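/* Backs the "multicast_querier" toggle: when enabling, prime the
 * other-querier delay_time with one query-response interval and then start
 * the bridge's own IGMP/MLD queries.
 */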
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
unsigned long max_delay;
val = !!val;
spin_lock_bh(&br->multicast_lock);
if (br->multicast_querier == val)
goto unlock;
br->multicast_querier = val;
if (!val)
goto unlock;
max_delay = br->multicast_query_response_interval;
if (!timer_pending(&br->ip4_other_query.timer))
br->ip4_other_query.delay_time = jiffies + max_delay;
br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
if (!timer_pending(&br->ip6_other_query.timer))
br->ip6_other_query.delay_time = jiffies + max_delay;
br_multicast_start_querier(br, &br->ip6_own_query);
#endif
unlock:
spin_unlock_bh(&br->multicast_lock);
return 0;
}
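/* Backs the "hash_max" setting: the new MDB hash size must be a power of
 * two and may not be smaller than the number of entries currently stored,
 * otherwise -EINVAL is returned.
 */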
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
int err = -ENOENT;
u32 old;
struct net_bridge_mdb_htable *mdb;
spin_lock_bh(&br->multicast_lock);
if (!netif_running(br->dev))
goto unlock;
err = -EINVAL;
if (!is_power_of_2(val))
goto unlock;
mdb = mlock_dereference(br->mdb, br);
if (mdb && val < mdb->size)
goto unlock;
err = 0;
old = br->hash_max;
br->hash_max = val;
if (mdb) {
if (mdb->old) {
err = -EEXIST;
rollback:
br->hash_max = old;
goto unlock;
}
err = br_mdb_rehash(&br->mdb, br->hash_max,
br->hash_elasticity);
if (err)
goto rollback;
}
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
/**
* br_multicast_list_adjacent - Returns snooped multicast addresses
* @dev: The bridge port adjacent to which to retrieve addresses
* @br_ip_list: The list to store found, snooped multicast IP addresses in
*
* Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
* snooping feature on all bridge ports of dev's bridge device, excluding
* the addresses from dev itself.
*
* Returns the number of items added to br_ip_list.
*
* Notes:
* - br_ip_list needs to be initialized by caller
* - br_ip_list might contain duplicates in the end
* (needs to be taken care of by caller)
* - br_ip_list needs to be freed by caller
*/
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
{
struct net_bridge *br;
struct net_bridge_port *port;
struct net_bridge_port_group *group;
struct br_ip_list *entry;
int count = 0;
rcu_read_lock();
if (!br_ip_list || !br_port_exists(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
list_for_each_entry_rcu(port, &br->port_list, list) {
if (!port->dev || port->dev == dev)
continue;
hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
goto unlock;
entry->addr = group->addr;
list_add(&entry->list, br_ip_list);
count++;
}
}
unlock:
rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
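/* Illustrative usage sketch, not part of the upstream file: how a caller in
 * another module (a mesh or routing protocol on top of a bridge port, say)
 * might consume br_multicast_list_adjacent(). The function name is made up;
 * it assumes <linux/if_bridge.h> for struct br_ip_list and the prototype.
 */
static void __maybe_unused example_dump_adjacent_groups(struct net_device *dev)
{
struct br_ip_list *entry, *tmp;
LIST_HEAD(mcast_list); /* caller-initialised list head */
int count;
count = br_multicast_list_adjacent(dev, &mcast_list);
pr_info("%s: %d snooped group(s) on other ports (duplicates possible)\n",
dev->name, count);
/* the entries belong to the caller and must be freed by it */
list_for_each_entry_safe(entry, tmp, &mcast_list, list) {
if (entry->addr.proto == htons(ETH_P_IP))
pr_info(" IPv4 group %pI4, vid %u\n",
&entry->addr.u.ip4, entry->addr.vid);
list_del(&entry->list);
kfree(entry);
}
}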
/**
* br_multicast_has_querier_anywhere - Checks for a querier on a bridge
* @dev: The bridge port providing the bridge on which to check for a querier
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
*
* Checks whether the given interface has a bridge on top and if so returns
* true if a valid querier exists anywhere on the bridged link layer.
* Otherwise returns false.
*/
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
struct net_bridge *br;
struct net_bridge_port *port;
struct ethhdr eth;
bool ret = false;
rcu_read_lock();
if (!br_port_exists(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
memset(&eth, 0, sizeof(eth));
eth.h_proto = htons(proto);
ret = br_multicast_querier_exists(br, &eth);
unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
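/* Illustrative sketch, not upstream code: a hypothetical helper asking
 * whether any valid IGMP querier (including the bridge itself) is active on
 * the bridged segment this port belongs to, e.g. before trusting snooped
 * group membership state.
 */
static bool __maybe_unused example_igmp_querier_on_segment(struct net_device *port_dev)
{
return br_multicast_has_querier_anywhere(port_dev, ETH_P_IP);
}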
/**
* br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
* @dev: The bridge port adjacent to which to check for a querier
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
*
* Checks whether the given interface has a bridge on top and if so returns
* true if a selected querier is behind one of the other ports of this
* bridge. Otherwise returns false.
*/
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
struct net_bridge *br;
struct net_bridge_port *port;
bool ret = false;
rcu_read_lock();
if (!br_port_exists(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
switch (proto) {
case ETH_P_IP:
if (!timer_pending(&br->ip4_other_query.timer) ||
rcu_dereference(br->ip4_querier.port) == port)
goto unlock;
break;
#if IS_ENABLED(CONFIG_IPV6)
case ETH_P_IPV6:
if (!timer_pending(&br->ip6_other_query.timer) ||
rcu_dereference(br->ip6_querier.port) == port)
goto unlock;
break;
#endif
default:
goto unlock;
}
ret = true;
unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
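/* Illustrative sketch, not upstream code: a hypothetical helper asking
 * whether the elected IGMP querier sits behind one of the *other* ports of
 * this port's bridge, e.g. to judge whether listeners on the segment have
 * been reliably reported.
 */
static bool __maybe_unused example_igmp_querier_behind_other_port(struct net_device *port_dev)
{
return br_multicast_has_querier_adjacent(port_dev, ETH_P_IP);
}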