mirror of
https://github.com/torvalds/linux.git
34666d467c
Jesper reported that br_netfilter always registers the hooks since this is part of the bridge core. This harms performance for people that don't need this.

This patch modularizes br_netfilter so it can be rmmod'ed, and thus the hooks can be unregistered. I think bridge netfilter should have been a separate module from the beginning; Patrick agreed on that.

Note that this breaks compatibility for users who expect bridge netfilter to be available after an explicit 'modprobe bridge' or via automatic load through brctl. However, the damage can easily be undone by modprobing br_netfilter. The bridge core also emits a message to provide a clue to people who haven't noticed that this has been deprecated.

On top of that, the plan is that nftables will not rely on this software layer, but instead integrate connection tracking into the bridge layer to enable stateful filtering and NAT, which is what bridge netfilter users seem to require.

This patch still keeps the fake_dst_ops in the bridge core, since this is required when the bridge port is initialized. So we can safely modprobe/rmmod br_netfilter at any time.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> Acked-by: Florian Westphal <fw@strlen.de>
283 lines
6.4 KiB
C
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));

/* Don't forward packets to the originating port (unless hairpin mode is
 * on), to ports not in forwarding state, or past the egress VLAN filter.
 */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
		p->state == BR_STATE_FORWARDING;
}

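/* Final transmission step: re-push the Ethernet header (the hooks run on
 * the network header, and ip_fragment doesn't copy the MAC header), drop
 * the fake rtable attached by br_netfilter, if any, and hand the skb to
 * the egress device's transmit queue.
 */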
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* ip_fragment doesn't copy the MAC header */
	if (nf_bridge_maybe_copy_header(skb) ||
	    !is_skb_forwardable(skb->dev, skb)) {
		kfree_skb(skb);
	} else {
		skb_push(skb, ETH_HLEN);
		br_drop_fake_rtable(skb);
		dev_queue_xmit(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

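/* Run the NF_BR_POST_ROUTING netfilter hook; packets that are accepted
 * continue to br_dev_queue_push_xmit().
 */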
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

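/* Deliver a locally originated frame out of one port: apply egress VLAN
 * handling, special-case netpoll transmits, then pass the frame through
 * the NF_BR_LOCAL_OUT hook with br_forward_finish() as continuation.
 */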
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

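/* Forward a frame received on one port out of another: refuse LRO
 * aggregates (they may exceed the egress MTU), apply egress VLAN
 * handling, fix up the checksum state, then run the NF_BR_FORWARD hook
 * with both the ingress and egress devices.
 */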
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

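/* Transmit a frame originated by the bridge device itself through a
 * single port, or free it when the port cannot accept it.
 */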
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

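/* Forward a frame to a single port.  A non-NULL skb0 means the caller
 * still needs the original skb (e.g. for a local copy), so a clone is
 * sent instead and the original is left untouched even when delivery
 * is refused.
 */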
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
	if (should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}

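/* Send a clone of the skb through __packet_hook so the caller keeps
 * ownership of the original; account a tx_dropped on the bridge device
 * when no memory is available for the clone.
 */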
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}

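/* Deferred-delivery helper for the flood loops: instead of transmitting
 * to the current port immediately, deliver a clone to the previously
 * recorded port and return the current one as the new "prev".  The last
 * recorded port then gets the original skb, saving one clone per flood.
 */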
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

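/* Flood a frame to every port eligible under should_deliver(), skipping
 * ports that opted out of unicast flooding via the BR_FLOOD flag.  The
 * skb0 argument has the same "caller keeps the original" meaning as in
 * br_forward().
 */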
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

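/* Thin wrappers around br_flood() selecting the delivery hook:
 * __br_deliver for frames originated by the bridge device itself,
 * __br_forward for frames being forwarded between ports.
 */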
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}

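/* With IGMP/MLD snooping enabled, multicast frames are replicated only
 * to ports with interested listeners (the mdb entry's port groups) and
 * to detected multicast router ports, instead of being flooded to every
 * port.  Both lists are kept sorted by port pointer in descending order
 * (see br_multicast.c), so the loop below merges them and delivers once
 * per port even when a port appears on both lists.
 */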
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

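/* Entry points mirroring br_flood_deliver()/br_flood_forward() for
 * snooped multicast: deliver locally originated frames, or forward
 * received ones, along the mdb/router port set.
 */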
/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif