Mirror of https://github.com/torvalds/linux.git
commit 3982d3d28b
This patch adds a 'hairpin' (also called 'reflective relay') mode port
configuration to the Linux Ethernet bridge kernel module. A bridge
supporting hairpin forwarding mode can send frames back out through the
port the frame was received on. Hairpin mode is required to support
basic VEPA (Virtual Ethernet Port Aggregator) capabilities.

You can find additional information on VEPA here:

http://tech.groups.yahoo.com/group/evb/
http://www.ieee802.org/1/files/public/docs2009/new-hudson-vepa_seminar-20090514d.pdf
http://www.internet2.edu/presentations/jt2009jul/20090719-congdon.pdf

An additional patch 'bridge-utils: Add 'hairpin' port forwarding mode'
is provided to allow configuring hairpin mode from userspace tools.

Signed-off-by: Paul Congdon <paul.congdon@hp.com>
Signed-off-by: Anna Fischer <anna.fischer@hp.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
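For illustration only, the decision this patch changes can be modelled in plain
user-space C. The sketch below mirrors the shape of the should_deliver() predicate
from the file shown underneath; the constant values and the cut-down port structure
are stand-ins invented for this example, not the kernel's definitions. The point is
simply that with the hairpin flag set, a frame may be delivered back out of the port
it arrived on, while a non-hairpin port still drops it.

/* Illustrative sketch only: a user-space model of the should_deliver()
 * check from br_forward.c below.  BR_HAIRPIN_MODE, BR_STATE_FORWARDING
 * and struct example_port are stand-ins for this example. */
#include <stdbool.h>
#include <stdio.h>

#define BR_HAIRPIN_MODE         (1 << 0)        /* stand-in flag value */
#define BR_STATE_FORWARDING     3               /* stand-in state value */

struct example_port {
        unsigned long flags;
        int state;
};

/* Deliver if the port is forwarding and either hairpin mode is on or
 * the frame came in on a different port. */
static bool example_should_deliver(const struct example_port *p, bool same_port)
{
        return ((p->flags & BR_HAIRPIN_MODE) || !same_port) &&
               p->state == BR_STATE_FORWARDING;
}

int main(void)
{
        struct example_port plain   = { 0, BR_STATE_FORWARDING };
        struct example_port hairpin = { BR_HAIRPIN_MODE, BR_STATE_FORWARDING };

        printf("same port, hairpin off: %d\n", example_should_deliver(&plain, true));   /* 0: dropped */
        printf("same port, hairpin on:  %d\n", example_should_deliver(&hairpin, true)); /* 1: reflected */
        printf("different port:         %d\n", example_should_deliver(&plain, false));  /* 1: forwarded */
        return 0;
}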
154 lines | 3.3 KiB | C
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
                p->state == BR_STATE_FORWARDING);
}

static inline unsigned packet_length(const struct sk_buff *skb)
{
        return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
}

int br_dev_queue_push_xmit(struct sk_buff *skb)
{
        /* drop mtu oversized packets except gso */
        if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
                kfree_skb(skb);
        else {
                /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
                if (nf_bridge_maybe_copy_header(skb))
                        kfree_skb(skb);
                else {
                        skb_push(skb, ETH_HLEN);

                        dev_queue_xmit(skb);
                }
        }

        return 0;
}

int br_forward_finish(struct sk_buff *skb)
{
        return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
                       br_dev_queue_push_xmit);

}

static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
        skb->dev = to->dev;
        NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
                br_forward_finish);
}

static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
        struct net_device *indev;

        if (skb_warn_if_lro(skb)) {
                kfree_skb(skb);
                return;
        }

        indev = skb->dev;
        skb->dev = to->dev;
        skb_forward_csum(skb);

        NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
                br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
        if (should_deliver(to, skb)) {
                __br_deliver(to, skb);
                return;
        }

        kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
        if (should_deliver(to, skb)) {
                __br_forward(to, skb);
                return;
        }

        kfree_skb(skb);
}

/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                     void (*__packet_hook)(const struct net_bridge_port *p,
                                           struct sk_buff *skb))
{
        struct net_bridge_port *p;
        struct net_bridge_port *prev;

        prev = NULL;

        list_for_each_entry_rcu(p, &br->port_list, list) {
                if (should_deliver(p, skb)) {
                        if (prev != NULL) {
                                struct sk_buff *skb2;

                                if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                                        br->dev->stats.tx_dropped++;
                                        kfree_skb(skb);
                                        return;
                                }

                                __packet_hook(prev, skb2);
                        }

                        prev = p;
                }
        }

        if (prev != NULL) {
                __packet_hook(prev, skb);
                return;
        }

        kfree_skb(skb);
}


/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
        br_flood(br, skb, __br_deliver);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
{
        br_flood(br, skb, __br_forward);
}