// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame handler and other utility functions for HSR and PRP.
 */

#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_forward.h"
#include "hsr_framereg.h"

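/* Return true if @protocol is neither ETH_P_PRP nor ETH_P_HSR, i.e. the
 * frame cannot be valid tagged traffic from a doubly attached node (DAN).
 */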
bool hsr_invalid_dan_ingress_frame(__be16 protocol)
{
	return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
}

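/* rx_handler attached to every slave (and interlink) port. Frames sent by
 * this node itself are dropped, frames the protocol does not accept on a
 * DAN port are passed up the stack unchanged, and everything else is
 * handed to hsr_forward_skb().
 */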
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	__be16 protocol;

	/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	port = hsr_port_get_rcu(skb->dev);
	if (!port)
		goto finish_pass;
	hsr = port->hsr;

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* For HSR, only tagged frames are expected (unless the device offloads
	 * HSR tag removal), but for PRP there can be non-tagged frames as
	 * well from singly attached nodes (SANs).
	 */
	protocol = eth_hdr(skb)->h_proto;

	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    port->type != HSR_PT_INTERLINK &&
	    hsr->proto_ops->invalid_dan_ingress_frame &&
	    hsr->proto_ops->invalid_dan_ingress_frame(protocol))
		goto finish_pass;

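	/* The rx_handler sees skb->data just past the Ethernet header; push
	 * it back and redo the header offsets so hsr_forward_skb() gets the
	 * complete frame. For tagged frames the network header starts after
	 * the HSR/PRP tag.
	 */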
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
	    protocol == htons(ETH_P_HSR))
		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
	skb_reset_mac_len(skb);

	hsr_forward_skb(skb, port);

finish_consume:
	return RX_HANDLER_CONSUMED;

finish_pass:
	return RX_HANDLER_PASS;
}

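/* Return true if dev is already in use as an HSR/PRP port, i.e. our
 * rx_handler is attached to it.
 */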
bool hsr_port_exists(const struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}

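/* Sanity-check a candidate slave device. Returns 0 if it can be enslaved,
 * otherwise a negative errno with the reason reported via extack.
 */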
static int hsr_check_dev_ok(struct net_device *dev,
			    struct netlink_ext_ack *extack)
{
	/* Don't allow HSR on loopback or non-Ethernet devices */
	if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
		return -EINVAL;
	}

	/* Don't allow enslaving HSR devices */
	if (is_hsr_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot create trees of HSR devices.");
		return -EINVAL;
	}

	if (hsr_port_exists(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device is already a HSR slave.");
		return -EINVAL;
	}

	if (is_vlan_dev(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device does not support bridging.");
		return -EOPNOTSUPP;
	}

	/* HSR over bonded devices has not been tested, but I'm not sure it
	 * won't work...
	 */

	return 0;
}

/* Setup device to be added to the HSR bridge. */
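/* Only called for non-master ports: put the slave in promiscuous mode
 * (unless forwarding is offloaded to hardware), link it under the hsr
 * master device and attach hsr_handle_frame() as its rx_handler,
 * unwinding everything again on failure.
 */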
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
			     struct hsr_port *port,
			     struct netlink_ext_ack *extack)
{
	struct net_device *hsr_dev;
	struct hsr_port *master;
	int res;

	/* Don't use promiscuous mode for offload since L2 frame forward
	 * happens at the offloaded hardware.
	 */
	if (!port->hsr->fwd_offloaded) {
		res = dev_set_promiscuity(dev, 1);
		if (res)
			return res;
	}

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr_dev = master->dev;

	res = netdev_upper_dev_link(dev, hsr_dev, extack);
	if (res)
		goto fail_upper_dev_link;

	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	dev_disable_lro(dev);

	return 0;

fail_rx_handler:
	netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
	if (!port->hsr->fwd_offloaded)
		dev_set_promiscuity(dev, -1);

	return res;
}

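/* Add a port of the given type to the hsr instance. Non-master ports are
 * validated with hsr_check_dev_ok() and wired up via hsr_portdev_setup();
 * the master's features and MTU are refreshed afterwards.
 */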
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type, struct netlink_ext_ack *extack)
{
	struct hsr_port *port, *master;
	int res;

	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev, extack);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(hsr, dev, port, extack);
		if (res)
			goto fail_dev_setup;
	}

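	/* Publish the port on the RCU-protected list only after it has been
	 * fully initialised.
	 */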
	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}

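/* Counterpart of hsr_add_port(): unlink the port, and for slave/interlink
 * ports undo the device setup (rx_handler, promiscuous mode, upper-device
 * link); the port itself is freed after an RCU grace period.
 */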
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	if (port != master) {
		netdev_update_features(master->dev);
		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		netdev_rx_handler_unregister(port->dev);
		if (!port->hsr->fwd_offloaded)
			dev_set_promiscuity(port->dev, -1);
		netdev_upper_dev_unlink(port->dev, master->dev);
	}

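	/* Wait for readers that may still hold a reference to the port via
	 * the RCU-protected port list before freeing it.
	 */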
	synchronize_rcu();

	kfree(port);
}