Merge branch 'net-rmnet-fix-several-bugs'

Taehee Yoo says:

====================
net: rmnet: fix several bugs

This patchset fixes several bugs in the RMNET module.

1. The first patch fixes a NULL pointer dereference in rmnet_newlink().
When an rmnet interface is created, IFLA_LINK is used without a NULL
check, so a kernel panic occurs if userspace does not set IFLA_LINK.
This patch adds the missing NULL check.
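The check added at the top of rmnet_newlink() is:

    if (!tb[IFLA_LINK]) {
            NL_SET_ERR_MSG_MOD(extack, "link not specified");
            return -EINVAL;
    }

    real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));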

2. The second patch fixes a NULL pointer dereference in rmnet_changelink().
rmnet_changelink() uses IFLA_LINK to get the real device, but IFLA_LINK
should not be used there; the real device is already known from the
rmnet device's private data.
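rmnet_changelink() now takes the real device from priv instead of the
netlink attribute:

    real_dev = priv->real_dev;
    if (!rmnet_is_real_dev_registered(real_dev))
            return -ENODEV;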

3. The third patch fixes a suspicious RCU usage warning in rmnet_get_port().
rmnet_get_port() uses rcu_dereference_rtnl(), but it is called from the
datapath, so rcu_dereference_bh() should be used instead.
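The helper is renamed to rmnet_get_port_rcu() and becomes:

    struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
    {
            if (rmnet_is_real_dev_registered(real_dev))
                    return rcu_dereference_bh(real_dev->rx_handler_data);
            else
                    return NULL;
    }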

4. The fourth patch fixes a suspicious RCU usage warning in
rmnet_force_unassociate_device().
An RCU read-side critical section must not sleep, but
unregister_netdevice_queue(), which rmnet_force_unassociate_device()
calls, may sleep, so the RCU warning occurs.
This patch removes the rcu_read_lock() in
rmnet_force_unassociate_device() because it is unnecessary.
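The function runs under rtnl_lock(), so the loop that queues the rmnet
devices for unregistration no longer sits inside an RCU read-side
critical section:

    port = rmnet_get_port_rtnl(real_dev);
    ...
    hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
            unregister_netdevice_queue(ep->egress_dev, &list);
            ...
    }
    unregister_netdevice_many(&list);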

5. The fifth patch fixes a duplicate MUX ID case.
An rmnet MUX ID must be unique, so creating an rmnet interface with a
duplicate MUX ID is not allowed.
However, only rmnet_newlink() checks this condition; rmnet_changelink()
does not, so a duplicate MUX ID could still be set.
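rmnet_changelink() now rejects a MUX ID that is already in use:

    if (data[IFLA_RMNET_MUX_ID]) {
            mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
            if (rmnet_get_endpoint(port, mux_id)) {
                    NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
                    return -EINVAL;
            }
            ...
    }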

6. The sixth patch fixes upper/lower interface relationship problems.
When IFLA_LINK is used, the upper/lower device infrastructure should
also be used, because it checks the maximum depth of upper/lower
interfaces, detects circular interface relationships, and so on.
This patch uses netdev_upper_dev_link().
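rmnet_newlink() now links the rmnet device on top of the real device
(and rmnet_dellink() does the matching netdev_upper_dev_unlink()):

    err = netdev_upper_dev_link(real_dev, dev, extack);
    if (err < 0)
            goto err2;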

7. The seventh patch fixes bridge-related problems:
a) ->ndo_del_slave() doesn't work.
b) Circular upper/lower interface relationships are not detected.
c) Stack overflow caused by an excessively deep chain of upper/lower
interfaces is not prevented.
d) The number of lower interfaces is not checked.
e) Panics occur for several reasons.
These are all symptoms of the same underlying problem, so this patch
fixes them together.
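rmnet_add_bridge() now refuses a second bridge and attaches the slave
through the master upper/lower infrastructure, which provides the loop
and depth checks and makes ->ndo_del_slave() work:

    if (port->rmnet_mode != RMNET_EPMODE_VND)
            return -EINVAL;
    ...
    err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
                                       extack);
    if (err) {
            rmnet_unregister_real_device(slave_dev);
            return err;
    }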

8. The eighth patch fixes a packet forwarding issue in bridge mode.
Packet forwarding does not work in rmnet bridge mode: forwarding a
packet requires an skb_push() for the ethernet header, but skb_push()
is never called, so the ethernet header is lost.
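rmnet_bridge_handler() now restores the ethernet header before
forwarding:

    static void
    rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
    {
            if (skb_mac_header_was_set(skb))
                    skb_push(skb, skb->mac_len);

            if (bridge_dev) {
                    skb->dev = bridge_dev;
                    dev_queue_xmit(skb);
            }
    }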

Change log:
 - update commit logs.
 - drop two patches from this patchset because they were aimed at the
   wrong target branch:
   - ("net: rmnet: add missing module alias")
   - ("net: rmnet: print error message when command fails")
 - remove an unnecessary rcu_read_lock() in the third patch.
 - use rcu_dereference_bh() instead of rcu_dereference() in the third patch.
 - do not allow adding a bridge device if the rmnet interface is already
   in bridge mode, in the seventh patch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2020-02-27 11:45:07 -08:00
commit 795c03a5d0
5 changed files with 98 additions and 107 deletions

drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c

@@ -13,25 +13,6 @@
#include "rmnet_vnd.h"
#include "rmnet_private.h"
/* Locking scheme -
* The shared resource which needs to be protected is realdev->rx_handler_data.
* For the writer path, this is using rtnl_lock(). The writer paths are
* rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
* paths are already called with rtnl_lock() acquired in. There is also an
* ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
* dereference here, we will need to use rtnl_dereference(). Dev list writing
* needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
* For the reader path, the real_dev->rx_handler_data is called in the TX / RX
* path. We only need rcu_read_lock() for these scenarios. In these cases,
* the rcu_read_lock() is held in __dev_queue_xmit() and
* netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
* to get the relevant information. For dev list reading, we again acquire
* rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
* We also use unregister_netdevice_many() to free all rmnet devices in
* rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in
* same context.
*/
/* Local Definitions and Declarations */
static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
@@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
return rtnl_dereference(real_dev->rx_handler_data);
}
static int rmnet_unregister_real_device(struct net_device *real_dev,
struct rmnet_port *port)
static int rmnet_unregister_real_device(struct net_device *real_dev)
{
struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
if (port->nr_rmnet_devs)
return -EINVAL;
@@ -61,9 +43,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
kfree(port);
/* release reference on real_dev */
dev_put(real_dev);
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
@@ -89,9 +68,6 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return -EBUSY;
}
/* hold on to real dev for MAP data */
dev_hold(real_dev);
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
@@ -99,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return 0;
}
static void rmnet_unregister_bridge(struct net_device *dev,
struct rmnet_port *port)
static void rmnet_unregister_bridge(struct rmnet_port *port)
{
struct rmnet_port *bridge_port;
struct net_device *bridge_dev;
struct net_device *bridge_dev, *real_dev, *rmnet_dev;
struct rmnet_port *real_port;
if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
return;
/* bridge slave handling */
rmnet_dev = port->rmnet_dev;
if (!port->nr_rmnet_devs) {
bridge_dev = port->bridge_ep;
/* bridge device */
real_dev = port->bridge_ep;
bridge_dev = port->dev;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
bridge_port->bridge_ep = NULL;
bridge_port->rmnet_mode = RMNET_EPMODE_VND;
real_port = rmnet_get_port_rtnl(real_dev);
real_port->bridge_ep = NULL;
real_port->rmnet_mode = RMNET_EPMODE_VND;
} else {
/* real device */
bridge_dev = port->bridge_ep;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
rmnet_unregister_real_device(bridge_dev, bridge_port);
port->bridge_ep = NULL;
port->rmnet_mode = RMNET_EPMODE_VND;
}
netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
rmnet_unregister_real_device(bridge_dev);
}
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
@@ -135,6 +116,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
int err = 0;
u16 mux_id;
if (!tb[IFLA_LINK]) {
NL_SET_ERR_MSG_MOD(extack, "link not specified");
return -EINVAL;
}
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !dev)
return -ENODEV;
@@ -157,7 +143,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err1;
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto err2;
port->rmnet_mode = mode;
port->rmnet_dev = dev;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@@ -173,8 +164,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return 0;
err2:
unregister_netdevice(dev);
rmnet_vnd_dellink(mux_id, port, ep);
err1:
rmnet_unregister_real_device(real_dev, port);
rmnet_unregister_real_device(real_dev);
err0:
kfree(ep);
return err;
@@ -183,77 +177,74 @@ err0:
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
struct net_device *real_dev, *bridge_dev;
struct rmnet_port *real_port, *bridge_port;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u8 mux_id;
u8 mux_id = priv->mux_id;
real_dev = priv->real_dev;
if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
if (!rmnet_is_real_dev_registered(real_dev))
return;
port = rmnet_get_port_rtnl(real_dev);
real_port = rmnet_get_port_rtnl(real_dev);
bridge_dev = real_port->bridge_ep;
if (bridge_dev) {
bridge_port = rmnet_get_port_rtnl(bridge_dev);
rmnet_unregister_bridge(bridge_port);
}
mux_id = rmnet_vnd_get_mux(dev);
ep = rmnet_get_endpoint(port, mux_id);
ep = rmnet_get_endpoint(real_port, mux_id);
if (ep) {
hlist_del_init_rcu(&ep->hlnode);
rmnet_unregister_bridge(dev, port);
rmnet_vnd_dellink(mux_id, port, ep);
rmnet_vnd_dellink(mux_id, real_port, ep);
kfree(ep);
}
rmnet_unregister_real_device(real_dev, port);
netdev_upper_dev_unlink(real_dev, dev);
rmnet_unregister_real_device(real_dev);
unregister_netdevice_queue(dev, head);
}
static void rmnet_force_unassociate_device(struct net_device *dev)
static void rmnet_force_unassociate_device(struct net_device *real_dev)
{
struct net_device *real_dev = dev;
struct hlist_node *tmp_ep;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
unsigned long bkt_ep;
LIST_HEAD(list);
if (!rmnet_is_real_dev_registered(real_dev))
return;
port = rmnet_get_port_rtnl(real_dev);
ASSERT_RTNL();
port = rmnet_get_port_rtnl(dev);
rcu_read_lock();
rmnet_unregister_bridge(dev, port);
hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
unregister_netdevice_queue(ep->egress_dev, &list);
rmnet_vnd_dellink(ep->mux_id, port, ep);
hlist_del_init_rcu(&ep->hlnode);
kfree(ep);
if (port->nr_rmnet_devs) {
/* real device */
rmnet_unregister_bridge(port);
hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
unregister_netdevice_queue(ep->egress_dev, &list);
netdev_upper_dev_unlink(real_dev, ep->egress_dev);
rmnet_vnd_dellink(ep->mux_id, port, ep);
hlist_del_init_rcu(&ep->hlnode);
kfree(ep);
}
rmnet_unregister_real_device(real_dev);
unregister_netdevice_many(&list);
} else {
rmnet_unregister_bridge(port);
}
rcu_read_unlock();
unregister_netdevice_many(&list);
rmnet_unregister_real_device(real_dev, port);
}
static int rmnet_config_notify_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct net_device *dev = netdev_notifier_info_to_dev(data);
struct net_device *real_dev = netdev_notifier_info_to_dev(data);
if (!dev)
if (!rmnet_is_real_dev_registered(real_dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
netdev_dbg(dev, "Kernel unregister\n");
rmnet_force_unassociate_device(dev);
netdev_dbg(real_dev, "Kernel unregister\n");
rmnet_force_unassociate_device(real_dev);
break;
default:
@@ -295,16 +286,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (!dev)
return -ENODEV;
real_dev = __dev_get_by_index(dev_net(dev),
nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
real_dev = priv->real_dev;
if (!rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
if (rmnet_get_endpoint(port, mux_id)) {
NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
return -EINVAL;
}
ep = rmnet_get_endpoint(port, priv->mux_id);
if (!ep)
return -ENODEV;
@@ -379,11 +372,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.fill_info = rmnet_fill_info,
};
/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
return rcu_dereference_rtnl(real_dev->rx_handler_data);
return rcu_dereference_bh(real_dev->rx_handler_data);
else
return NULL;
}
@@ -409,7 +401,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
struct rmnet_port *port, *slave_port;
int err;
port = rmnet_get_port(real_dev);
port = rmnet_get_port_rtnl(real_dev);
/* If there is more than one rmnet dev attached, its probably being
* used for muxing. Skip the briding in that case
@@ -417,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (port->nr_rmnet_devs > 1)
return -EINVAL;
if (port->rmnet_mode != RMNET_EPMODE_VND)
return -EINVAL;
if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
@@ -424,9 +419,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (err)
return -EBUSY;
slave_port = rmnet_get_port(slave_dev);
err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
extack);
if (err) {
rmnet_unregister_real_device(slave_dev);
return err;
}
slave_port = rmnet_get_port_rtnl(slave_dev);
slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
slave_port->bridge_ep = real_dev;
slave_port->rmnet_dev = rmnet_dev;
port->rmnet_mode = RMNET_EPMODE_BRIDGE;
port->bridge_ep = slave_dev;
@@ -438,16 +441,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
int rmnet_del_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
struct net_device *real_dev = priv->real_dev;
struct rmnet_port *port, *slave_port;
struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
port = rmnet_get_port(real_dev);
port->rmnet_mode = RMNET_EPMODE_VND;
port->bridge_ep = NULL;
slave_port = rmnet_get_port(slave_dev);
rmnet_unregister_real_device(slave_dev, slave_port);
rmnet_unregister_bridge(port);
netdev_dbg(slave_dev, "removed from rmnet as slave\n");
return 0;
@@ -473,8 +469,8 @@ static int __init rmnet_init(void)
static void __exit rmnet_exit(void)
{
unregister_netdevice_notifier(&rmnet_dev_notifier);
rtnl_link_unregister(&rmnet_link_ops);
unregister_netdevice_notifier(&rmnet_dev_notifier);
}
module_init(rmnet_init)

drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h

@@ -28,6 +28,7 @@ struct rmnet_port {
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
struct net_device *rmnet_dev;
};
extern struct rtnl_link_ops rmnet_link_ops;
@@ -65,7 +66,7 @@ struct rmnet_priv {
struct rmnet_priv_stats stats;
};
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev,

drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c

@@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
if (skb_mac_header_was_set(skb))
skb_push(skb, skb->mac_len);
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
@@ -184,7 +187,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
return RX_HANDLER_PASS;
dev = skb->dev;
port = rmnet_get_port(dev);
port = rmnet_get_port_rcu(dev);
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
@@ -217,7 +220,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
port = rmnet_get_port_rcu(skb->dev);
if (!port)
goto drop;

drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c

@@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
return 0;
}
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
struct rmnet_priv *priv;
priv = netdev_priv(rmnet_dev);
return priv->mux_id;
}
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);

drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h

@@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */