Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
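For reference, the merged version of the function keeps the earlier
placement from 'net' but, per 'net-next', reports failure to the caller
instead of calling phy_error(). A rough sketch of the resolved shape,
using the usual phy.c helpers:

	static int phy_disable_interrupts(struct phy_device *phydev)
	{
		int err;

		/* Disable PHY interrupts */
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
		if (err)
			return err;

		/* Clear any pending interrupt */
		return phy_clear_interrupt(phydev);
	}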
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
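The net effect in xfrm4_fill_dst() is, roughly, that the rt_table_id
copy disappears while the copy of the new 'net' member stays alongside
the existing ones:

	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);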
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial;
the RDMA tree's merge commit was used as a guide here, and
here are their notes:
====================
Bug fixes found by the syzkaller bot were taken into the for-rc
branch after development for the 4.17 merge window had already begun
in the for-next branch, so there were fairly non-trivial merge issues
that needed to be resolved between the for-rc branch and the for-next
branch. This merge resolves those conflicts and provides a unified
base upon which ongoing development for 4.17 can be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f95
(IB/mlx5: Fix cleanup order on unload) added to for-rc and
commit b5ca15ad7e (IB/mlx5: Add proper representors support)
added as part of the devel cycle both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
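For context, the init/de-init list the notes refer to is the mlx5_ib
profile: a table of stages, each pairing an init function with its
cleanup. A trimmed sketch, with names as used by the representors patch:

	static const struct mlx5_ib_profile rep_profile = {
		STAGE_CREATE(MLX5_IB_STAGE_INIT,
			     mlx5_ib_stage_init_init,
			     mlx5_ib_stage_init_cleanup),
		/* ... more stages; init runs in order, cleanup in reverse */
	};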
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *daddr;
+	struct in6_addr *daddr, old_daddr;
+	__be32 fl6_flowlabel = 0;
+	__be32 old_fl6_flowlabel;
+	__be16 old_dport;
 	int addr_type;
 	int err;
-	__be32 fl6_flowlabel = 0;
 
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
@@ -238,9 +240,13 @@ ipv4_connected:
 		}
 	}
 
+	/* save the current peer information before updating it */
+	old_daddr = sk->sk_v6_daddr;
+	old_fl6_flowlabel = np->flow_label;
+	old_dport = inet->inet_dport;
+
 	sk->sk_v6_daddr = *daddr;
 	np->flow_label = fl6_flowlabel;
-
 	inet->inet_dport = usin->sin6_port;
 
 	/*
@@ -250,11 +256,12 @@ ipv4_connected:
 
 	err = ip6_datagram_dst_update(sk, true);
 	if (err) {
-		/* Reset daddr and dport so that udp_v6_early_demux()
-		 * fails to find this socket
+		/* Restore the socket peer info, to keep it consistent with
+		 * the old socket state
 		 */
-		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
-		inet->inet_dport = 0;
+		sk->sk_v6_daddr = old_daddr;
+		np->flow_label = old_fl6_flowlabel;
+		inet->inet_dport = old_dport;
 		goto out;
 	}
 
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -126,7 +126,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
 	struct ip6_tnl *t, *cand = NULL;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
-			gre_proto == htons(ETH_P_ERSPAN)) ?
+			gre_proto == htons(ETH_P_ERSPAN) ||
+			gre_proto == htons(ETH_P_ERSPAN2)) ?
 			ARPHRD_ETHER : ARPHRD_IP6GRE;
 	int score, cand_score = 4;
 
@@ -905,6 +906,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		truncate = true;
 	}
 
+	if (skb_cow_head(skb, dev->needed_headroom))
+		goto tx_err;
+
 	t->parms.o_flags &= ~TUNNEL_KEY;
 	IPCB(skb)->flags = 0;
 
@@ -947,6 +951,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 					       md->u.md2.dir,
 					       get_hwid(&md->u.md2),
 					       truncate, false);
+		} else {
+			goto tx_err;
 		}
 	} else {
 		switch (skb->protocol) {
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
 	*(opt++) = (rd_len >> 3);
 	opt += 6;
 
-	memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
+	skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
+		      rd_len - 8);
 }
 
 void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
 
-static void rt6_uncached_list_add(struct rt6_info *rt)
+void rt6_uncached_list_add(struct rt6_info *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
 
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void rt6_uncached_list_del(struct rt6_info *rt)
+void rt6_uncached_list_del(struct rt6_info *rt)
 {
 	if (!list_empty(&rt->rt6i_uncached)) {
 		struct uncached_list *ul = rt->rt6i_uncached_list;
@@ -1514,7 +1514,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
 	}
 }
 
-static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
+static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
+					 struct rt6_info *rt, int mtu)
+{
+	/* If the new MTU is lower than the route PMTU, this new MTU will be the
+	 * lowest MTU in the path: always allow updating the route PMTU to
+	 * reflect PMTU decreases.
+	 *
+	 * If the new MTU is higher, and the route PMTU is equal to the local
+	 * MTU, this means the old MTU is the lowest in the path, so allow
+	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
+	 * handle this.
+	 */
+
+	if (dst_mtu(&rt->dst) >= mtu)
+		return true;
+
+	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
+		return true;
+
+	return false;
+}
+
+static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
+				       struct rt6_info *rt, int mtu)
 {
 	struct rt6_exception_bucket *bucket;
 	struct rt6_exception *rt6_ex;
@@ -1523,20 +1546,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
 	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
 					lockdep_is_held(&rt6_exception_lock));
 
-	if (bucket) {
-		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
-			hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
-				struct rt6_info *entry = rt6_ex->rt6i;
-				/* For RTF_CACHE with rt6i_pmtu == 0
-				 * (i.e. a redirected route),
-				 * the metrics of its rt->dst.from has already
-				 * been updated.
-				 */
-				if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu)
-					entry->rt6i_pmtu = mtu;
-			}
-			bucket++;
+	if (!bucket)
+		return;
+
+	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
+		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
+			struct rt6_info *entry = rt6_ex->rt6i;
+
+			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
+			 * route), the metrics of its rt->dst.from have already
+			 * been updated.
+			 */
+			if (entry->rt6i_pmtu &&
+			    rt6_mtu_change_route_allowed(idev, entry, mtu))
+				entry->rt6i_pmtu = mtu;
 		}
+		bucket++;
 	}
 }
 
@@ -3899,25 +3924,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	   Since RFC 1981 doesn't include administrative MTU increase
 	   update PMTU increase is a MUST. (i.e. jumbo frame)
 	 */
-	/*
-	   If new MTU is less than route PMTU, this new MTU will be the
-	   lowest MTU in the path, update the route PMTU to reflect PMTU
-	   decreases; if new MTU is greater than route PMTU, and the
-	   old MTU is the lowest MTU in the path, update the route PMTU
-	   to reflect the increase. In this case if the other nodes' MTU
-	   also have the lowest MTU, TOO BIG MESSAGE will be lead to
-	   PMTU discovery.
-	 */
 	if (rt->dst.dev == arg->dev &&
-	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
 	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
 		spin_lock_bh(&rt6_exception_lock);
-		if (dst_mtu(&rt->dst) >= arg->mtu ||
-		    (dst_mtu(&rt->dst) < arg->mtu &&
-		     dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
+		if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
+		    rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
 			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
-		}
-		rt6_exceptions_update_pmtu(rt, arg->mtu);
+		rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
 		spin_unlock_bh(&rt6_exception_lock);
 	}
 	return 0;
@@ -4189,6 +4202,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 				r_cfg.fc_encap_type = nla_get_u16(nla);
 		}
 
+		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
 		rt = ip6_route_info_create(&r_cfg, extack);
 		if (IS_ERR(rt)) {
 			err = PTR_ERR(rt);
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
 /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
 int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct dst_entry *dst = skb_dst(skb);
+	struct net *net = dev_net(dst->dev);
 	struct ipv6hdr *hdr, *inner_hdr;
 	struct ipv6_sr_hdr *isrh;
 	int hdrlen, tot_len, err;
@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 	isrh->nexthdr = proto;
 
 	hdr->daddr = isrh->segments[isrh->first_segment];
-	set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);
+	set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
 
 #ifdef CONFIG_IPV6_SEG6_HMAC
 	if (sr_has_hmac(isrh)) {
@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
 
 	slwt = seg6_lwt_lwtunnel(newts);
 
-	err = dst_cache_init(&slwt->cache, GFP_KERNEL);
+	err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
 	if (err) {
 		kfree(newts);
 		return err;
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_reset_network_header(skb);
 	skb_mac_header_rebuild(skb);
-	eth_hdr(skb)->h_proto = skb->protocol;
+	if (skb->mac_len)
+		eth_hdr(skb)->h_proto = skb->protocol;
 
 	err = 0;
 
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
 	xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
 	xdst->u.rt6.rt6i_src = rt->rt6i_src;
+	INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
+	rt6_uncached_list_add(&xdst->u.rt6);
+	atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
 
 	return 0;
 }
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
 	if (likely(xdst->u.rt6.rt6i_idev))
 		in6_dev_put(xdst->u.rt6.rt6i_idev);
 	dst_destroy_metrics_generic(dst);
+	if (xdst->u.rt6.rt6i_uncached_list)
+		rt6_uncached_list_del(&xdst->u.rt6);
 	xfrm_dst_destroy(xdst);
 }