Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
In netdevice.h we removed the structure in net-next that is being changed in 'net'.

In macsec.c and rtnetlink.c we have overlaps between fixes in 'net' and the u64 attribute changes in 'net-next'.

The mlx5 conflicts have to do with vxlan support dependencies.

Signed-off-by: David S. Miller <davem@davemloft.net>
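For context on the macsec.c/rtnetlink.c overlap: the 'net-next' side converted 64-bit netlink attributes to the alignment-aware helpers, which take an extra pad-attribute argument. A minimal sketch of that conversion pattern follows; nla_put_u64_64bit() and IFLA_PAD are the real helper and pad attribute used in this tree, while ATTR_EXAMPLE is a hypothetical attribute id standing in for whichever attribute is being emitted:

#include <linux/types.h>
#include <net/netlink.h>

/* Sketch of the net-next u64 attribute pattern: nla_put_u64_64bit()
 * inserts a pad attribute (here IFLA_PAD) when needed so the 64-bit
 * payload lands on an 8-byte boundary. ATTR_EXAMPLE is a stand-in for
 * a real attribute type.
 */
static int put_counter_attr(struct sk_buff *skb, u64 value)
{
	if (nla_put_u64_64bit(skb, ATTR_EXAMPLE, value, IFLA_PAD))
		return -EMSGSIZE;
	return 0;
}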
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,18 +21,19 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
 /* called with RTNL */
 static int get_bridge_ifindices(struct net *net, int *indices, int num)
 {
 	struct net_device *dev;
 	int i = 0;
 
-	for_each_netdev(net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		if (i >= num)
 			break;
 		if (dev->priv_flags & IFF_EBRIDGE)
 			indices[i++] = dev->ifindex;
 	}
+	rcu_read_unlock();
 
 	return i;
 }
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1279,6 +1279,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 	struct br_ip saddr;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
+	unsigned int offset = skb_transport_offset(skb);
 	__be32 group;
 	int err = 0;
 
@@ -1289,14 +1290,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
 	group = ih->group;
 
-	if (skb->len == sizeof(*ih)) {
+	if (skb->len == offset + sizeof(*ih)) {
 		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
 
 		if (!max_delay) {
 			max_delay = 10 * HZ;
 			group = 0;
 		}
-	} else if (skb->len >= sizeof(*ih3)) {
+	} else if (skb->len >= offset + sizeof(*ih3)) {
 		ih3 = igmpv3_query_hdr(skb);
 		if (ih3->nsrcs)
 			goto out;
@@ -1357,6 +1358,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 	struct br_ip saddr;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
+	unsigned int offset = skb_transport_offset(skb);
 	const struct in6_addr *group = NULL;
 	bool is_general_query;
 	int err = 0;
@@ -1366,8 +1368,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 	    (port && port->state == BR_STATE_DISABLED))
 		goto out;
 
-	if (skb->len == sizeof(*mld)) {
-		if (!pskb_may_pull(skb, sizeof(*mld))) {
+	if (skb->len == offset + sizeof(*mld)) {
+		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
 			err = -EINVAL;
 			goto out;
 		}
@@ -1376,7 +1378,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		if (max_delay)
 			group = &mld->mld_mca;
 	} else {
-		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
 			err = -EINVAL;
 			goto out;
 		}
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work)
 	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
 	spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
-	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
 		flow_entry_kill(fce, xfrm);
+		atomic_dec(&xfrm->flow_cache_gc_count);
+		WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+	}
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
@@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
 				     struct netns_xfrm *xfrm)
 {
 	if (deleted) {
+		atomic_add(deleted, &xfrm->flow_cache_gc_count);
 		fcp->hash_count -= deleted;
 		spin_lock_bh(&xfrm->flow_cache_gc_lock);
 		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
@@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 		if (fcp->hash_count > fc->high_watermark)
 			flow_cache_shrink(fc, fcp);
 
+		if (fcp->hash_count > 2 * fc->high_watermark ||
+		    atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+			atomic_inc(&net->xfrm.flow_cache_genid);
+			flo = ERR_PTR(-ENOBUFS);
+			goto ret_object;
+		}
+
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->net = net;
@@ -446,6 +457,7 @@ int flow_cache_init(struct net *net)
 	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
 	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
 	mutex_init(&net->xfrm.flow_flush_sem);
	atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
 	fc->hash_shift = 10;
 	fc->low_watermark = 2 * flow_cache_hash_size(fc);
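The flow.c hunks above bound how much freed-but-unreclaimed work can pile up: entries queued for garbage collection are counted, and once the backlog passes a watermark the lookup path fails fast with -ENOBUFS instead of allocating more. A self-contained toy of the same backpressure pattern (the names and watermark here are illustrative, not the kernel's):

#include <errno.h>
#include <stdatomic.h>

#define GC_HIGH_WATERMARK 4096

static atomic_int gc_backlog;	/* entries queued for GC, not yet freed */

/* Producer: account queued entries, as flow_cache_queue_garbage() now
 * does with flow_cache_gc_count. */
void queue_for_gc(int deleted)
{
	atomic_fetch_add(&gc_backlog, deleted);
}

/* Allocation path: refuse new entries once the GC worker is too far
 * behind, mirroring the -ENOBUFS bail-out added to flow_cache_lookup(). */
int may_allocate(void)
{
	if (atomic_load(&gc_backlog) > GC_HIGH_WATERMARK)
		return -ENOBUFS;
	return 0;
}

/* GC worker: decrement per freed entry, as flow_cache_gc_task() does. */
void gc_free_one(void)
{
	atomic_fetch_sub(&gc_backlog, 1);
}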
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1173,14 +1173,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 
 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 {
-	struct rtnl_link_ifmap map = {
-		.mem_start = dev->mem_start,
-		.mem_end = dev->mem_end,
-		.base_addr = dev->base_addr,
-		.irq = dev->irq,
-		.dma = dev->dma,
-		.port = dev->if_port,
-	};
+	struct rtnl_link_ifmap map;
+
+	memset(&map, 0, sizeof(map));
+	map.mem_start = dev->mem_start;
+	map.mem_end = dev->mem_end;
+	map.base_addr = dev->base_addr;
+	map.irq = dev->irq;
+	map.dma = dev->dma;
+	map.port = dev->if_port;
+
 	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
 		return -EMSGSIZE;
 
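The rtnetlink.c hunk is where an infoleak fix in 'net' collides with the nla_put_64bit() conversion in 'net-next': designated initializers set the named members, but the C standard leaves compiler-inserted padding bytes unspecified, so copying the whole struct out to userspace can leak kernel stack contents. A standalone illustration of why the memset() matters (struct padded is made up; the padding behavior is standard C):

#include <string.h>

struct padded {
	unsigned short port;	/* compiler inserts 6 padding bytes on LP64 */
	unsigned long long base;
};

/* Risky: the initializer sets the named members, but the padding bytes
 * are unspecified, and memcpy() ships them out verbatim. */
void fill_leaky(void *dst)
{
	struct padded tmp = { .port = 1, .base = 2 };

	memcpy(dst, &tmp, sizeof(tmp));
}

/* Safe: memset() zeroes members and padding alike, which is what the
 * 'net' fix does to rtnl_link_ifmap before nla_put_64bit(). */
void fill_safe(void *dst)
{
	struct padded tmp;

	memset(&tmp, 0, sizeof(tmp));
	tmp.port = 1;
	tmp.base = 2;
	memcpy(dst, &tmp, sizeof(tmp));
}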
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -227,8 +227,6 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
 	int err = -ENOSYS;
 	const struct net_offload **offloads;
 
-	udp_tunnel_gro_complete(skb, nhoff);
-
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
@@ -237,6 +235,8 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
 
 	err = ops->callbacks.gro_complete(skb, nhoff);
 
+	skb_set_inner_mac_header(skb, nhoff);
+
 out_unlock:
 	rcu_read_unlock();
 
@@ -412,6 +412,8 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 
 	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
 
+	skb_set_inner_mac_header(skb, nhoff + guehlen);
+
 out_unlock:
 	rcu_read_unlock();
 	return err;
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *tdev;	/* Device to other host */
 	int err;
+	int mtu;
 
 	if (!dst) {
 		dev->stats.tx_carrier_errors++;
@@ -192,6 +193,23 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		tunnel->err_count = 0;
 	}
 
+	mtu = dst_mtu(dst);
+	if (skb->len > mtu) {
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+		if (skb->protocol == htons(ETH_P_IP)) {
+			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+				  htonl(mtu));
+		} else {
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		}
+
+		dst_release(dst);
+		goto tx_error;
+	}
+
 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
 	skb_dst_set(skb, dst);
 	skb->dev = skb_dst(skb)->dev;
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -350,6 +350,11 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
 
 	uh->len = newlen;
 
+	/* Set encapsulation before calling into inner gro_complete() functions
+	 * to make them set up the inner offsets.
+	 */
+	skb->encapsulation = 1;
+
 	rcu_read_lock();
 	sk = (*lookup)(skb, uh->source, uh->dest);
 	if (sk && udp_sk(sk)->gro_complete)
@@ -360,9 +365,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
 	if (skb->remcsum_offload)
 		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
 
-	skb->encapsulation = 1;
-	skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));
-
 	return err;
 }
 EXPORT_SYMBOL(udp_gro_complete);
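The fou.c and udp_offload.c hunks are two halves of one reordering: udp_gro_complete() now sets skb->encapsulation before dispatching to the inner gro_complete(), and the inner handlers (fou/gue) record the inner MAC header themselves instead of having the UDP layer guess it afterwards. A sketch of what a tunnel's completion callback does under the new ordering; the callback itself is hypothetical, while the helper is the one used in the hunks above:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical tunnel gro_complete(): by the time this runs, the UDP
 * layer has already set skb->encapsulation, so recording the inner
 * offsets here is valid. fou/gue do the equivalent above. */
static int example_tunnel_gro_complete(struct sock *sk, struct sk_buff *skb,
				       int nhoff)
{
	skb_set_inner_mac_header(skb, nhoff);
	return 0;
}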
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -446,6 +446,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
 	if (__ipv6_addr_needs_scope_id(addr_type))
 		iif = skb->dev->ifindex;
+	else
+		iif = l3mdev_master_ifindex(skb->dev);
 
 	/*
 	 * Must not send error if the source does not uniquely
@@ -500,9 +502,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
 
-	if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev);
-
 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
 	if (IS_ERR(dst))
 		goto out;
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -810,8 +810,13 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	fl6.flowi6_proto = IPPROTO_TCP;
 	if (rt6_need_strict(&fl6.daddr) && !oif)
 		fl6.flowi6_oif = tcp_v6_iif(skb);
-	else
+	else {
+		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+			oif = skb->skb_iif;
+
 		fl6.flowi6_oif = oif;
+	}
+
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 	fl6.fl6_dport = t1->dest;
 	fl6.fl6_sport = t1->source;
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
 		struct llc_pktinfo info;
 
+		memset(&info, 0, sizeof(info));
 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
 		llc_pdu_decode_da(skb, info.lpi_mac);
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1808,27 +1808,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
 		err = 0;
 
-	if (copied > 0) {
-		/* We only do these additional bookkeeping/notification steps
-		 * if we actually copied something out of the queue pair
-		 * instead of just peeking ahead.
-		 */
-
-		if (!(flags & MSG_PEEK)) {
-			/* If the other side has shutdown for sending and there
-			 * is nothing more to read, then modify the socket
-			 * state.
-			 */
-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
-				if (vsock_stream_has_data(vsk) <= 0) {
-					sk->sk_state = SS_UNCONNECTED;
-					sock_set_flag(sk, SOCK_DONE);
-					sk->sk_state_change(sk);
-				}
-			}
-		}
+	if (copied > 0)
 		err = copied;
-	}
 
 out:
 	release_sock(sk);
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -99,6 +99,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
 		skb_dst_force(skb);
 
+		/* Inner headers are invalid now. */
+		skb->encapsulation = 0;
+
 		err = x->type->output(x, skb);
 		if (err == -EINPROGRESS)
 			goto out;