Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NETFILTER]: nat: avoid rerouting packets if only XFRM policy key changed
  [NETFILTER]: nf_conntrack_netlink: add missing dependency on NF_NAT
  [NET]: fix up misplaced inlines.
  [SCTP]: Correctly reset ssthresh when restarting association
  [BRIDGE]: Fix fdb RCU race
  [NET]: Fix fib_rules dump race
  [XFRM]: ipsecv6 needs a space when printing audit record.
  [X25] x25_forward_call(): fix NULL dereferences
  [SCTP]: Reset some transport and association variables on restart
  [SCTP]: Increment error counters on user requested HBs.
  [SCTP]: Clean up stale data during association restart
  [IrDA]: Calling ppp_unregister_channel() from process context
  [IrDA]: irttp_dup spin_lock initialisation
  [IrDA]: Delay needed when uploading firmware chunks
commit d6e8823e7b
@@ -1057,6 +1057,8 @@ static int stir421x_fw_upload(struct irda_usb_cb *self,
 			if (ret < 0)
 				break;
+
+			mdelay(10);
 		}
 
 		kfree(patch_block);
 
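The added mdelay(10) gives the STIR421x time to commit each firmware block before the next one arrives. A rough userspace sketch of the same chunked-upload-with-settle-delay idea, with hypothetical helper names (send_block is a stub standing in for the driver's USB bulk transfer; usleep stands in for mdelay):

#include <stddef.h>
#include <unistd.h>

#define CHUNK 1024

/* stub standing in for the real USB bulk transfer */
static int send_block(const unsigned char *buf, size_t len)
{
	(void)buf; (void)len;
	return 0;
}

static int upload_fw(const unsigned char *fw, size_t len)
{
	size_t off;
	int ret = 0;

	for (off = 0; off < len; off += CHUNK) {
		size_t n = len - off < CHUNK ? len - off : CHUNK;
		ret = send_block(fw + off, n);
		if (ret < 0)
			break;
		usleep(10000);	/* 10 ms settle time, like mdelay(10) */
	}
	return ret;
}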
@@ -1002,6 +1002,7 @@ void sctp_transport_update_rto(struct sctp_transport *, __u32);
 void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
 void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
 unsigned long sctp_transport_timeout(struct sctp_transport *);
+void sctp_transport_reset(struct sctp_transport *);
 
 
 /* This is the structure we use to queue packets as they come into
@@ -59,6 +59,7 @@ struct sctp_ulpq {
 /* Prototypes. */
 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *,
 				 struct sctp_association *);
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq);
 void sctp_ulpq_free(struct sctp_ulpq *);
 
 /* Add a new DATA chunk for processing. */
@@ -319,7 +319,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
 	return 0;
 }
 
-static int inline hidp_send_ctrl_message(struct hidp_session *session,
+static inline int hidp_send_ctrl_message(struct hidp_session *session,
 			unsigned char hdr, unsigned char *data, int size)
 {
 	int err;
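The "[NET]: fix up misplaced inlines" hunks here and below all make the same change: "inline" moves in front of the return type. Both orderings are legal C99, but gcc warns about the specifier-after-type form (-Wold-style-declaration, pulled in by -Wextra), and kernel style wants the specifiers first. A two-line illustration:

static int inline f_old(void) { return 0; }	/* accepted, but warns with -Wextra */
static inline int f_new(void) { return 0; }	/* preferred ordering */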
@@ -197,8 +197,8 @@ struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
 
 	rcu_read_lock();
 	fdb = __br_fdb_get(br, addr);
-	if (fdb)
-		atomic_inc(&fdb->use_count);
+	if (fdb && !atomic_inc_not_zero(&fdb->use_count))
+		fdb = NULL;
 	rcu_read_unlock();
 	return fdb;
 }
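The race: between __br_fdb_get() and the old atomic_inc(), the entry's refcount could drop to zero and the entry be queued for RCU freeing; incrementing then "revives" a dead object. atomic_inc_not_zero() only takes a reference while the count is still non-zero. A minimal userspace sketch of the same pattern, assuming C11 atomics (illustrative names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_int use_count; };

static bool obj_get_not_zero(struct obj *o)
{
	int c = atomic_load(&o->use_count);

	while (c != 0) {
		/* increment only if the count is still non-zero */
		if (atomic_compare_exchange_weak(&o->use_count, &c, c + 1))
			return true;
	}
	return false;	/* object is dying; caller treats it as not found */
}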
@@ -61,7 +61,7 @@ static int brnf_filter_vlan_tagged __read_mostly = 1;
 #define brnf_filter_vlan_tagged 1
 #endif
 
-static __be16 inline vlan_proto(const struct sk_buff *skb)
+static inline __be16 vlan_proto(const struct sk_buff *skb)
 {
 	return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
 }
@@ -374,7 +374,7 @@ int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
 		return -EAFNOSUPPORT;
 
 	rcu_read_lock();
-	list_for_each_entry(rule, ops->rules_list, list) {
+	list_for_each_entry_rcu(rule, ops->rules_list, list) {
 		if (idx < cb->args[0])
 			goto skip;
 
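The dump walked an RCU-protected list with the plain iterator, so a concurrent rule deletion could leave the walker chasing a freed node. Inside rcu_read_lock() the traversal must use the _rcu variant, which dereferences the ->next pointers with the proper RCU accessors. Kernel-context sketch mirroring the hunk (a fragment, not standalone code):

rcu_read_lock();
list_for_each_entry_rcu(rule, ops->rules_list, list) {
	/* read-only inspection of rule; no sleeping here */
}
rcu_read_unlock();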
@@ -808,7 +808,7 @@ lenout:
  *
  * (We also register the sk_lock with the lock validator.)
  */
-static void inline sock_lock_init(struct sock *sk)
+static inline void sock_lock_init(struct sock *sk)
 {
 	sock_lock_init_class_and_name(sk,
 			af_family_slock_key_strings[sk->sk_family],
@@ -253,14 +253,17 @@ ip_nat_local_fn(unsigned int hooknum,
 		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 
 		if (ct->tuplehash[dir].tuple.dst.ip !=
-		    ct->tuplehash[!dir].tuple.src.ip
-#ifdef CONFIG_XFRM
-		    || ct->tuplehash[dir].tuple.dst.u.all !=
-		       ct->tuplehash[!dir].tuple.src.u.all
-#endif
-		    )
+		    ct->tuplehash[!dir].tuple.src.ip) {
 			if (ip_route_me_harder(pskb, RTN_UNSPEC))
 				ret = NF_DROP;
+		}
+#ifdef CONFIG_XFRM
+		else if (ct->tuplehash[dir].tuple.dst.u.all !=
+			 ct->tuplehash[!dir].tuple.src.u.all)
+			if (ip_xfrm_me_harder(pskb))
+				ret = NF_DROP;
+#endif
+
 	}
 	return ret;
 }
@@ -245,14 +245,16 @@ nf_nat_local_fn(unsigned int hooknum,
 		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 
 		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
-		    ct->tuplehash[!dir].tuple.src.u3.ip
-#ifdef CONFIG_XFRM
-		    || ct->tuplehash[dir].tuple.dst.u.all !=
-		       ct->tuplehash[!dir].tuple.src.u.all
-#endif
-		    )
+		    ct->tuplehash[!dir].tuple.src.u3.ip) {
 			if (ip_route_me_harder(pskb, RTN_UNSPEC))
 				ret = NF_DROP;
+		}
+#ifdef CONFIG_XFRM
+		else if (ct->tuplehash[dir].tuple.dst.u.all !=
+			 ct->tuplehash[!dir].tuple.src.u.all)
+			if (ip_xfrm_me_harder(pskb))
+				ret = NF_DROP;
+#endif
 	}
 	return ret;
 }
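Both NAT hunks encode the same decision: rerouting is only needed when the address part of the reply tuple changed; if only the ports changed, the route is still valid, but the XFRM policy lookup (which also keys on ports) has to be redone. A condensed, compilable sketch of the resulting control flow with stubbed predicates (illustrative, not the kernel code):

#include <stdbool.h>

enum verdict { VERDICT_ACCEPT, VERDICT_DROP };

static bool reroute_failed(void) { return false; }		/* stub */
static bool xfrm_relookup_failed(void) { return false; }	/* stub */

static enum verdict nat_local_out(bool addr_changed, bool ports_changed)
{
	enum verdict ret = VERDICT_ACCEPT;

	if (addr_changed) {
		if (reroute_failed())
			ret = VERDICT_DROP;
	} else if (ports_changed) {
		if (xfrm_relookup_failed())
			ret = VERDICT_DROP;
	}
	return ret;
}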
@@ -804,7 +804,7 @@ struct ipv6_saddr_score {
 #define IPV6_SADDR_SCORE_LABEL		0x0020
 #define IPV6_SADDR_SCORE_PRIVACY	0x0040
 
-static int inline ipv6_saddr_preferred(int type)
+static inline int ipv6_saddr_preferred(int type)
 {
 	if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|
 		    IPV6_ADDR_LOOPBACK|IPV6_ADDR_RESERVED))
@@ -813,7 +813,7 @@ static inline int ipv6_saddr_preferred(int type)
 }
 
 /* static matching label */
-static int inline ipv6_saddr_label(const struct in6_addr *addr, int type)
+static inline int ipv6_saddr_label(const struct in6_addr *addr, int type)
 {
 	/*
 	 * prefix (longest match) label
@@ -3318,7 +3318,7 @@ errout:
 		rtnl_set_sk_err(RTNLGRP_IPV6_IFADDR, err);
 }
 
-static void inline ipv6_store_devconf(struct ipv6_devconf *cnf,
+static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
 				      __s32 *array, int bytes)
 {
 	BUG_ON(bytes < (DEVCONF_MAX * 4));
@@ -308,7 +308,7 @@ static inline void rt6_probe(struct rt6_info *rt)
 /*
  *	Default Router Selection (RFC 2461 6.3.6)
  */
-static int inline rt6_check_dev(struct rt6_info *rt, int oif)
+static inline int rt6_check_dev(struct rt6_info *rt, int oif)
 {
 	struct net_device *dev = rt->rt6i_dev;
 	int ret = 0;
@@ -328,7 +328,7 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
 	return ret;
 }
 
-static int inline rt6_check_neigh(struct rt6_info *rt)
+static inline int rt6_check_neigh(struct rt6_info *rt)
 {
 	struct neighbour *neigh = rt->rt6i_nexthop;
 	int m = 0;
@@ -58,7 +58,7 @@ static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
 static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
 
-static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
+static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
 {
 	unsigned h;
 
@@ -70,7 +70,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
 	return h;
 }
 
-static unsigned inline xfrm6_tunnel_spi_hash_byspi(u32 spi)
+static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
 {
 	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
 }
@@ -419,6 +419,7 @@ typedef struct irnet_socket
   u32			raccm;		/* to please pppd - dummy) */
   unsigned int		flags;		/* PPP flags (compression, ...) */
   unsigned int		rbits;		/* Unused receive flags ??? */
+  struct work_struct	disconnect_work;   /* Process context disconnection */
   /* ------------------------ IrTTP part ------------------------ */
   /* We create a pseudo "socket" over the IrDA tranport */
   unsigned long		ttp_open;	/* Set when IrTTP is ready */
@@ -10,6 +10,27 @@
 
 #include "irnet_irda.h"		/* Private header */
 
+/*
+ * PPP disconnect work: we need to make sure we're in
+ * process context when calling ppp_unregister_channel().
+ */
+static void irnet_ppp_disconnect(struct work_struct *work)
+{
+	irnet_socket * self =
+		container_of(work, irnet_socket, disconnect_work);
+
+	if (self == NULL)
+		return;
+	/*
+	 * If we were connected, cleanup & close the PPP
+	 * channel, which will kill pppd (hangup) and the rest.
+	 */
+	if (self->ppp_open && !self->ttp_open && !self->ttp_connect) {
+		ppp_unregister_channel(&self->chan);
+		self->ppp_open = 0;
+	}
+}
+
 /************************* CONTROL CHANNEL *************************/
 /*
  * When ppp is not active, /dev/irnet act as a control channel.
@@ -499,6 +520,8 @@ irda_irnet_create(irnet_socket * self)
 #endif /* DISCOVERY_NOMASK */
   self->tx_flow = FLOW_START;	/* Flow control from IrTTP */
 
+  INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);
+
   DEXIT(IRDA_SOCK_TRACE, "\n");
   return(0);
 }
@@ -1134,15 +1157,8 @@ irnet_disconnect_indication(void * instance,
     {
       if(test_open)
 	{
-#ifdef MISSING_PPP_API
-	  /* ppp_unregister_channel() wants a user context, which we
-	   * are guaranteed to NOT have here. What are we supposed
-	   * to do here ? Jean II */
-	  /* If we were connected, cleanup & close the PPP channel,
-	   * which will kill pppd (hangup) and the rest */
-	  ppp_unregister_channel(&self->chan);
-	  self->ppp_open = 0;
-#endif
+	  /* ppp_unregister_channel() wants a user context. */
+	  schedule_work(&self->disconnect_work);
 	}
       else
 	{
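The IrNet hunks above form one fix: ppp_unregister_channel() may sleep, and irnet_disconnect_indication() runs in atomic context, so the call is deferred to a work item. The shape of the pattern (kernel context, identifiers from the patch; a fragment, not standalone code):

/* once, at socket setup (irda_irnet_create): */
INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);

/* from the atomic-context disconnect indication: */
schedule_work(&self->disconnect_work);

/* irnet_ppp_disconnect() then runs from the shared workqueue in
 * process context, where ppp_unregister_channel() is safe to call. */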
@@ -1455,6 +1455,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
 
 	/* Not everything should be copied */
 	new->notify.instance = instance;
+	spin_lock_init(&new->lock);
 	init_timer(&new->todo_timer);
 
 	skb_queue_head_init(&new->rx_queue);
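irttp_dup() builds the new TSAP by copying the original structure wholesale, which copies the embedded spinlock's state along with it; the duplicate must re-initialize its own lock before first use. A userspace analogue of the bug and the fix, using pthreads (illustrative names):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct tsap { pthread_mutex_t lock; int state; };

static struct tsap *tsap_dup(const struct tsap *orig)
{
	struct tsap *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	memcpy(new, orig, sizeof(*new));	/* copies stale lock state */
	pthread_mutex_init(&new->lock, NULL);	/* so re-init, as the patch does */
	return new;
}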
@@ -275,6 +275,7 @@ config NF_CT_NETLINK
 	tristate 'Connection tracking netlink interface (EXPERIMENTAL)'
 	depends on EXPERIMENTAL && NF_CONNTRACK && NETFILTER_NETLINK
 	depends on NF_CONNTRACK!=y || NETFILTER_NETLINK!=m
+	depends on NF_NAT=n || NF_NAT
 	help
 	  This option enables support for a netlink-based userspace interface
 
@@ -93,7 +93,7 @@ void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32
 	spin_unlock_bh(&dev->queue_lock);
 }
 
-static void __inline__
+static inline void
 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
 		   struct route4_filter *f)
 {
@@ -1046,6 +1046,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
 			trans = list_entry(pos, struct sctp_transport, transports);
 			if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
 				sctp_assoc_del_peer(asoc, &trans->ipaddr);
+
+			if (asoc->state >= SCTP_STATE_ESTABLISHED)
+				sctp_transport_reset(trans);
 		}
 
 		/* If the case is A (association restart), use
@@ -1063,6 +1066,18 @@ void sctp_assoc_update(struct sctp_association *asoc,
 		 */
 		sctp_ssnmap_clear(asoc->ssnmap);
 
+		/* Flush the ULP reassembly and ordered queue.
+		 * Any data there will now be stale and will
+		 * cause problems.
+		 */
+		sctp_ulpq_flush(&asoc->ulpq);
+
+		/* reset the overall association error count so
+		 * that the restarted association doesn't get torn
+		 * down on the next retransmission timer.
+		 */
+		asoc->overall_error_count = 0;
+
 	} else {
 		/* Add any peer addresses from the new association. */
 		list_for_each(pos, &new->peer.transport_addr_list) {
@@ -4342,8 +4342,24 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
 					void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	return sctp_sf_heartbeat(ep, asoc, type, (struct sctp_transport *)arg,
-				 commands);
+	if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
+				      (struct sctp_transport *)arg, commands))
+		return SCTP_DISPOSITION_NOMEM;
+
+	/*
+	 * RFC 2960 (bis), section 8.3
+	 *
+	 *    D) Request an on-demand HEARTBEAT on a specific destination
+	 *    transport address of a given association.
+	 *
+	 *    The endpoint should increment the respective error counter of
+	 *    the destination transport address each time a HEARTBEAT is sent
+	 *    to that address and not acknowledged within one RTO.
+	 *
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
+			SCTP_TRANSPORT(arg));
+	return SCTP_DISPOSITION_CONSUME;
 }
 
 /*
@@ -526,3 +526,35 @@ unsigned long sctp_transport_timeout(struct sctp_transport *t)
 	timeout += jiffies;
 	return timeout;
 }
+
+/* Reset transport variables to their initial values */
+void sctp_transport_reset(struct sctp_transport *t)
+{
+	struct sctp_association *asoc = t->asoc;
+
+	/* RFC 2960 (bis), Section 5.2.4
+	 * All the congestion control parameters (e.g., cwnd, ssthresh)
+	 * related to this peer MUST be reset to their initial values
+	 * (see Section 6.2.1)
+	 */
+	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
+	t->ssthresh = asoc->peer.i.a_rwnd;
+	t->rto = asoc->rto_initial;
+	t->rtt = 0;
+	t->srtt = 0;
+	t->rttvar = 0;
+
+	/* Reset these additional varibles so that we have a clean
+	 * slate.
+	 */
+	t->partial_bytes_acked = 0;
+	t->flight_size = 0;
+	t->error_count = 0;
+	t->rto_pending = 0;
+
+	/* Initialize the state information for SFR-CACC */
+	t->cacc.changeover_active = 0;
+	t->cacc.cycling_changeover = 0;
+	t->cacc.next_tsn_at_change = 0;
+	t->cacc.cacc_saw_newack = 0;
+}
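The cwnd line implements the initial congestion window from RFC 4960 (the "2960 bis" draft the comment cites): min(4*MTU, max(2*MTU, 4380 bytes)). A quick standalone check of the arithmetic, assuming a 1500-byte path MTU:

#include <stdio.h>

static unsigned int sctp_initial_cwnd(unsigned int pmtu)
{
	unsigned int lower = 2 * pmtu > 4380 ? 2 * pmtu : 4380;

	return 4 * pmtu < lower ? 4 * pmtu : lower;
}

int main(void)
{
	/* min(6000, max(3000, 4380)) = 4380 */
	printf("%u\n", sctp_initial_cwnd(1500));
	return 0;
}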
@@ -73,7 +73,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 
 
 /* Flush the reassembly and ordering queues. */
-static void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 {
 	struct sk_buff *skb;
 	struct sctp_ulpevent *event;
@@ -26,64 +26,66 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
 	short same_lci = 0;
 	int rc = 0;
 
-	if ((rt = x25_get_route(dest_addr)) != NULL) {
+	if ((rt = x25_get_route(dest_addr)) == NULL)
+		goto out_no_route;
 
-		if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
-			/* This shouldnt happen, if it occurs somehow
-			 * do something sensible
-			 */
-			goto out_put_route;
-		}
+	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
+		/* This shouldnt happen, if it occurs somehow
+		 * do something sensible
+		 */
+		goto out_put_route;
+	}
 
-		/* Avoid a loop. This is the normal exit path for a
-		 * system with only one x.25 iface and default route
-		 */
-		if (rt->dev == from->dev) {
-			goto out_put_nb;
-		}
+	/* Avoid a loop. This is the normal exit path for a
+	 * system with only one x.25 iface and default route
+	 */
+	if (rt->dev == from->dev) {
+		goto out_put_nb;
+	}
 
-		/* Remote end sending a call request on an already
-		 * established LCI? It shouldnt happen, just in case..
-		 */
-		read_lock_bh(&x25_forward_list_lock);
-		list_for_each(entry, &x25_forward_list) {
-			x25_frwd = list_entry(entry, struct x25_forward, node);
-			if (x25_frwd->lci == lci) {
-				printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
-				same_lci = 1;
-			}
-		}
-		read_unlock_bh(&x25_forward_list_lock);
+	/* Remote end sending a call request on an already
+	 * established LCI? It shouldnt happen, just in case..
+	 */
+	read_lock_bh(&x25_forward_list_lock);
+	list_for_each(entry, &x25_forward_list) {
+		x25_frwd = list_entry(entry, struct x25_forward, node);
+		if (x25_frwd->lci == lci) {
+			printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
+			same_lci = 1;
+		}
+	}
+	read_unlock_bh(&x25_forward_list_lock);
 
-		/* Save the forwarding details for future traffic */
-		if (!same_lci){
-			if ((new_frwd = kmalloc(sizeof(struct x25_forward),
-							GFP_ATOMIC)) == NULL){
-				rc = -ENOMEM;
-				goto out_put_nb;
-			}
-			new_frwd->lci = lci;
-			new_frwd->dev1 = rt->dev;
-			new_frwd->dev2 = from->dev;
-			write_lock_bh(&x25_forward_list_lock);
-			list_add(&new_frwd->node, &x25_forward_list);
-			write_unlock_bh(&x25_forward_list_lock);
-		}
+	/* Save the forwarding details for future traffic */
+	if (!same_lci){
+		if ((new_frwd = kmalloc(sizeof(struct x25_forward),
+					GFP_ATOMIC)) == NULL){
+			rc = -ENOMEM;
+			goto out_put_nb;
+		}
+		new_frwd->lci = lci;
+		new_frwd->dev1 = rt->dev;
+		new_frwd->dev2 = from->dev;
+		write_lock_bh(&x25_forward_list_lock);
+		list_add(&new_frwd->node, &x25_forward_list);
+		write_unlock_bh(&x25_forward_list_lock);
+	}
 
-		/* Forward the call request */
-		if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
-			goto out_put_nb;
-		}
-		x25_transmit_link(skbn, neigh_new);
-		rc = 1;
-	}
+	/* Forward the call request */
+	if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
+		goto out_put_nb;
+	}
+	x25_transmit_link(skbn, neigh_new);
+	rc = 1;
+
 
 out_put_nb:
 	x25_neigh_put(neigh_new);
 
 out_put_route:
 	x25_route_put(rt);
+
+out_no_route:
 	return rc;
 }
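The old code could jump to out_put_nb/out_put_route even though the route lookup itself had failed, calling x25_neigh_put()/x25_route_put() on uninitialized or NULL pointers. The rewrite orders the exit labels so each one releases only what was actually acquired. A standalone sketch of the same unwind-ladder idiom (hypothetical resource helpers, not the X.25 API):

#include <stdio.h>
#include <stdlib.h>

struct res { int id; };

static struct res *get_res(int ok) { return ok ? malloc(sizeof(struct res)) : NULL; }
static void put_res(struct res *r) { free(r); }

static int forward(int have_route, int have_neigh)
{
	struct res *rt, *nb;
	int rc = 0;

	if ((rt = get_res(have_route)) == NULL)
		goto out_no_route;	/* nothing acquired, nothing to release */
	if ((nb = get_res(have_neigh)) == NULL)
		goto out_put_route;	/* release the route only */

	rc = 1;				/* ... do the forwarding ... */

	put_res(nb);
out_put_route:
	put_res(rt);
out_no_route:
	return rc;
}

int main(void) { printf("%d\n", forward(1, 1)); return 0; }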
@@ -2089,7 +2089,7 @@ void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
 				sizeof(struct in6_addr));
 		}
 		audit_log_format(audit_buf,
-				 " src=" NIP6_FMT "dst=" NIP6_FMT,
+				 " src=" NIP6_FMT " dst=" NIP6_FMT,
 				 NIP6(saddr6), NIP6(daddr6));
 	}
 	break;
@@ -2025,7 +2025,7 @@ nlmsg_failure:
 	return -1;
 }
 
-static int inline xfrm_sa_len(struct xfrm_state *x)
+static inline int xfrm_sa_len(struct xfrm_state *x)
 {
 	int l = 0;
 	if (x->aalg)