Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
  [NetLabel]: correct usage of RCU locking
  [TCP]: fix D-SACK cwnd handling
  [NET] napi: use non-interruptible sleep in napi_disable
  [SCTP] net/sctp/auth.c: make 3 functions static
  [TCP]: Add missing I/O AT code to ipv6 side.
  [SCTP]: #if 0 sctp_update_copy_cksum()
  [INET]: Unexport icmpmsg_statistics
  [NET]: Unexport sock_enable_timestamp().
  [TCP]: Make tcp_match_skb_to_sack() static.
  [IRDA]: Make ircomm_tty static.
  [NET] fs/proc/proc_net.c: make a struct static
  [NET] dev_change_name: ignore changes to same name
  [NET]: Document some simple rules for actions
  [NET_CLS_ACT]: Use skb_act_clone
  [NET_CLS_ACT]: Introduce skb_act_clone
  [TCP]: Fix scatterlist handling in MD5 signature support.
  [IPSEC]: Fix scatterlist handling in skb_icv_walk().
  [IPSEC]: Add missing sg_init_table() calls to ESP.
  [CRYPTO]: Initialize TCRYPT on-stack scatterlist objects correctly.
  [CRYPTO]: HMAC needs some more scatterlist fixups.
  ...
commit ec3b67c11d
@@ -184,14 +184,14 @@ tcp_frto - INTEGER
 	F-RTO is an enhanced recovery algorithm for TCP retransmission
 	timeouts.  It is particularly beneficial in wireless environments
 	where packet loss is typically due to random radio interference
-	rather than intermediate router congestion. FRTO is sender-side
+	rather than intermediate router congestion. F-RTO is sender-side
 	only modification. Therefore it does not require any support from
 	the peer, but in a typical case, however, where wireless link is
 	the local access link and most of the data flows downlink, the
-	faraway servers should have FRTO enabled to take advantage of it.
+	faraway servers should have F-RTO enabled to take advantage of it.
 	If set to 1, basic version is enabled. 2 enables SACK enhanced
 	F-RTO if flow uses SACK. The basic version can be used also when
-	SACK is in use though scenario(s) with it exists where FRTO
+	SACK is in use though scenario(s) with it exists where F-RTO
 	interacts badly with the packet counting of the SACK enabled TCP
 	flow.
 
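For readers who want to try the tunable documented above, here is a minimal userspace sketch (not part of this commit) that selects the SACK-enhanced variant by writing to /proc/sys/net/ipv4/tcp_frto. The value semantics (0 = off, 1 = basic, 2 = SACK-enhanced) follow the documentation text; the program assumes a Linux host and root privileges.

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch: enable SACK-enhanced F-RTO (value 2) via procfs. */
int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_frto", "w");

	if (!f) {
		perror("fopen /proc/sys/net/ipv4/tcp_frto");
		return EXIT_FAILURE;
	}
	if (fprintf(f, "2\n") < 0) {
		perror("write");
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	return EXIT_SUCCESS;
}
```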
Documentation/networking/tc-actions-env-rules.txt (new file, 29 lines)
@@ -0,0 +1,29 @@
+
+The "enviromental" rules for authors of any new tc actions are:
+
+1) If you stealeth or borroweth any packet thou shalt be branching
+from the righteous path and thou shalt cloneth.
+
+For example if your action queues a packet to be processed later
+or intentionaly branches by redirecting a packet then you need to
+clone the packet.
+There are certain fields in the skb tc_verd that need to be reset so we
+avoid loops etc. A few are generic enough so much so that skb_act_clone()
+resets them for you. So invoke skb_act_clone() rather than skb_clone()
+
+2) If you munge any packet thou shalt call pskb_expand_head in the case
+someone else is referencing the skb. After that you "own" the skb.
+You must also tell us if it is ok to munge the packet (TC_OK2MUNGE),
+this way any action downstream can stomp on the packet.
+
+3) dropping packets you dont own is a nono. You simply return
+TC_ACT_SHOT to the caller and they will drop it.
+
+The "enviromental" rules for callers of actions (qdiscs etc) are:
+
+*) thou art responsible for freeing anything returned as being
+TC_ACT_SHOT/STOLEN/QUEUED. If none of TC_ACT_SHOT/STOLEN/QUEUED is
+returned then all is great and you dont need to do anything.
+
+Post on netdev if something is unclear.
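The rules in the new document above can be illustrated with a schematic kernel-style sketch (not part of this commit). The function name, its simplified signature and the handling of the target device are hypothetical; the headers are assumed to be the ones this series touches. The point is rule 1 (a redirecting action clones via skb_act_clone() rather than skb_clone()) and rule 3 (never free a packet you do not own; return TC_ACT_SHOT and let the caller drop it).

```c
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Hypothetical redirecting action body, for illustration only. */
static int example_redirect(struct sk_buff *skb, struct net_device *target)
{
	/* Rule 1: the packet is "borrowed", so clone it; skb_act_clone()
	 * also resets the tc_verd bits to avoid loops.
	 */
	struct sk_buff *copy = skb_act_clone(skb, GFP_ATOMIC);

	if (copy == NULL)
		return TC_ACT_SHOT;	/* rule 3: caller drops the original */

	copy->dev = target;		/* 'target' is assumed held by the caller */
	dev_queue_xmit(copy);		/* redirect the clone, keep the original */

	return TC_ACT_PIPE;
}
```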
@@ -2449,13 +2449,15 @@ W:	http://www.tazenda.demon.co.uk/phil/linux-hp
 S:	Maintained
 
 MAC80211
-P:	Jiri Benc
-M:	jbenc@suse.cz
 P:	Michael Wu
 M:	flamingice@sourmilk.net
+P:	Johannes Berg
+M:	johannes@sipsolutions.net
+P:	Jiri Benc
+M:	jbenc@suse.cz
 L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/
-T:	git kernel.org:/pub/scm/linux/kernel/git/jbenc/mac80211.git
+T:	git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
 S:	Maintained
 
 MACVLAN DRIVER
@@ -61,7 +61,7 @@ static int hmac_setkey(struct crypto_hash *parent,
 		desc.tfm = tfm;
 		desc.flags = crypto_hash_get_flags(parent);
 		desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
-		sg_set_buf(&tmp, inkey, keylen);
+		sg_init_one(&tmp, inkey, keylen);
 
 		err = crypto_hash_digest(&desc, &tmp, keylen, digest);
 		if (err)
@@ -96,7 +96,7 @@ static int hmac_init(struct hash_desc *pdesc)
 
 	desc.tfm = ctx->child;
 	desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	sg_set_buf(&tmp, ipad, bs);
+	sg_init_one(&tmp, ipad, bs);
 
 	err = crypto_hash_init(&desc);
 	if (unlikely(err))
@@ -131,7 +131,7 @@ static int hmac_final(struct hash_desc *pdesc, u8 *out)
 
 	desc.tfm = ctx->child;
 	desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	sg_set_buf(&tmp, opad, bs + ds);
+	sg_init_one(&tmp, opad, bs + ds);
 
 	err = crypto_hash_final(&desc, digest);
 	if (unlikely(err))
@@ -158,9 +158,11 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
 	desc.tfm = ctx->child;
 	desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
+	sg_init_table(sg1, 2);
 	sg_set_buf(sg1, ipad, bs);
+	sg_set_page(&sg1[1], (void *) sg, 0, 0);
 
-	sg_set_page(&sg[1], (void *) sg, 0, 0);
+	sg_init_table(sg2, 1);
 	sg_set_buf(sg2, opad, bs + ds);
 
 	err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest);
@ -139,7 +139,7 @@ static void test_hash(char *algo, struct hash_testvec *template,
|
||||
printk("test %u:\n", i + 1);
|
||||
memset(result, 0, 64);
|
||||
|
||||
sg_set_buf(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize);
|
||||
sg_init_one(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize);
|
||||
|
||||
if (hash_tv[i].ksize) {
|
||||
ret = crypto_hash_setkey(tfm, hash_tv[i].key,
|
||||
@ -176,6 +176,7 @@ static void test_hash(char *algo, struct hash_testvec *template,
|
||||
memset(result, 0, 64);
|
||||
|
||||
temp = 0;
|
||||
sg_init_table(sg, hash_tv[i].np);
|
||||
for (k = 0; k < hash_tv[i].np; k++) {
|
||||
memcpy(&xbuf[IDX[k]],
|
||||
hash_tv[i].plaintext + temp,
|
||||
@ -289,8 +290,8 @@ static void test_cipher(char *algo, int enc,
|
||||
goto out;
|
||||
}
|
||||
|
||||
sg_set_buf(&sg[0], cipher_tv[i].input,
|
||||
cipher_tv[i].ilen);
|
||||
sg_init_one(&sg[0], cipher_tv[i].input,
|
||||
cipher_tv[i].ilen);
|
||||
|
||||
ablkcipher_request_set_crypt(req, sg, sg,
|
||||
cipher_tv[i].ilen,
|
||||
@ -353,6 +354,7 @@ static void test_cipher(char *algo, int enc,
|
||||
}
|
||||
|
||||
temp = 0;
|
||||
sg_init_table(sg, cipher_tv[i].np);
|
||||
for (k = 0; k < cipher_tv[i].np; k++) {
|
||||
memcpy(&xbuf[IDX[k]],
|
||||
cipher_tv[i].input + temp,
|
||||
@ -414,7 +416,7 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p,
|
||||
int bcount;
|
||||
int ret;
|
||||
|
||||
sg_set_buf(sg, p, blen);
|
||||
sg_init_one(sg, p, blen);
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
@ -440,7 +442,7 @@ static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p,
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
sg_set_buf(sg, p, blen);
|
||||
sg_init_one(sg, p, blen);
|
||||
|
||||
local_bh_disable();
|
||||
local_irq_disable();
|
||||
@ -572,7 +574,7 @@ static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen,
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
sg_set_buf(sg, p, blen);
|
||||
sg_init_one(sg, p, blen);
|
||||
ret = crypto_hash_digest(desc, sg, blen, out);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -601,7 +603,7 @@ static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen,
|
||||
if (ret)
|
||||
return ret;
|
||||
for (pcount = 0; pcount < blen; pcount += plen) {
|
||||
sg_set_buf(sg, p + pcount, plen);
|
||||
sg_init_one(sg, p + pcount, plen);
|
||||
ret = crypto_hash_update(desc, sg, plen);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -631,7 +633,7 @@ static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen,
|
||||
|
||||
/* Warm-up run. */
|
||||
for (i = 0; i < 4; i++) {
|
||||
sg_set_buf(sg, p, blen);
|
||||
sg_init_one(sg, p, blen);
|
||||
ret = crypto_hash_digest(desc, sg, blen, out);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -643,7 +645,7 @@ static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen,
|
||||
|
||||
start = get_cycles();
|
||||
|
||||
sg_set_buf(sg, p, blen);
|
||||
sg_init_one(sg, p, blen);
|
||||
ret = crypto_hash_digest(desc, sg, blen, out);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -686,7 +688,7 @@ static int test_hash_cycles(struct hash_desc *desc, char *p, int blen,
|
||||
if (ret)
|
||||
goto out;
|
||||
for (pcount = 0; pcount < blen; pcount += plen) {
|
||||
sg_set_buf(sg, p + pcount, plen);
|
||||
sg_init_one(sg, p + pcount, plen);
|
||||
ret = crypto_hash_update(desc, sg, plen);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -706,7 +708,7 @@ static int test_hash_cycles(struct hash_desc *desc, char *p, int blen,
|
||||
if (ret)
|
||||
goto out;
|
||||
for (pcount = 0; pcount < blen; pcount += plen) {
|
||||
sg_set_buf(sg, p + pcount, plen);
|
||||
sg_init_one(sg, p + pcount, plen);
|
||||
ret = crypto_hash_update(desc, sg, plen);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
@@ -185,7 +185,7 @@ static __net_exit void proc_net_ns_exit(struct net *net)
 	kfree(net->proc_net_root);
 }
 
-struct pernet_operations __net_initdata proc_net_ns_ops = {
+static struct pernet_operations __net_initdata proc_net_ns_ops = {
 	.init = proc_net_ns_init,
 	.exit = proc_net_ns_exit,
 };
@@ -144,6 +144,8 @@ enum dccp_reset_codes
 	DCCP_RESET_CODE_TOO_BUSY,
 	DCCP_RESET_CODE_BAD_INIT_COOKIE,
 	DCCP_RESET_CODE_AGGRESSION_PENALTY,
+
+	DCCP_MAX_RESET_CODES		/* Leave at the end! */
 };
 
 /* DCCP options */
@@ -270,10 +272,9 @@ static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
 	return memset(skb_transport_header(skb), 0, headlen);
 }
 
-static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb)
+static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
 {
-	return (struct dccp_hdr_ext *)(skb_transport_header(skb) +
-				       sizeof(struct dccp_hdr));
+	return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
 }
 
 static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
@@ -287,13 +288,12 @@ static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
 	return __dccp_basic_hdr_len(dh);
 }
 
-static inline __u64 dccp_hdr_seq(const struct sk_buff *skb)
+static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
 {
-	const struct dccp_hdr *dh = dccp_hdr(skb);
 	__u64 seq_nr = ntohs(dh->dccph_seq);
 
 	if (dh->dccph_x != 0)
-		seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low);
+		seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low);
 	else
 		seq_nr += (u32)dh->dccph_seq2 << 16;
 
@@ -390,7 +390,7 @@ static inline void napi_complete(struct napi_struct *n)
 static inline void napi_disable(struct napi_struct *n)
 {
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
-		msleep_interruptible(1);
+		msleep(1);
 }
 
 /**
@@ -26,7 +26,6 @@
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_sock.h>
-#include <net/route.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 
@@ -266,11 +265,6 @@ out:
 	wake_up(&hashinfo->lhash_wait);
 }
 
-static inline int inet_iif(const struct sk_buff *skb)
-{
-	return ((struct rtable *)skb->dst)->rt_iif;
-}
-
 extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
 					   const __be32 daddr,
 					   const unsigned short hnum,
@@ -24,6 +24,7 @@
 #include <net/flow.h>
 #include <net/sock.h>
 #include <net/request_sock.h>
+#include <net/route.h>
 
 /** struct ip_options - IP Options
  *
@@ -190,4 +191,10 @@ static inline int inet_sk_ehashfn(const struct sock *sk)
 	return inet_ehashfn(laddr, lport, faddr, fport);
 }
 
+
+static inline int inet_iif(const struct sk_buff *skb)
+{
+	return ((struct rtable *)skb->dst)->rt_iif;
+}
+
 #endif	/* _INET_SOCK_H */
@@ -127,7 +127,6 @@ extern int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
 			    unsigned int cmd, unsigned long arg);
 extern void ircomm_tty_set_termios(struct tty_struct *tty,
 				   struct ktermios *old_termios);
-extern hashbin_t *ircomm_tty;
 
 #endif
 
@@ -316,4 +316,19 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
 	return rtab->data[slot];
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
+{
+	struct sk_buff *n = skb_clone(skb, gfp_mask);
+
+	if (n) {
+		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
+		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
+		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
+		n->iif = skb->iif;
+	}
+	return n;
+}
+#endif
+
 #endif
@@ -88,7 +88,6 @@ static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key)
 
 void sctp_auth_key_put(struct sctp_auth_bytes *key);
 struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
-void sctp_auth_shkey_free(struct sctp_shared_key *sh_key);
 void sctp_auth_destroy_keys(struct list_head *keys);
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
 struct sctp_shared_key *sctp_auth_get_shkey(
@@ -156,7 +156,6 @@ int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
 __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
 __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
 __u32 sctp_end_cksum(__u32 cksum);
-__u32 sctp_update_copy_cksum(__u8 *, __u8 *, __u16 count, __u32 cksum);
 
 /*
  * sctp/input.c
@@ -883,6 +883,9 @@ int dev_change_name(struct net_device *dev, char *newname)
 	if (!dev_valid_name(newname))
 		return -EINVAL;
 
+	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
+		return 0;
+
 	memcpy(oldname, dev->name, IFNAMSIZ);
 
 	if (strchr(newname, '%')) {
@@ -415,13 +415,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->nohdr = 0;
 	n->destructor = NULL;
-#ifdef CONFIG_NET_CLS_ACT
-	/* FIXME What is this and why don't we do it in copy_skb_header? */
-	n->tc_verd = SET_TC_VERD(n->tc_verd,0);
-	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-	C(iif);
-#endif
 	C(truesize);
 	atomic_set(&n->users, 1);
 	C(head);
@@ -1649,7 +1649,6 @@ void sock_enable_timestamp(struct sock *sk)
 		net_enable_timestamp();
 	}
 }
-EXPORT_SYMBOL(sock_enable_timestamp);
 
 /*
  *	Get a socket option on an socket.
@ -750,20 +750,16 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
|
||||
*/
|
||||
hctx->ccid2hctx_ssthresh = ~0;
|
||||
hctx->ccid2hctx_numdupack = 3;
|
||||
hctx->ccid2hctx_seqbufc = 0;
|
||||
|
||||
/* XXX init ~ to window size... */
|
||||
if (ccid2_hc_tx_alloc_seq(hctx))
|
||||
return -ENOMEM;
|
||||
|
||||
hctx->ccid2hctx_sent = 0;
|
||||
hctx->ccid2hctx_rto = 3 * HZ;
|
||||
ccid2_change_srtt(hctx, -1);
|
||||
hctx->ccid2hctx_rttvar = -1;
|
||||
hctx->ccid2hctx_lastrtt = 0;
|
||||
hctx->ccid2hctx_rpdupack = -1;
|
||||
hctx->ccid2hctx_last_cong = jiffies;
|
||||
hctx->ccid2hctx_high_ack = 0;
|
||||
|
||||
hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire;
|
||||
hctx->ccid2hctx_rtotimer.data = (unsigned long)sk;
|
||||
|
@ -40,6 +40,8 @@
|
||||
#include "lib/tfrc.h"
|
||||
#include "ccid3.h"
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
|
||||
static int ccid3_debug;
|
||||
#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
|
||||
@ -544,6 +546,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
|
||||
const struct dccp_sock *dp = dccp_sk(sk);
|
||||
struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
|
||||
struct ccid3_options_received *opt_recv;
|
||||
__be32 opt_val;
|
||||
|
||||
opt_recv = &hctx->ccid3hctx_options_received;
|
||||
|
||||
@ -563,8 +566,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
|
||||
dccp_role(sk), sk, len);
|
||||
rc = -EINVAL;
|
||||
} else {
|
||||
opt_recv->ccid3or_loss_event_rate =
|
||||
ntohl(*(__be32 *)value);
|
||||
opt_val = get_unaligned((__be32 *)value);
|
||||
opt_recv->ccid3or_loss_event_rate = ntohl(opt_val);
|
||||
ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
|
||||
dccp_role(sk), sk,
|
||||
opt_recv->ccid3or_loss_event_rate);
|
||||
@ -585,8 +588,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
|
||||
dccp_role(sk), sk, len);
|
||||
rc = -EINVAL;
|
||||
} else {
|
||||
opt_recv->ccid3or_receive_rate =
|
||||
ntohl(*(__be32 *)value);
|
||||
opt_val = get_unaligned((__be32 *)value);
|
||||
opt_recv->ccid3or_receive_rate = ntohl(opt_val);
|
||||
ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
|
||||
dccp_role(sk), sk,
|
||||
opt_recv->ccid3or_receive_rate);
|
||||
@ -601,8 +604,6 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
|
||||
{
|
||||
struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
|
||||
|
||||
hctx->ccid3hctx_s = 0;
|
||||
hctx->ccid3hctx_rtt = 0;
|
||||
hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
|
||||
INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
|
||||
|
||||
@ -963,8 +964,6 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
|
||||
INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
|
||||
hcrx->ccid3hcrx_tstamp_last_feedback =
|
||||
hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real();
|
||||
hcrx->ccid3hcrx_s = 0;
|
||||
hcrx->ccid3hcrx_rtt = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -58,6 +58,42 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
|
||||
dccp_send_close(sk, 0);
|
||||
}
|
||||
|
||||
static u8 dccp_reset_code_convert(const u8 code)
|
||||
{
|
||||
const u8 error_code[] = {
|
||||
[DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */
|
||||
[DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */
|
||||
[DCCP_RESET_CODE_ABORTED] = ECONNRESET,
|
||||
|
||||
[DCCP_RESET_CODE_NO_CONNECTION] = ECONNREFUSED,
|
||||
[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
|
||||
[DCCP_RESET_CODE_TOO_BUSY] = EUSERS,
|
||||
[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,
|
||||
|
||||
[DCCP_RESET_CODE_PACKET_ERROR] = ENOMSG,
|
||||
[DCCP_RESET_CODE_BAD_INIT_COOKIE] = EBADR,
|
||||
[DCCP_RESET_CODE_BAD_SERVICE_CODE] = EBADRQC,
|
||||
[DCCP_RESET_CODE_OPTION_ERROR] = EILSEQ,
|
||||
[DCCP_RESET_CODE_MANDATORY_ERROR] = EOPNOTSUPP,
|
||||
};
|
||||
|
||||
return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
|
||||
}
|
||||
|
||||
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
|
||||
|
||||
sk->sk_err = err;
|
||||
|
||||
/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
|
||||
dccp_fin(sk, skb);
|
||||
|
||||
if (err && !sock_flag(sk, SOCK_DEAD))
|
||||
sk_wake_async(sk, 0, POLL_ERR);
|
||||
dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
|
||||
}
|
||||
|
||||
static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
struct dccp_sock *dp = dccp_sk(sk);
|
||||
@ -191,9 +227,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
* S.state := TIMEWAIT
|
||||
* Set TIMEWAIT timer
|
||||
* Drop packet and return
|
||||
*/
|
||||
dccp_fin(sk, skb);
|
||||
dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
|
||||
*/
|
||||
dccp_rcv_reset(sk, skb);
|
||||
return 0;
|
||||
case DCCP_PKT_CLOSEREQ:
|
||||
dccp_rcv_closereq(sk, skb);
|
||||
@ -521,12 +556,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
|
||||
* Drop packet and return
|
||||
*/
|
||||
if (dh->dccph_type == DCCP_PKT_RESET) {
|
||||
/*
|
||||
* Queue the equivalent of TCP fin so that dccp_recvmsg
|
||||
* exits the loop
|
||||
*/
|
||||
dccp_fin(sk, skb);
|
||||
dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
|
||||
dccp_rcv_reset(sk, skb);
|
||||
return 0;
|
||||
/*
|
||||
* Step 7: Check for unexpected packet types
|
||||
|
@ -241,8 +241,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
|
||||
goto out;
|
||||
|
||||
dp = dccp_sk(sk);
|
||||
seq = dccp_hdr_seq(skb);
|
||||
if (sk->sk_state != DCCP_LISTEN &&
|
||||
seq = dccp_hdr_seq(dh);
|
||||
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
|
||||
!between48(seq, dp->dccps_swl, dp->dccps_swh)) {
|
||||
NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
|
||||
goto out;
|
||||
@ -795,7 +795,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
|
||||
|
||||
dh = dccp_hdr(skb);
|
||||
|
||||
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
|
||||
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
|
||||
DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
|
||||
|
||||
dccp_pr_debug("%8.8s "
|
||||
|
@ -173,7 +173,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
|
||||
icmpv6_err_convert(type, code, &err);
|
||||
|
||||
seq = DCCP_SKB_CB(skb)->dccpd_seq;
|
||||
seq = dccp_hdr_seq(dh);
|
||||
/* Might be for an request_sock */
|
||||
switch (sk->sk_state) {
|
||||
struct request_sock *req, **prev;
|
||||
@ -787,7 +787,7 @@ static int dccp_v6_rcv(struct sk_buff *skb)
|
||||
|
||||
dh = dccp_hdr(skb);
|
||||
|
||||
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
|
||||
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
|
||||
DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
|
||||
|
||||
if (dccp_packet_without_ack(skb))
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <linux/dccp.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
@ -59,6 +60,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
|
||||
unsigned char opt, len;
|
||||
unsigned char *value;
|
||||
u32 elapsed_time;
|
||||
__be32 opt_val;
|
||||
int rc;
|
||||
int mandatory = 0;
|
||||
|
||||
@ -145,7 +147,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
|
||||
if (len != 4)
|
||||
goto out_invalid_option;
|
||||
|
||||
opt_recv->dccpor_timestamp = ntohl(*(__be32 *)value);
|
||||
opt_val = get_unaligned((__be32 *)value);
|
||||
opt_recv->dccpor_timestamp = ntohl(opt_val);
|
||||
|
||||
dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp;
|
||||
dp->dccps_timestamp_time = ktime_get_real();
|
||||
@ -159,7 +162,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
|
||||
if (len != 4 && len != 6 && len != 8)
|
||||
goto out_invalid_option;
|
||||
|
||||
opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value);
|
||||
opt_val = get_unaligned((__be32 *)value);
|
||||
opt_recv->dccpor_timestamp_echo = ntohl(opt_val);
|
||||
|
||||
dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, "
|
||||
"ackno=%llu", dccp_role(sk),
|
||||
@ -168,16 +172,20 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
|
||||
(unsigned long long)
|
||||
DCCP_SKB_CB(skb)->dccpd_ack_seq);
|
||||
|
||||
value += 4;
|
||||
|
||||
if (len == 4) {
|
||||
if (len == 4) { /* no elapsed time included */
|
||||
dccp_pr_debug_cat("\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (len == 6)
|
||||
elapsed_time = ntohs(*(__be16 *)(value + 4));
|
||||
else
|
||||
elapsed_time = ntohl(*(__be32 *)(value + 4));
|
||||
if (len == 6) { /* 2-byte elapsed time */
|
||||
__be16 opt_val2 = get_unaligned((__be16 *)value);
|
||||
elapsed_time = ntohs(opt_val2);
|
||||
} else { /* 4-byte elapsed time */
|
||||
opt_val = get_unaligned((__be32 *)value);
|
||||
elapsed_time = ntohl(opt_val);
|
||||
}
|
||||
|
||||
dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time);
|
||||
|
||||
@ -192,10 +200,13 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
|
||||
if (pkt_type == DCCP_PKT_DATA)
|
||||
continue;
|
||||
|
||||
if (len == 2)
|
||||
elapsed_time = ntohs(*(__be16 *)value);
|
||||
else
|
||||
elapsed_time = ntohl(*(__be32 *)value);
|
||||
if (len == 2) {
|
||||
__be16 opt_val2 = get_unaligned((__be16 *)value);
|
||||
elapsed_time = ntohs(opt_val2);
|
||||
} else {
|
||||
opt_val = get_unaligned((__be32 *)value);
|
||||
elapsed_time = ntohl(opt_val);
|
||||
}
|
||||
|
||||
if (elapsed_time > opt_recv->dccpor_elapsed_time)
|
||||
opt_recv->dccpor_elapsed_time = elapsed_time;
|
||||
|
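The recurring change in the options-parsing hunks above replaces direct dereferences of __be32 pointers into the received option area with get_unaligned(), since option values are not guaranteed to be 4-byte aligned and a raw load can fault on strict-alignment architectures. A small, hedged sketch of the idiom (the helper name is made up; it uses the same <asm/unaligned.h> interface the hunks above add):

```c
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/types.h>

/* Read a big-endian 32-bit option value from a possibly unaligned
 * position inside a received option area and return it in host order.
 */
static u32 read_be32_option(const unsigned char *value)
{
	__be32 opt_val = get_unaligned((const __be32 *)value);

	return ntohl(opt_val);
}
```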
@ -504,22 +504,16 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def)
|
||||
INIT_RCU_HEAD(&doi_def->rcu);
|
||||
INIT_LIST_HEAD(&doi_def->dom_list);
|
||||
|
||||
rcu_read_lock();
|
||||
if (cipso_v4_doi_search(doi_def->doi) != NULL)
|
||||
goto doi_add_failure_rlock;
|
||||
spin_lock(&cipso_v4_doi_list_lock);
|
||||
if (cipso_v4_doi_search(doi_def->doi) != NULL)
|
||||
goto doi_add_failure_slock;
|
||||
goto doi_add_failure;
|
||||
list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
|
||||
doi_add_failure_slock:
|
||||
doi_add_failure:
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
doi_add_failure_rlock:
|
||||
rcu_read_unlock();
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
@ -543,29 +537,23 @@ int cipso_v4_doi_remove(u32 doi,
|
||||
struct cipso_v4_doi *doi_def;
|
||||
struct cipso_v4_domhsh_entry *dom_iter;
|
||||
|
||||
rcu_read_lock();
|
||||
if (cipso_v4_doi_search(doi) != NULL) {
|
||||
spin_lock(&cipso_v4_doi_list_lock);
|
||||
doi_def = cipso_v4_doi_search(doi);
|
||||
if (doi_def == NULL) {
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
rcu_read_unlock();
|
||||
return -ENOENT;
|
||||
}
|
||||
spin_lock(&cipso_v4_doi_list_lock);
|
||||
doi_def = cipso_v4_doi_search(doi);
|
||||
if (doi_def != NULL) {
|
||||
doi_def->valid = 0;
|
||||
list_del_rcu(&doi_def->list);
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list)
|
||||
if (dom_iter->valid)
|
||||
netlbl_domhsh_remove(dom_iter->domain,
|
||||
audit_info);
|
||||
cipso_v4_cache_invalidate();
|
||||
rcu_read_unlock();
|
||||
|
||||
cipso_v4_cache_invalidate();
|
||||
call_rcu(&doi_def->rcu, callback);
|
||||
return 0;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
@ -653,22 +641,19 @@ int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, const char *domain)
|
||||
new_dom->valid = 1;
|
||||
INIT_RCU_HEAD(&new_dom->rcu);
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock(&cipso_v4_doi_list_lock);
|
||||
list_for_each_entry_rcu(iter, &doi_def->dom_list, list)
|
||||
list_for_each_entry(iter, &doi_def->dom_list, list)
|
||||
if (iter->valid &&
|
||||
((domain != NULL && iter->domain != NULL &&
|
||||
strcmp(iter->domain, domain) == 0) ||
|
||||
(domain == NULL && iter->domain == NULL))) {
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
rcu_read_unlock();
|
||||
kfree(new_dom->domain);
|
||||
kfree(new_dom);
|
||||
return -EEXIST;
|
||||
}
|
||||
list_add_tail_rcu(&new_dom->list, &doi_def->dom_list);
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -689,9 +674,8 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
|
||||
{
|
||||
struct cipso_v4_domhsh_entry *iter;
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock(&cipso_v4_doi_list_lock);
|
||||
list_for_each_entry_rcu(iter, &doi_def->dom_list, list)
|
||||
list_for_each_entry(iter, &doi_def->dom_list, list)
|
||||
if (iter->valid &&
|
||||
((domain != NULL && iter->domain != NULL &&
|
||||
strcmp(iter->domain, domain) == 0) ||
|
||||
@ -699,13 +683,10 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
|
||||
iter->valid = 0;
|
||||
list_del_rcu(&iter->list);
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
rcu_read_unlock();
|
||||
call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free);
|
||||
|
||||
return 0;
|
||||
}
|
||||
spin_unlock(&cipso_v4_doi_list_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
|
@@ -110,6 +110,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			if (!sg)
 				goto unlock;
 		}
+		sg_init_table(sg, nfrags);
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
 		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
@@ -201,6 +202,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 			if (!sg)
 				goto out;
 		}
+		sg_init_table(sg, nfrags);
 		skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
 		err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 		if (unlikely(sg != &esp->sgbuf[0]))
@@ -1104,5 +1104,4 @@ void __init icmp_init(struct net_proto_family *ops)
 EXPORT_SYMBOL(icmp_err_convert);
 EXPORT_SYMBOL(icmp_send);
 EXPORT_SYMBOL(icmp_statistics);
-EXPORT_SYMBOL(icmpmsg_statistics);
 EXPORT_SYMBOL(xrlim_allow);
@@ -121,14 +121,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
-static const struct snmp_mib snmp4_icmp_list[] = {
-	SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
-	SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
-	SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
-	SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
-	SNMP_MIB_SENTINEL
-};
-
 static struct {
 	char *name;
 	int index;
@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly;
|
||||
#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
|
||||
#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
|
||||
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
|
||||
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */
|
||||
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
|
||||
#define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
|
||||
|
||||
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
|
||||
@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp)
|
||||
tp->rx_opt.sack_ok &= ~2;
|
||||
}
|
||||
|
||||
/* Take a notice that peer is sending DSACKs */
|
||||
/* Take a notice that peer is sending D-SACKs */
|
||||
static void tcp_dsack_seen(struct tcp_sock *tp)
|
||||
{
|
||||
tp->rx_opt.sack_ok |= 4;
|
||||
@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
|
||||
*
|
||||
* With D-SACK the lower bound is extended to cover sequence space below
|
||||
* SND.UNA down to undo_marker, which is the last point of interest. Yet
|
||||
* again, DSACK block must not to go across snd_una (for the same reason as
|
||||
* again, D-SACK block must not to go across snd_una (for the same reason as
|
||||
* for the normal SACK blocks, explained above). But there all simplicity
|
||||
* ends, TCP might receive valid D-SACKs below that. As long as they reside
|
||||
* fully below undo_marker they do not affect behavior in anyway and can
|
||||
@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
|
||||
if (!before(start_seq, tp->snd_nxt))
|
||||
return 0;
|
||||
|
||||
/* In outstanding window? ...This is valid exit for DSACKs too.
|
||||
/* In outstanding window? ...This is valid exit for D-SACKs too.
|
||||
* start_seq == snd_una is non-sensical (see comments above)
|
||||
*/
|
||||
if (after(start_seq, tp->snd_una))
|
||||
@ -1204,8 +1204,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
|
||||
* which may fail and creates some hassle (caller must handle error case
|
||||
* returns).
|
||||
*/
|
||||
int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
|
||||
u32 start_seq, u32 end_seq)
|
||||
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
|
||||
u32 start_seq, u32 end_seq)
|
||||
{
|
||||
int in_sack, err;
|
||||
unsigned int pkt_len;
|
||||
@ -1248,6 +1248,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
|
||||
int cached_fack_count;
|
||||
int i;
|
||||
int first_sack_index;
|
||||
int force_one_sack;
|
||||
|
||||
if (!tp->sacked_out) {
|
||||
if (WARN_ON(tp->fackets_out))
|
||||
@ -1272,18 +1273,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
|
||||
* if the only SACK change is the increase of the end_seq of
|
||||
* the first block then only apply that SACK block
|
||||
* and use retrans queue hinting otherwise slowpath */
|
||||
flag = 1;
|
||||
force_one_sack = 1;
|
||||
for (i = 0; i < num_sacks; i++) {
|
||||
__be32 start_seq = sp[i].start_seq;
|
||||
__be32 end_seq = sp[i].end_seq;
|
||||
|
||||
if (i == 0) {
|
||||
if (tp->recv_sack_cache[i].start_seq != start_seq)
|
||||
flag = 0;
|
||||
force_one_sack = 0;
|
||||
} else {
|
||||
if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
|
||||
(tp->recv_sack_cache[i].end_seq != end_seq))
|
||||
flag = 0;
|
||||
force_one_sack = 0;
|
||||
}
|
||||
tp->recv_sack_cache[i].start_seq = start_seq;
|
||||
tp->recv_sack_cache[i].end_seq = end_seq;
|
||||
@ -1295,7 +1296,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
|
||||
}
|
||||
|
||||
first_sack_index = 0;
|
||||
if (flag)
|
||||
if (force_one_sack)
|
||||
num_sacks = 1;
|
||||
else {
|
||||
int j;
|
||||
@ -1321,9 +1322,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
|
||||
}
|
||||
}
|
||||
|
||||
/* clear flag as used for different purpose in following code */
|
||||
flag = 0;
|
||||
|
||||
/* Use SACK fastpath hint if valid */
|
||||
cached_skb = tp->fastpath_skb_hint;
|
||||
cached_fack_count = tp->fastpath_cnt_hint;
|
||||
@ -1615,7 +1613,7 @@ void tcp_enter_frto(struct sock *sk)
|
||||
!icsk->icsk_retransmits)) {
|
||||
tp->prior_ssthresh = tcp_current_ssthresh(sk);
|
||||
/* Our state is too optimistic in ssthresh() call because cwnd
|
||||
* is not reduced until tcp_enter_frto_loss() when previous FRTO
|
||||
* is not reduced until tcp_enter_frto_loss() when previous F-RTO
|
||||
* recovery has not yet completed. Pattern would be this: RTO,
|
||||
* Cumulative ACK, RTO (2xRTO for the same segment does not end
|
||||
* up here twice).
|
||||
@ -1801,7 +1799,7 @@ void tcp_enter_loss(struct sock *sk, int how)
|
||||
tcp_set_ca_state(sk, TCP_CA_Loss);
|
||||
tp->high_seq = tp->snd_nxt;
|
||||
TCP_ECN_queue_cwr(tp);
|
||||
/* Abort FRTO algorithm if one is in progress */
|
||||
/* Abort F-RTO algorithm if one is in progress */
|
||||
tp->frto_counter = 0;
|
||||
}
|
||||
|
||||
@ -1946,7 +1944,7 @@ static int tcp_time_to_recover(struct sock *sk)
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
__u32 packets_out;
|
||||
|
||||
/* Do not perform any recovery during FRTO algorithm */
|
||||
/* Do not perform any recovery during F-RTO algorithm */
|
||||
if (tp->frto_counter)
|
||||
return 0;
|
||||
|
||||
@ -2962,7 +2960,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
|
||||
}
|
||||
|
||||
if (tp->frto_counter == 1) {
|
||||
/* Sending of the next skb must be allowed or no FRTO */
|
||||
/* Sending of the next skb must be allowed or no F-RTO */
|
||||
if (!tcp_send_head(sk) ||
|
||||
after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
|
||||
tp->snd_una + tp->snd_wnd)) {
|
||||
|
@ -1055,6 +1055,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
|
||||
bp->pad = 0;
|
||||
bp->protocol = protocol;
|
||||
bp->len = htons(tcplen);
|
||||
|
||||
sg_init_table(sg, 4);
|
||||
|
||||
sg_set_buf(&sg[block++], bp, sizeof(*bp));
|
||||
nbytes += sizeof(*bp);
|
||||
|
||||
@ -1080,6 +1083,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
|
||||
sg_set_buf(&sg[block++], key->key, key->keylen);
|
||||
nbytes += key->keylen;
|
||||
|
||||
sg_mark_end(sg, block);
|
||||
|
||||
/* Now store the Hash into the packet */
|
||||
err = crypto_hash_init(desc);
|
||||
if (err)
|
||||
|
@@ -1152,7 +1152,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
 
 	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
-			       skb->dev->ifindex, udptable );
+			       inet_iif(skb), udptable);
 
 	if (sk != NULL) {
 		int ret = udp_queue_rcv_skb(sk, skb);
@ -109,6 +109,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
|
||||
if (!sg)
|
||||
goto unlock;
|
||||
}
|
||||
sg_init_table(sg, nfrags);
|
||||
skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
|
||||
err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
|
||||
if (unlikely(sg != &esp->sgbuf[0]))
|
||||
@ -205,6 +206,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
sg_init_table(sg, nfrags);
|
||||
skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
|
||||
ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
|
||||
if (unlikely(sg != &esp->sgbuf[0]))
|
||||
|
@ -757,6 +757,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
|
||||
bp->len = htonl(tcplen);
|
||||
bp->protocol = htonl(protocol);
|
||||
|
||||
sg_init_table(sg, 4);
|
||||
|
||||
sg_set_buf(&sg[block++], bp, sizeof(*bp));
|
||||
nbytes += sizeof(*bp);
|
||||
|
||||
@ -778,6 +780,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
|
||||
sg_set_buf(&sg[block++], key->key, key->keylen);
|
||||
nbytes += key->keylen;
|
||||
|
||||
sg_mark_end(sg, block);
|
||||
|
||||
/* Now store the hash into the packet */
|
||||
err = crypto_hash_init(desc);
|
||||
if (err) {
|
||||
@ -1728,6 +1732,8 @@ process:
|
||||
if (!sock_owned_by_user(sk)) {
|
||||
#ifdef CONFIG_NET_DMA
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
|
||||
tp->ucopy.dma_chan = get_softnet_dma();
|
||||
if (tp->ucopy.dma_chan)
|
||||
ret = tcp_v6_do_rcv(sk, skb);
|
||||
else
|
||||
|
@@ -77,7 +77,7 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
 #endif /* CONFIG_PROC_FS */
 static struct tty_driver *driver;
 
-hashbin_t *ircomm_tty = NULL;
+static hashbin_t *ircomm_tty = NULL;
 
 static const struct tty_operations ops = {
 	.open = ircomm_tty_open,
@@ -1184,7 +1184,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
 	printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
 	       "status=%d aid=%d)\n",
 	       dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
-	       capab_info, status_code, aid & ~(BIT(15) | BIT(14)));
+	       capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
 
 	if (status_code != WLAN_STATUS_SUCCESS) {
 		printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
@@ -2096,7 +2096,8 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
 {
 	int tmp, hidden_ssid;
 
-	if (!memcmp(ifsta->ssid, ssid, ssid_len))
+	if (ssid_len == ifsta->ssid_len &&
+	    !memcmp(ifsta->ssid, ssid, ssid_len))
 		return 1;
 
 	if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL)
@ -178,11 +178,9 @@ int netlbl_domhsh_init(u32 size)
|
||||
for (iter = 0; iter < hsh_tbl->size; iter++)
|
||||
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock(&netlbl_domhsh_lock);
|
||||
rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
|
||||
spin_unlock(&netlbl_domhsh_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -222,7 +220,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
|
||||
entry->valid = 1;
|
||||
INIT_RCU_HEAD(&entry->rcu);
|
||||
|
||||
ret_val = 0;
|
||||
rcu_read_lock();
|
||||
if (entry->domain != NULL) {
|
||||
bkt = netlbl_domhsh_hash(entry->domain);
|
||||
@ -233,7 +230,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
|
||||
else
|
||||
ret_val = -EEXIST;
|
||||
spin_unlock(&netlbl_domhsh_lock);
|
||||
} else if (entry->domain == NULL) {
|
||||
} else {
|
||||
INIT_LIST_HEAD(&entry->list);
|
||||
spin_lock(&netlbl_domhsh_def_lock);
|
||||
if (rcu_dereference(netlbl_domhsh_def) == NULL)
|
||||
@ -241,9 +238,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
|
||||
else
|
||||
ret_val = -EEXIST;
|
||||
spin_unlock(&netlbl_domhsh_def_lock);
|
||||
} else
|
||||
ret_val = -EINVAL;
|
||||
|
||||
}
|
||||
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
|
||||
if (audit_buf != NULL) {
|
||||
audit_log_format(audit_buf,
|
||||
@ -262,7 +257,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
|
||||
audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
|
||||
audit_log_end(audit_buf);
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
if (ret_val != 0) {
|
||||
@ -313,38 +307,30 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
|
||||
struct audit_buffer *audit_buf;
|
||||
|
||||
rcu_read_lock();
|
||||
if (domain != NULL)
|
||||
entry = netlbl_domhsh_search(domain, 0);
|
||||
else
|
||||
entry = netlbl_domhsh_search(domain, 1);
|
||||
entry = netlbl_domhsh_search(domain, (domain != NULL ? 0 : 1));
|
||||
if (entry == NULL)
|
||||
goto remove_return;
|
||||
switch (entry->type) {
|
||||
case NETLBL_NLTYPE_UNLABELED:
|
||||
break;
|
||||
case NETLBL_NLTYPE_CIPSOV4:
|
||||
ret_val = cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4,
|
||||
entry->domain);
|
||||
if (ret_val != 0)
|
||||
goto remove_return;
|
||||
cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4,
|
||||
entry->domain);
|
||||
break;
|
||||
}
|
||||
ret_val = 0;
|
||||
if (entry != rcu_dereference(netlbl_domhsh_def)) {
|
||||
spin_lock(&netlbl_domhsh_lock);
|
||||
if (entry->valid) {
|
||||
entry->valid = 0;
|
||||
list_del_rcu(&entry->list);
|
||||
} else
|
||||
ret_val = -ENOENT;
|
||||
ret_val = 0;
|
||||
}
|
||||
spin_unlock(&netlbl_domhsh_lock);
|
||||
} else {
|
||||
spin_lock(&netlbl_domhsh_def_lock);
|
||||
if (entry->valid) {
|
||||
entry->valid = 0;
|
||||
rcu_assign_pointer(netlbl_domhsh_def, NULL);
|
||||
} else
|
||||
ret_val = -ENOENT;
|
||||
ret_val = 0;
|
||||
}
|
||||
spin_unlock(&netlbl_domhsh_def_lock);
|
||||
}
|
||||
|
||||
@ -357,11 +343,10 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
|
||||
audit_log_end(audit_buf);
|
||||
}
|
||||
|
||||
if (ret_val == 0)
|
||||
call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
|
||||
|
||||
remove_return:
|
||||
rcu_read_unlock();
|
||||
if (ret_val == 0)
|
||||
call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -85,11 +85,9 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
|
||||
*/
|
||||
void netlbl_mgmt_protocount_inc(void)
|
||||
{
|
||||
rcu_read_lock();
|
||||
spin_lock(&netlabel_mgmt_protocount_lock);
|
||||
netlabel_mgmt_protocount++;
|
||||
spin_unlock(&netlabel_mgmt_protocount_lock);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -103,12 +101,10 @@ void netlbl_mgmt_protocount_inc(void)
|
||||
*/
|
||||
void netlbl_mgmt_protocount_dec(void)
|
||||
{
|
||||
rcu_read_lock();
|
||||
spin_lock(&netlabel_mgmt_protocount_lock);
|
||||
if (netlabel_mgmt_protocount > 0)
|
||||
netlabel_mgmt_protocount--;
|
||||
spin_unlock(&netlabel_mgmt_protocount_lock);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -84,12 +84,10 @@ static void netlbl_unlabel_acceptflg_set(u8 value,
|
||||
struct audit_buffer *audit_buf;
|
||||
u8 old_val;
|
||||
|
||||
rcu_read_lock();
|
||||
old_val = netlabel_unlabel_acceptflg;
|
||||
spin_lock(&netlabel_unlabel_acceptflg_lock);
|
||||
old_val = netlabel_unlabel_acceptflg;
|
||||
netlabel_unlabel_acceptflg = value;
|
||||
spin_unlock(&netlabel_unlabel_acceptflg_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW,
|
||||
audit_info);
|
||||
|
@@ -166,7 +166,7 @@ bad_mirred:
 		return TC_ACT_SHOT;
 	}
 
-	skb2 = skb_clone(skb, GFP_ATOMIC);
+	skb2 = skb_act_clone(skb, GFP_ATOMIC);
 	if (skb2 == NULL)
 		goto bad_mirred;
 	if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
@@ -107,7 +107,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
 }
 
 /* Free the shared key stucture */
-void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
+static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
 {
 	BUG_ON(!list_empty(&sh_key->key_list));
 	sctp_auth_key_put(sh_key->key);
@@ -220,7 +220,7 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector(
 
 
 /* Make a key vector based on our local parameters */
-struct sctp_auth_bytes *sctp_auth_make_local_vector(
+static struct sctp_auth_bytes *sctp_auth_make_local_vector(
 	const struct sctp_association *asoc,
 	gfp_t gfp)
 {
@@ -232,7 +232,7 @@ struct sctp_auth_bytes *sctp_auth_make_local_vector(
 }
 
 /* Make a key vector based on peer's parameters */
-struct sctp_auth_bytes *sctp_auth_make_peer_vector(
+static struct sctp_auth_bytes *sctp_auth_make_peer_vector(
 	const struct sctp_association *asoc,
 	gfp_t gfp)
 {
@@ -170,6 +170,7 @@ __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
 	return crc32;
 }
 
+#if 0
 __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
 {
 	__u32 i;
@@ -186,6 +187,7 @@ __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
 
 	return crc32;
 }
+#endif /* 0 */
 
 __u32 sctp_end_cksum(__u32 crc32)
 {
@@ -553,7 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		if (copy > len)
 			copy = len;
 
-		sg_set_buf(&sg, skb->data + offset, copy);
+		sg_init_one(&sg, skb->data + offset, copy);
 
 		err = icv_update(desc, &sg, copy);
 		if (unlikely(err))
@@ -576,8 +576,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		if (copy > len)
 			copy = len;
 
+		sg_init_table(&sg, 1);
 		sg_set_page(&sg, frag->page, copy,
-			    frag->page_offset + offset-start);
+			    frag->page_offset + offset-start);
 
 		err = icv_update(desc, &sg, copy);
 		if (unlikely(err))