forked from Minki/linux
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits) [NETPOLL]: Fix local_bh_enable() warning. [IPVS]: Make ip_vs_sync.c <= 80col wide. [IPVS]: Use msleep_interruptable() instead of ssleep() aka msleep() [HAMRADIO]: Fix baycom_epp.c compile failure. [DCCP]: Whitespace cleanups [DCCP] ccid3: Fixup some type conversions related to rtts [DCCP] ccid3: BUG-FIX - conversion errors [DCCP] ccid3: Reorder packet history source file [DCCP] ccid3: Reorder packet history header file [DCCP] ccid3: Make debug output consistent [DCCP] ccid3: Perform history operations only after packet has been sent [DCCP] ccid3: TX history - remove unused field [DCCP] ccid3: Shift window counter computation [DCCP] ccid3: Sanity-check RTT samples [DCCP] ccid3: Initialise RTT values [DCCP] ccid: Deprecate ccid_hc_tx_insert_options [DCCP]: Warn when discarding packet due to internal errors [DCCP]: Only deliver to the CCID rx side in charge [DCCP]: Simplify TFRC calculation [DCCP]: Debug timeval operations ...
This commit is contained in:
commit
4259cb25d4
@ -19,7 +19,8 @@ for real time and multimedia traffic.
|
||||
|
||||
It has a base protocol and pluggable congestion control IDs (CCIDs).
|
||||
|
||||
It is at experimental RFC status and the homepage for DCCP as a protocol is at:
|
||||
It is at proposed standard RFC status and the homepage for DCCP as a protocol
|
||||
is at:
|
||||
http://www.read.cs.ucla.edu/dccp/
|
||||
|
||||
Missing features
|
||||
@ -34,9 +35,6 @@ The known bugs are at:
|
||||
Socket options
|
||||
==============
|
||||
|
||||
DCCP_SOCKOPT_PACKET_SIZE is used for CCID3 to set default packet size for
|
||||
calculations.
|
||||
|
||||
DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
|
||||
service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
|
||||
the socket will fall back to 0 (which means that no meaningful service code
|
||||
|
@ -24,7 +24,7 @@
|
||||
|
||||
#define SHA384_DIGEST_SIZE 48
|
||||
#define SHA512_DIGEST_SIZE 64
|
||||
#define SHA384_HMAC_BLOCK_SIZE 96
|
||||
#define SHA384_HMAC_BLOCK_SIZE 128
|
||||
#define SHA512_HMAC_BLOCK_SIZE 128
|
||||
|
||||
struct sha512_ctx {
|
||||
|
2
drivers/atm/.gitignore
vendored
2
drivers/atm/.gitignore
vendored
@ -2,4 +2,4 @@
|
||||
fore200e_mkfirm
|
||||
fore200e_pca_fw.c
|
||||
pca200e.bin
|
||||
|
||||
pca200e_ecd.bin2
|
||||
|
@ -1177,7 +1177,7 @@ static void baycom_probe(struct net_device *dev)
|
||||
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
|
||||
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
|
||||
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
|
||||
memcpy(dev->dev_addr, &ax25_nocall, AX25_ADDR_LEN);
|
||||
memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
|
||||
dev->tx_queue_len = 16;
|
||||
|
||||
/* New style flags */
|
||||
|
@ -382,7 +382,7 @@ config SDLA
|
||||
|
||||
# Wan router core.
|
||||
config WAN_ROUTER_DRIVERS
|
||||
bool "WAN router drivers"
|
||||
tristate "WAN router drivers"
|
||||
depends on WAN && WAN_ROUTER
|
||||
---help---
|
||||
Connect LAN to WAN via Linux box.
|
||||
@ -393,7 +393,8 @@ config WAN_ROUTER_DRIVERS
|
||||
<file:Documentation/networking/wan-router.txt>.
|
||||
|
||||
Note that the answer to this question won't directly affect the
|
||||
kernel: saying N will just cause the configurator to skip all
|
||||
kernel except for how subordinate drivers may be built:
|
||||
saying N will just cause the configurator to skip all
|
||||
the questions about WAN router drivers.
|
||||
|
||||
If unsure, say N.
|
||||
|
@ -176,20 +176,20 @@ enum {
|
||||
};
|
||||
|
||||
/* DCCP features (RFC 4340 section 6.4) */
|
||||
enum {
|
||||
DCCPF_RESERVED = 0,
|
||||
DCCPF_CCID = 1,
|
||||
enum {
|
||||
DCCPF_RESERVED = 0,
|
||||
DCCPF_CCID = 1,
|
||||
DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */
|
||||
DCCPF_SEQUENCE_WINDOW = 3,
|
||||
DCCPF_SEQUENCE_WINDOW = 3,
|
||||
DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */
|
||||
DCCPF_ACK_RATIO = 5,
|
||||
DCCPF_SEND_ACK_VECTOR = 6,
|
||||
DCCPF_SEND_NDP_COUNT = 7,
|
||||
DCCPF_ACK_RATIO = 5,
|
||||
DCCPF_SEND_ACK_VECTOR = 6,
|
||||
DCCPF_SEND_NDP_COUNT = 7,
|
||||
DCCPF_MIN_CSUM_COVER = 8,
|
||||
DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */
|
||||
/* 10-127 reserved */
|
||||
DCCPF_MIN_CCID_SPECIFIC = 128,
|
||||
DCCPF_MAX_CCID_SPECIFIC = 255,
|
||||
/* 10-127 reserved */
|
||||
DCCPF_MIN_CCID_SPECIFIC = 128,
|
||||
DCCPF_MAX_CCID_SPECIFIC = 255,
|
||||
};
|
||||
|
||||
/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
|
||||
@ -427,7 +427,7 @@ struct dccp_service_list {
|
||||
};
|
||||
|
||||
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
|
||||
#define DCCP_SERVICE_CODE_IS_ABSENT 0
|
||||
#define DCCP_SERVICE_CODE_IS_ABSENT 0
|
||||
|
||||
static inline int dccp_list_has_service(const struct dccp_service_list *sl,
|
||||
const __be32 service)
|
||||
@ -436,7 +436,7 @@ static inline int dccp_list_has_service(const struct dccp_service_list *sl,
|
||||
u32 i = sl->dccpsl_nr;
|
||||
while (i--)
|
||||
if (sl->dccpsl_list[i] == service)
|
||||
return 1;
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -511,7 +511,7 @@ struct dccp_sock {
|
||||
__u8 dccps_hc_tx_insert_options:1;
|
||||
struct timer_list dccps_xmit_timer;
|
||||
};
|
||||
|
||||
|
||||
static inline struct dccp_sock *dccp_sk(const struct sock *sk)
|
||||
{
|
||||
return (struct dccp_sock *)sk;
|
||||
|
@ -37,10 +37,14 @@ struct tfrc_rx_info {
|
||||
* @tfrctx_p: current loss event rate (5.4)
|
||||
* @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3)
|
||||
* @tfrctx_ipi: inter-packet interval (4.6)
|
||||
*
|
||||
* Note: X and X_recv are both maintained in units of 64 * bytes/second. This
|
||||
* enables a finer resolution of sending rates and avoids problems with
|
||||
* integer arithmetic; u32 is not sufficient as scaling consumes 6 bits.
|
||||
*/
|
||||
struct tfrc_tx_info {
|
||||
__u32 tfrctx_x;
|
||||
__u32 tfrctx_x_recv;
|
||||
__u64 tfrctx_x;
|
||||
__u64 tfrctx_x_recv;
|
||||
__u32 tfrctx_x_calc;
|
||||
__u32 tfrctx_rtt;
|
||||
__u32 tfrctx_p;
|
||||
|
@ -285,6 +285,8 @@ extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
|
||||
extern const ax25_address ax25_bcast;
|
||||
extern const ax25_address ax25_defaddr;
|
||||
extern const ax25_address null_ax25_address;
|
||||
extern char *ax2asc(char *buf, const ax25_address *);
|
||||
extern void asc2ax(ax25_address *addr, const char *callsign);
|
||||
extern int ax25cmp(const ax25_address *, const ax25_address *);
|
||||
extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
|
||||
extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
|
||||
|
@ -83,7 +83,7 @@ EXPORT_SYMBOL(ax2asc);
|
||||
*/
|
||||
void asc2ax(ax25_address *addr, const char *callsign)
|
||||
{
|
||||
char *s;
|
||||
const char *s;
|
||||
int n;
|
||||
|
||||
for (s = callsign, n = 0; n < 6; n++) {
|
||||
|
@ -242,22 +242,28 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
|
||||
|
||||
/* don't get messages out of order, and no recursion */
|
||||
if (skb_queue_len(&npinfo->txq) == 0 &&
|
||||
npinfo->poll_owner != smp_processor_id() &&
|
||||
netif_tx_trylock(dev)) {
|
||||
/* try until next clock tick */
|
||||
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
|
||||
if (!netif_queue_stopped(dev))
|
||||
status = dev->hard_start_xmit(skb, dev);
|
||||
npinfo->poll_owner != smp_processor_id()) {
|
||||
unsigned long flags;
|
||||
|
||||
if (status == NETDEV_TX_OK)
|
||||
break;
|
||||
local_irq_save(flags);
|
||||
if (netif_tx_trylock(dev)) {
|
||||
/* try until next clock tick */
|
||||
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
|
||||
tries > 0; --tries) {
|
||||
if (!netif_queue_stopped(dev))
|
||||
status = dev->hard_start_xmit(skb, dev);
|
||||
|
||||
/* tickle device maybe there is some cleanup */
|
||||
netpoll_poll(np);
|
||||
if (status == NETDEV_TX_OK)
|
||||
break;
|
||||
|
||||
udelay(USEC_PER_POLL);
|
||||
/* tickle device maybe there is some cleanup */
|
||||
netpoll_poll(np);
|
||||
|
||||
udelay(USEC_PER_POLL);
|
||||
}
|
||||
netif_tx_unlock(dev);
|
||||
}
|
||||
netif_tx_unlock(dev);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
if (status != NETDEV_TX_OK) {
|
||||
|
@ -223,7 +223,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
|
||||
gap = -new_head;
|
||||
}
|
||||
new_head += DCCP_MAX_ACKVEC_LEN;
|
||||
}
|
||||
}
|
||||
|
||||
av->dccpav_buf_head = new_head;
|
||||
|
||||
@ -336,7 +336,7 @@ out_duplicate:
|
||||
void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len)
|
||||
{
|
||||
dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len,
|
||||
(unsigned long long)ackno);
|
||||
(unsigned long long)ackno);
|
||||
|
||||
while (len--) {
|
||||
const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6;
|
||||
|
@ -43,8 +43,6 @@ struct ccid_operations {
|
||||
unsigned char* value);
|
||||
int (*ccid_hc_rx_insert_options)(struct sock *sk,
|
||||
struct sk_buff *skb);
|
||||
int (*ccid_hc_tx_insert_options)(struct sock *sk,
|
||||
struct sk_buff *skb);
|
||||
void (*ccid_hc_tx_packet_recv)(struct sock *sk,
|
||||
struct sk_buff *skb);
|
||||
int (*ccid_hc_tx_parse_options)(struct sock *sk,
|
||||
@ -146,14 +144,6 @@ static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
|
||||
return rc;
|
||||
}
|
||||
|
||||
static inline int ccid_hc_tx_insert_options(struct ccid *ccid, struct sock *sk,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (ccid->ccid_ops->ccid_hc_tx_insert_options != NULL)
|
||||
return ccid->ccid_ops->ccid_hc_tx_insert_options(sk, skb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
|
@ -351,7 +351,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
|
||||
|
||||
while (seqp != hctx->ccid2hctx_seqh) {
|
||||
ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
|
||||
(unsigned long long)seqp->ccid2s_seq,
|
||||
(unsigned long long)seqp->ccid2s_seq,
|
||||
seqp->ccid2s_acked, seqp->ccid2s_sent);
|
||||
seqp = seqp->ccid2s_next;
|
||||
}
|
||||
@ -473,7 +473,7 @@ static inline void ccid2_new_ack(struct sock *sk,
|
||||
/* first measurement */
|
||||
if (hctx->ccid2hctx_srtt == -1) {
|
||||
ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
|
||||
r, jiffies,
|
||||
r, jiffies,
|
||||
(unsigned long long)seqp->ccid2s_seq);
|
||||
ccid2_change_srtt(hctx, r);
|
||||
hctx->ccid2hctx_rttvar = r >> 1;
|
||||
@ -518,8 +518,8 @@ static inline void ccid2_new_ack(struct sock *sk,
|
||||
hctx->ccid2hctx_lastrtt = jiffies;
|
||||
|
||||
ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
|
||||
hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
|
||||
hctx->ccid2hctx_rto, HZ, r);
|
||||
hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
|
||||
hctx->ccid2hctx_rto, HZ, r);
|
||||
hctx->ccid2hctx_sent = 0;
|
||||
}
|
||||
|
||||
@ -667,9 +667,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
/* new packet received or marked */
|
||||
if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
|
||||
!seqp->ccid2s_acked) {
|
||||
if (state ==
|
||||
if (state ==
|
||||
DCCP_ACKVEC_STATE_ECN_MARKED) {
|
||||
ccid2_congestion_event(hctx,
|
||||
ccid2_congestion_event(hctx,
|
||||
seqp);
|
||||
} else
|
||||
ccid2_new_ack(sk, seqp,
|
||||
|
@ -41,27 +41,6 @@
|
||||
#include "lib/tfrc.h"
|
||||
#include "ccid3.h"
|
||||
|
||||
/*
|
||||
* Reason for maths here is to avoid 32 bit overflow when a is big.
|
||||
* With this we get close to the limit.
|
||||
*/
|
||||
static u32 usecs_div(const u32 a, const u32 b)
|
||||
{
|
||||
const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10)) ? 10 :
|
||||
a < (UINT_MAX / (USEC_PER_SEC / 50)) ? 50 :
|
||||
a < (UINT_MAX / (USEC_PER_SEC / 100)) ? 100 :
|
||||
a < (UINT_MAX / (USEC_PER_SEC / 500)) ? 500 :
|
||||
a < (UINT_MAX / (USEC_PER_SEC / 1000)) ? 1000 :
|
||||
a < (UINT_MAX / (USEC_PER_SEC / 5000)) ? 5000 :
|
||||
a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
|
||||
a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
|
||||
100000;
|
||||
const u32 tmp = a * (USEC_PER_SEC / div);
|
||||
return (b >= 2 * div) ? tmp / (b / div) : tmp;
|
||||
}
|
||||
|
||||
|
||||
|
||||
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
|
||||
static int ccid3_debug;
|
||||
#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
|
||||
@ -108,8 +87,9 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
|
||||
{
|
||||
timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
|
||||
|
||||
/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
|
||||
hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x);
|
||||
/* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
|
||||
hctx->ccid3hctx_t_ipi = scaled_div(hctx->ccid3hctx_s,
|
||||
hctx->ccid3hctx_x >> 6);
|
||||
|
||||
/* Update nominal send time with regard to the new t_ipi */
|
||||
timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
|
||||
@ -128,40 +108,44 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
|
||||
* X = max(min(2 * X, 2 * X_recv), s / R);
|
||||
* tld = now;
|
||||
*
|
||||
* Note: X and X_recv are both stored in units of 64 * bytes/second, to support
|
||||
* fine-grained resolution of sending rates. This requires scaling by 2^6
|
||||
* throughout the code. Only X_calc is unscaled (in bytes/second).
|
||||
*
|
||||
* If X has changed, we also update the scheduled send time t_now,
|
||||
* the inter-packet interval t_ipi, and the delta value.
|
||||
*/
|
||||
*/
|
||||
static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
|
||||
|
||||
{
|
||||
struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
|
||||
const __u32 old_x = hctx->ccid3hctx_x;
|
||||
const __u64 old_x = hctx->ccid3hctx_x;
|
||||
|
||||
if (hctx->ccid3hctx_p > 0) {
|
||||
hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
|
||||
hctx->ccid3hctx_rtt,
|
||||
hctx->ccid3hctx_p);
|
||||
hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc,
|
||||
hctx->ccid3hctx_x_recv * 2),
|
||||
hctx->ccid3hctx_s / TFRC_T_MBI);
|
||||
|
||||
} else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >=
|
||||
hctx->ccid3hctx_rtt) {
|
||||
hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv,
|
||||
hctx->ccid3hctx_x ) * 2,
|
||||
usecs_div(hctx->ccid3hctx_s,
|
||||
hctx->ccid3hctx_rtt) );
|
||||
hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6,
|
||||
hctx->ccid3hctx_x_recv * 2);
|
||||
hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
|
||||
(((__u64)hctx->ccid3hctx_s) << 6) /
|
||||
TFRC_T_MBI);
|
||||
|
||||
} else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) -
|
||||
(suseconds_t)hctx->ccid3hctx_rtt >= 0) {
|
||||
|
||||
hctx->ccid3hctx_x =
|
||||
max(2 * min(hctx->ccid3hctx_x, hctx->ccid3hctx_x_recv),
|
||||
scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
|
||||
hctx->ccid3hctx_rtt));
|
||||
hctx->ccid3hctx_t_ld = *now;
|
||||
} else
|
||||
ccid3_pr_debug("Not changing X\n");
|
||||
}
|
||||
|
||||
if (hctx->ccid3hctx_x != old_x)
|
||||
ccid3_update_send_time(hctx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
|
||||
* @len: DCCP packet payload size in bytes
|
||||
* Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
|
||||
* @len: DCCP packet payload size in bytes
|
||||
*/
|
||||
static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
|
||||
{
|
||||
@ -178,6 +162,33 @@ static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
* Update Window Counter using the algorithm from [RFC 4342, 8.1].
|
||||
* The algorithm is not applicable if RTT < 4 microseconds.
|
||||
*/
|
||||
static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx,
|
||||
struct timeval *now)
|
||||
{
|
||||
suseconds_t delta;
|
||||
u32 quarter_rtts;
|
||||
|
||||
if (unlikely(hctx->ccid3hctx_rtt < 4)) /* avoid divide-by-zero */
|
||||
return;
|
||||
|
||||
delta = timeval_delta(now, &hctx->ccid3hctx_t_last_win_count);
|
||||
DCCP_BUG_ON(delta < 0);
|
||||
|
||||
quarter_rtts = (u32)delta / (hctx->ccid3hctx_rtt / 4);
|
||||
|
||||
if (quarter_rtts > 0) {
|
||||
hctx->ccid3hctx_t_last_win_count = *now;
|
||||
hctx->ccid3hctx_last_win_count += min_t(u32, quarter_rtts, 5);
|
||||
hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */
|
||||
|
||||
ccid3_pr_debug("now at %#X\n", hctx->ccid3hctx_last_win_count);
|
||||
}
|
||||
}
|
||||
|
||||
static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
|
||||
{
|
||||
struct sock *sk = (struct sock *)data;
|
||||
@ -191,20 +202,20 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
|
||||
goto restart_timer;
|
||||
}
|
||||
|
||||
ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
|
||||
ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
|
||||
ccid3_tx_state_name(hctx->ccid3hctx_state));
|
||||
|
||||
|
||||
switch (hctx->ccid3hctx_state) {
|
||||
case TFRC_SSTATE_NO_FBACK:
|
||||
/* RFC 3448, 4.4: Halve send rate directly */
|
||||
hctx->ccid3hctx_x = min_t(u32, hctx->ccid3hctx_x / 2,
|
||||
hctx->ccid3hctx_s / TFRC_T_MBI);
|
||||
hctx->ccid3hctx_x = max(hctx->ccid3hctx_x / 2,
|
||||
(((__u64)hctx->ccid3hctx_s) << 6) /
|
||||
TFRC_T_MBI);
|
||||
|
||||
ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
|
||||
"bytes/s\n",
|
||||
dccp_role(sk), sk,
|
||||
ccid3_pr_debug("%s(%p, state=%s), updated tx rate to %u "
|
||||
"bytes/s\n", dccp_role(sk), sk,
|
||||
ccid3_tx_state_name(hctx->ccid3hctx_state),
|
||||
hctx->ccid3hctx_x);
|
||||
(unsigned)(hctx->ccid3hctx_x >> 6));
|
||||
/* The value of R is still undefined and so we can not recompute
|
||||
* the timout value. Keep initial value as per [RFC 4342, 5]. */
|
||||
t_nfb = TFRC_INITIAL_TIMEOUT;
|
||||
@ -213,34 +224,46 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
|
||||
case TFRC_SSTATE_FBACK:
|
||||
/*
|
||||
* Check if IDLE since last timeout and recv rate is less than
|
||||
* 4 packets per RTT
|
||||
* 4 packets (in units of 64*bytes/sec) per RTT
|
||||
*/
|
||||
if (!hctx->ccid3hctx_idle ||
|
||||
(hctx->ccid3hctx_x_recv >=
|
||||
4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
|
||||
(hctx->ccid3hctx_x_recv >= 4 *
|
||||
scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
|
||||
hctx->ccid3hctx_rtt))) {
|
||||
struct timeval now;
|
||||
|
||||
ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
|
||||
ccid3_pr_debug("%s(%p, state=%s), not idle\n",
|
||||
dccp_role(sk), sk,
|
||||
ccid3_tx_state_name(hctx->ccid3hctx_state));
|
||||
/* Halve sending rate */
|
||||
ccid3_tx_state_name(hctx->ccid3hctx_state));
|
||||
|
||||
/* If (p == 0 || X_calc > 2 * X_recv)
|
||||
/*
|
||||
* Modify the cached value of X_recv [RFC 3448, 4.4]
|
||||
*
|
||||
* If (p == 0 || X_calc > 2 * X_recv)
|
||||
* X_recv = max(X_recv / 2, s / (2 * t_mbi));
|
||||
* Else
|
||||
* X_recv = X_calc / 4;
|
||||
*
|
||||
* Note that X_recv is scaled by 2^6 while X_calc is not
|
||||
*/
|
||||
BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
|
||||
|
||||
if (hctx->ccid3hctx_p == 0 ||
|
||||
hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
|
||||
hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
|
||||
hctx->ccid3hctx_s / (2 * TFRC_T_MBI));
|
||||
else
|
||||
hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;
|
||||
(hctx->ccid3hctx_x_calc >
|
||||
(hctx->ccid3hctx_x_recv >> 5))) {
|
||||
|
||||
/* Update sending rate */
|
||||
dccp_timestamp(sk, &now);
|
||||
hctx->ccid3hctx_x_recv =
|
||||
max(hctx->ccid3hctx_x_recv / 2,
|
||||
(((__u64)hctx->ccid3hctx_s) << 6) /
|
||||
(2 * TFRC_T_MBI));
|
||||
|
||||
if (hctx->ccid3hctx_p == 0)
|
||||
dccp_timestamp(sk, &now);
|
||||
} else {
|
||||
hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
|
||||
hctx->ccid3hctx_x_recv <<= 4;
|
||||
}
|
||||
/* Now recalculate X [RFC 3448, 4.3, step (4)] */
|
||||
ccid3_hc_tx_update_x(sk, &now);
|
||||
}
|
||||
/*
|
||||
@ -251,7 +274,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
|
||||
t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
|
||||
break;
|
||||
case TFRC_SSTATE_NO_SENT:
|
||||
DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk);
|
||||
DCCP_BUG("%s(%p) - Illegal state NO_SENT", dccp_role(sk), sk);
|
||||
/* fall through */
|
||||
case TFRC_SSTATE_TERM:
|
||||
goto out;
|
||||
@ -277,9 +300,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
struct dccp_sock *dp = dccp_sk(sk);
|
||||
struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
|
||||
struct dccp_tx_hist_entry *new_packet;
|
||||
struct timeval now;
|
||||
long delay;
|
||||
suseconds_t delay;
|
||||
|
||||
BUG_ON(hctx == NULL);
|
||||
|
||||
@ -291,34 +313,21 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
|
||||
if (unlikely(skb->len == 0))
|
||||
return -EBADMSG;
|
||||
|
||||
/* See if last packet allocated was not sent */
|
||||
new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
|
||||
if (new_packet == NULL || new_packet->dccphtx_sent) {
|
||||
new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
|
||||
GFP_ATOMIC);
|
||||
|
||||
if (unlikely(new_packet == NULL)) {
|
||||
DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
|
||||
"send refused\n", dccp_role(sk), sk);
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
|
||||
}
|
||||
|
||||
dccp_timestamp(sk, &now);
|
||||
|
||||
switch (hctx->ccid3hctx_state) {
|
||||
case TFRC_SSTATE_NO_SENT:
|
||||
sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
|
||||
jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
|
||||
(jiffies +
|
||||
usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
|
||||
hctx->ccid3hctx_last_win_count = 0;
|
||||
hctx->ccid3hctx_t_last_win_count = now;
|
||||
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
|
||||
|
||||
/* Set initial sending rate to 1 packet per second */
|
||||
/* Set initial sending rate X/s to 1pps (X is scaled by 2^6) */
|
||||
ccid3_hc_tx_update_s(hctx, skb->len);
|
||||
hctx->ccid3hctx_x = hctx->ccid3hctx_s;
|
||||
hctx->ccid3hctx_x = hctx->ccid3hctx_s;
|
||||
hctx->ccid3hctx_x <<= 6;
|
||||
|
||||
/* First timeout, according to [RFC 3448, 4.2], is 1 second */
|
||||
hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
|
||||
@ -332,77 +341,57 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
|
||||
case TFRC_SSTATE_FBACK:
|
||||
delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
|
||||
/*
|
||||
* Scheduling of packet transmissions [RFC 3448, 4.6]
|
||||
* Scheduling of packet transmissions [RFC 3448, 4.6]
|
||||
*
|
||||
* if (t_now > t_nom - delta)
|
||||
* // send the packet now
|
||||
* else
|
||||
* // send the packet in (t_nom - t_now) milliseconds.
|
||||
*/
|
||||
if (delay - (long)hctx->ccid3hctx_delta >= 0)
|
||||
if (delay - (suseconds_t)hctx->ccid3hctx_delta >= 0)
|
||||
return delay / 1000L;
|
||||
|
||||
ccid3_hc_tx_update_win_count(hctx, &now);
|
||||
break;
|
||||
case TFRC_SSTATE_TERM:
|
||||
DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
|
||||
DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* prepare to send now (add options etc.) */
|
||||
dp->dccps_hc_tx_insert_options = 1;
|
||||
new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval =
|
||||
hctx->ccid3hctx_last_win_count;
|
||||
DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
|
||||
|
||||
/* set the nominal send time for the next following packet */
|
||||
timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
|
||||
static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
|
||||
unsigned int len)
|
||||
{
|
||||
const struct dccp_sock *dp = dccp_sk(sk);
|
||||
struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
|
||||
struct timeval now;
|
||||
unsigned long quarter_rtt;
|
||||
struct dccp_tx_hist_entry *packet;
|
||||
|
||||
BUG_ON(hctx == NULL);
|
||||
|
||||
dccp_timestamp(sk, &now);
|
||||
|
||||
ccid3_hc_tx_update_s(hctx, len);
|
||||
|
||||
packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
|
||||
packet = dccp_tx_hist_entry_new(ccid3_tx_hist, GFP_ATOMIC);
|
||||
if (unlikely(packet == NULL)) {
|
||||
DCCP_WARN("packet doesn't exist in history!\n");
|
||||
return;
|
||||
}
|
||||
if (unlikely(packet->dccphtx_sent)) {
|
||||
DCCP_WARN("no unsent packet in history!\n");
|
||||
DCCP_CRIT("packet history - out of memory!");
|
||||
return;
|
||||
}
|
||||
dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, packet);
|
||||
|
||||
dccp_timestamp(sk, &now);
|
||||
packet->dccphtx_tstamp = now;
|
||||
packet->dccphtx_seqno = dp->dccps_gss;
|
||||
/*
|
||||
* Check if win_count have changed
|
||||
* Algorithm in "8.1. Window Counter Value" in RFC 4342.
|
||||
*/
|
||||
quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
|
||||
if (likely(hctx->ccid3hctx_rtt > 8))
|
||||
quarter_rtt /= hctx->ccid3hctx_rtt / 4;
|
||||
|
||||
if (quarter_rtt > 0) {
|
||||
hctx->ccid3hctx_t_last_win_count = now;
|
||||
hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count +
|
||||
min_t(unsigned long, quarter_rtt, 5)) % 16;
|
||||
ccid3_pr_debug("%s, sk=%p, window changed from "
|
||||
"%u to %u!\n",
|
||||
dccp_role(sk), sk,
|
||||
packet->dccphtx_ccval,
|
||||
hctx->ccid3hctx_last_win_count);
|
||||
}
|
||||
|
||||
hctx->ccid3hctx_idle = 0;
|
||||
packet->dccphtx_rtt = hctx->ccid3hctx_rtt;
|
||||
packet->dccphtx_sent = 1;
|
||||
packet->dccphtx_seqno = dccp_sk(sk)->dccps_gss;
|
||||
packet->dccphtx_rtt = hctx->ccid3hctx_rtt;
|
||||
packet->dccphtx_sent = 1;
|
||||
hctx->ccid3hctx_idle = 0;
|
||||
}
|
||||
|
||||
static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
@ -414,7 +403,7 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
struct timeval now;
|
||||
unsigned long t_nfb;
|
||||
u32 pinv;
|
||||
long r_sample, t_elapsed;
|
||||
suseconds_t r_sample, t_elapsed;
|
||||
|
||||
BUG_ON(hctx == NULL);
|
||||
|
||||
@ -430,44 +419,44 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
case TFRC_SSTATE_FBACK:
|
||||
/* get packet from history to look up t_recvdata */
|
||||
packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
|
||||
DCCP_SKB_CB(skb)->dccpd_ack_seq);
|
||||
DCCP_SKB_CB(skb)->dccpd_ack_seq);
|
||||
if (unlikely(packet == NULL)) {
|
||||
DCCP_WARN("%s(%p), seqno %llu(%s) doesn't exist "
|
||||
"in history!\n", dccp_role(sk), sk,
|
||||
(unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
|
||||
dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
|
||||
dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
|
||||
return;
|
||||
}
|
||||
|
||||
/* Update receive rate */
|
||||
/* Update receive rate in units of 64 * bytes/second */
|
||||
hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate;
|
||||
hctx->ccid3hctx_x_recv <<= 6;
|
||||
|
||||
/* Update loss event rate */
|
||||
pinv = opt_recv->ccid3or_loss_event_rate;
|
||||
if (pinv == ~0U || pinv == 0)
|
||||
if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */
|
||||
hctx->ccid3hctx_p = 0;
|
||||
else
|
||||
hctx->ccid3hctx_p = 1000000 / pinv;
|
||||
else /* can not exceed 100% */
|
||||
hctx->ccid3hctx_p = 1000000 / pinv;
|
||||
|
||||
dccp_timestamp(sk, &now);
|
||||
|
||||
/*
|
||||
* Calculate new round trip sample as per [RFC 3448, 4.3] by
|
||||
* R_sample = (now - t_recvdata) - t_elapsed
|
||||
* R_sample = (now - t_recvdata) - t_elapsed
|
||||
*/
|
||||
r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
|
||||
t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
|
||||
|
||||
if (unlikely(r_sample <= 0)) {
|
||||
DCCP_WARN("WARNING: R_sample (%ld) <= 0!\n", r_sample);
|
||||
r_sample = 0;
|
||||
} else if (unlikely(r_sample <= t_elapsed))
|
||||
DCCP_WARN("WARNING: r_sample=%ldus <= t_elapsed=%ldus\n",
|
||||
r_sample, t_elapsed);
|
||||
DCCP_BUG_ON(r_sample < 0);
|
||||
if (unlikely(r_sample <= t_elapsed))
|
||||
DCCP_WARN("WARNING: r_sample=%dus <= t_elapsed=%dus\n",
|
||||
(int)r_sample, (int)t_elapsed);
|
||||
else
|
||||
r_sample -= t_elapsed;
|
||||
CCID3_RTT_SANITY_CHECK(r_sample);
|
||||
|
||||
/* Update RTT estimate by
|
||||
/* Update RTT estimate by
|
||||
* If (No feedback recv)
|
||||
* R = R_sample;
|
||||
* Else
|
||||
@ -476,34 +465,45 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
* q is a constant, RFC 3448 recomments 0.9
|
||||
*/
|
||||
if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
|
||||
/* Use Larger Initial Windows [RFC 4342, sec. 5]
|
||||
* We deviate in that we use `s' instead of `MSS'. */
|
||||
u16 w_init = max( 4 * hctx->ccid3hctx_s,
|
||||
max(2 * hctx->ccid3hctx_s, 4380));
|
||||
/*
|
||||
* Larger Initial Windows [RFC 4342, sec. 5]
|
||||
* We deviate in that we use `s' instead of `MSS'.
|
||||
*/
|
||||
__u64 w_init = min(4 * hctx->ccid3hctx_s,
|
||||
max(2 * hctx->ccid3hctx_s, 4380));
|
||||
hctx->ccid3hctx_rtt = r_sample;
|
||||
hctx->ccid3hctx_x = usecs_div(w_init, r_sample);
|
||||
hctx->ccid3hctx_x = scaled_div(w_init << 6, r_sample);
|
||||
hctx->ccid3hctx_t_ld = now;
|
||||
|
||||
ccid3_update_send_time(hctx);
|
||||
|
||||
ccid3_pr_debug("%s(%p), s=%u, w_init=%u, "
|
||||
"R_sample=%ldus, X=%u\n", dccp_role(sk),
|
||||
sk, hctx->ccid3hctx_s, w_init, r_sample,
|
||||
hctx->ccid3hctx_x);
|
||||
ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
|
||||
"R_sample=%dus, X=%u\n", dccp_role(sk),
|
||||
sk, hctx->ccid3hctx_s, w_init,
|
||||
(int)r_sample,
|
||||
(unsigned)(hctx->ccid3hctx_x >> 6));
|
||||
|
||||
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
|
||||
} else {
|
||||
hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
|
||||
(u32)r_sample ) / 10;
|
||||
(u32)r_sample) / 10;
|
||||
|
||||
/* Update sending rate (step 4 of [RFC 3448, 4.3]) */
|
||||
if (hctx->ccid3hctx_p > 0)
|
||||
hctx->ccid3hctx_x_calc =
|
||||
tfrc_calc_x(hctx->ccid3hctx_s,
|
||||
hctx->ccid3hctx_rtt,
|
||||
hctx->ccid3hctx_p);
|
||||
ccid3_hc_tx_update_x(sk, &now);
|
||||
|
||||
ccid3_pr_debug("%s(%p), RTT=%uus (sample=%ldus), s=%u, "
|
||||
"p=%u, X_calc=%u, X=%u\n", dccp_role(sk),
|
||||
sk, hctx->ccid3hctx_rtt, r_sample,
|
||||
ccid3_pr_debug("%s(%p), RTT=%uus (sample=%dus), s=%u, "
|
||||
"p=%u, X_calc=%u, X_recv=%u, X=%u\n",
|
||||
dccp_role(sk),
|
||||
sk, hctx->ccid3hctx_rtt, (int)r_sample,
|
||||
hctx->ccid3hctx_s, hctx->ccid3hctx_p,
|
||||
hctx->ccid3hctx_x_calc,
|
||||
hctx->ccid3hctx_x);
|
||||
(unsigned)(hctx->ccid3hctx_x_recv >> 6),
|
||||
(unsigned)(hctx->ccid3hctx_x >> 6));
|
||||
}
|
||||
|
||||
/* unschedule no feedback timer */
|
||||
@ -513,57 +513,48 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
dccp_tx_hist_purge_older(ccid3_tx_hist,
|
||||
&hctx->ccid3hctx_hist, packet);
|
||||
/*
|
||||
* As we have calculated new ipi, delta, t_nom it is possible that
|
||||
* we now can send a packet, so wake up dccp_wait_for_ccid
|
||||
* As we have calculated new ipi, delta, t_nom it is possible
|
||||
* that we now can send a packet, so wake up dccp_wait_for_ccid
|
||||
*/
|
||||
sk->sk_write_space(sk);
|
||||
|
||||
/*
|
||||
* Update timeout interval for the nofeedback timer.
|
||||
* We use a configuration option to increase the lower bound.
|
||||
* This can help avoid triggering the nofeedback timer too often
|
||||
* ('spinning') on LANs with small RTTs.
|
||||
* This can help avoid triggering the nofeedback timer too
|
||||
* often ('spinning') on LANs with small RTTs.
|
||||
*/
|
||||
hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
|
||||
CONFIG_IP_DCCP_CCID3_RTO *
|
||||
(USEC_PER_SEC/1000) );
|
||||
(USEC_PER_SEC/1000));
|
||||
/*
|
||||
* Schedule no feedback timer to expire in
|
||||
* max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
|
||||
*/
|
||||
t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
|
||||
|
||||
ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
|
||||
"expire in %lu jiffies (%luus)\n",
|
||||
dccp_role(sk), sk,
|
||||
usecs_to_jiffies(t_nfb), t_nfb);
|
||||
|
||||
sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
|
||||
ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
|
||||
"expire in %lu jiffies (%luus)\n",
|
||||
dccp_role(sk),
|
||||
sk, usecs_to_jiffies(t_nfb), t_nfb);
|
||||
|
||||
sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
|
||||
jiffies + usecs_to_jiffies(t_nfb));
|
||||
|
||||
/* set idle flag */
|
||||
hctx->ccid3hctx_idle = 1;
|
||||
hctx->ccid3hctx_idle = 1;
|
||||
break;
|
||||
case TFRC_SSTATE_NO_SENT:
|
||||
if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT)
|
||||
DCCP_WARN("Illegal ACK received - no packet sent\n");
|
||||
/*
|
||||
* XXX when implementing bidirectional rx/tx check this again
|
||||
*/
|
||||
DCCP_WARN("Illegal ACK received - no packet sent\n");
|
||||
/* fall through */
|
||||
case TFRC_SSTATE_TERM: /* ignore feedback when closing */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
|
||||
|
||||
BUG_ON(hctx == NULL);
|
||||
|
||||
if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
|
||||
DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
|
||||
unsigned char len, u16 idx,
|
||||
unsigned char *value)
|
||||
@ -588,13 +579,14 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
|
||||
switch (option) {
|
||||
case TFRC_OPT_LOSS_EVENT_RATE:
|
||||
if (unlikely(len != 4)) {
|
||||
DCCP_WARN("%s, sk=%p, invalid len %d "
|
||||
DCCP_WARN("%s(%p), invalid len %d "
|
||||
"for TFRC_OPT_LOSS_EVENT_RATE\n",
|
||||
dccp_role(sk), sk, len);
|
||||
rc = -EINVAL;
|
||||
} else {
|
||||
opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
|
||||
ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
|
||||
opt_recv->ccid3or_loss_event_rate =
|
||||
ntohl(*(__be32 *)value);
|
||||
ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
|
||||
dccp_role(sk), sk,
|
||||
opt_recv->ccid3or_loss_event_rate);
|
||||
}
|
||||
@ -602,20 +594,21 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
|
||||
case TFRC_OPT_LOSS_INTERVALS:
|
||||
opt_recv->ccid3or_loss_intervals_idx = idx;
|
||||
opt_recv->ccid3or_loss_intervals_len = len;
|
||||
ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
|
||||
ccid3_pr_debug("%s(%p), LOSS_INTERVALS=(%u, %u)\n",
|
||||
dccp_role(sk), sk,
|
||||
opt_recv->ccid3or_loss_intervals_idx,
|
||||
opt_recv->ccid3or_loss_intervals_len);
|
||||
break;
|
||||
case TFRC_OPT_RECEIVE_RATE:
|
||||
if (unlikely(len != 4)) {
|
||||
DCCP_WARN("%s, sk=%p, invalid len %d "
|
||||
DCCP_WARN("%s(%p), invalid len %d "
|
||||
"for TFRC_OPT_RECEIVE_RATE\n",
|
||||
dccp_role(sk), sk, len);
|
||||
rc = -EINVAL;
|
||||
} else {
|
||||
opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
|
||||
ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
|
||||
opt_recv->ccid3or_receive_rate =
|
||||
ntohl(*(__be32 *)value);
|
||||
ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
|
||||
dccp_role(sk), sk,
|
||||
opt_recv->ccid3or_receive_rate);
|
||||
}
|
||||
@ -630,10 +623,12 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
|
||||
struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
|
||||
|
||||
hctx->ccid3hctx_s = 0;
|
||||
hctx->ccid3hctx_rtt = 0;
|
||||
hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
|
||||
INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
|
||||
|
||||
hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
|
||||
hctx->ccid3hctx_no_feedback_timer.function =
|
||||
ccid3_hc_tx_no_feedback_timer;
|
||||
hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
|
||||
init_timer(&hctx->ccid3hctx_no_feedback_timer);
|
||||
|
||||
@ -698,8 +693,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
|
||||
struct dccp_sock *dp = dccp_sk(sk);
|
||||
struct dccp_rx_hist_entry *packet;
|
||||
struct timeval now;
|
||||
suseconds_t delta;
|
||||
|
||||
ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
|
||||
ccid3_pr_debug("%s(%p) - entry \n", dccp_role(sk), sk);
|
||||
|
||||
dccp_timestamp(sk, &now);
|
||||
|
||||
@ -707,21 +703,21 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
|
||||
case TFRC_RSTATE_NO_DATA:
|
||||
hcrx->ccid3hcrx_x_recv = 0;
|
||||
break;
|
||||
case TFRC_RSTATE_DATA: {
|
||||
const u32 delta = timeval_delta(&now,
|
||||
&hcrx->ccid3hcrx_tstamp_last_feedback);
|
||||
hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv,
|
||||
delta);
|
||||
}
|
||||
case TFRC_RSTATE_DATA:
|
||||
delta = timeval_delta(&now,
|
||||
&hcrx->ccid3hcrx_tstamp_last_feedback);
|
||||
DCCP_BUG_ON(delta < 0);
|
||||
hcrx->ccid3hcrx_x_recv =
|
||||
scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
|
||||
break;
|
||||
case TFRC_RSTATE_TERM:
|
||||
DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
|
||||
DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
|
||||
return;
|
||||
}
|
||||
|
||||
packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
|
||||
if (unlikely(packet == NULL)) {
|
||||
DCCP_WARN("%s, sk=%p, no data packet in history!\n",
|
||||
DCCP_WARN("%s(%p), no data packet in history!\n",
|
||||
dccp_role(sk), sk);
|
||||
return;
|
||||
}
|
||||
@ -730,13 +726,19 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
|
||||
hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval;
|
||||
hcrx->ccid3hcrx_bytes_recv = 0;
|
||||
|
||||
/* Convert to multiples of 10us */
|
||||
hcrx->ccid3hcrx_elapsed_time =
|
||||
timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
|
||||
/* Elapsed time information [RFC 4340, 13.2] in units of 10 * usecs */
|
||||
delta = timeval_delta(&now, &packet->dccphrx_tstamp);
|
||||
DCCP_BUG_ON(delta < 0);
|
||||
hcrx->ccid3hcrx_elapsed_time = delta / 10;
|
||||
|
||||
if (hcrx->ccid3hcrx_p == 0)
|
||||
hcrx->ccid3hcrx_pinv = ~0;
|
||||
else
|
||||
hcrx->ccid3hcrx_pinv = ~0U; /* see RFC 4342, 8.5 */
|
||||
else if (hcrx->ccid3hcrx_p > 1000000) {
|
||||
DCCP_WARN("p (%u) > 100%%\n", hcrx->ccid3hcrx_p);
|
||||
hcrx->ccid3hcrx_pinv = 1; /* use 100% in this case */
|
||||
} else
|
||||
hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
|
||||
|
||||
dp->dccps_hc_rx_insert_options = 1;
|
||||
dccp_send_ack(sk);
|
||||
}
|
||||
@ -764,9 +766,9 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
|
||||
hcrx->ccid3hcrx_elapsed_time)) ||
|
||||
dccp_insert_option_timestamp(sk, skb) ||
|
||||
dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
|
||||
&pinv, sizeof(pinv)) ||
|
||||
&pinv, sizeof(pinv)) ||
|
||||
dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
|
||||
&x_recv, sizeof(x_recv)))
|
||||
&x_recv, sizeof(x_recv)))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
@ -780,12 +782,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
|
||||
{
|
||||
struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
|
||||
struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
|
||||
u32 rtt, delta, x_recv, fval, p, tmp2;
|
||||
u32 x_recv, p;
|
||||
suseconds_t rtt, delta;
|
||||
struct timeval tstamp = { 0, };
|
||||
int interval = 0;
|
||||
int win_count = 0;
|
||||
int step = 0;
|
||||
u64 tmp1;
|
||||
u64 fval;
|
||||
|
||||
list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
|
||||
dccphrx_node) {
|
||||
@ -810,13 +813,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
|
||||
}
|
||||
|
||||
if (unlikely(step == 0)) {
|
||||
DCCP_WARN("%s, sk=%p, packet history has no data packets!\n",
|
||||
DCCP_WARN("%s(%p), packet history has no data packets!\n",
|
||||
dccp_role(sk), sk);
|
||||
return ~0;
|
||||
}
|
||||
|
||||
if (unlikely(interval == 0)) {
|
||||
DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0."
|
||||
DCCP_WARN("%s(%p), Could not find a win_count interval > 0."
|
||||
"Defaulting to 1\n", dccp_role(sk), sk);
|
||||
interval = 1;
|
||||
}
|
||||
@ -825,41 +828,51 @@ found:
|
||||
DCCP_CRIT("tail is null\n");
|
||||
return ~0;
|
||||
}
|
||||
rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
|
||||
ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
|
||||
dccp_role(sk), sk, rtt);
|
||||
|
||||
if (rtt == 0) {
|
||||
DCCP_WARN("RTT==0, setting to 1\n");
|
||||
rtt = 1;
|
||||
delta = timeval_delta(&tstamp, &tail->dccphrx_tstamp);
|
||||
DCCP_BUG_ON(delta < 0);
|
||||
|
||||
rtt = delta * 4 / interval;
|
||||
ccid3_pr_debug("%s(%p), approximated RTT to %dus\n",
|
||||
dccp_role(sk), sk, (int)rtt);
|
||||
|
||||
/*
|
||||
* Determine the length of the first loss interval via inverse lookup.
|
||||
* Assume that X_recv can be computed by the throughput equation
|
||||
* s
|
||||
* X_recv = --------
|
||||
* R * fval
|
||||
* Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1].
|
||||
*/
|
||||
if (rtt == 0) { /* would result in divide-by-zero */
|
||||
DCCP_WARN("RTT==0, returning 1/p = 1\n");
|
||||
return 1000000;
|
||||
}
|
||||
|
||||
dccp_timestamp(sk, &tstamp);
|
||||
delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
|
||||
x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
|
||||
DCCP_BUG_ON(delta <= 0);
|
||||
|
||||
if (x_recv == 0)
|
||||
x_recv = hcrx->ccid3hcrx_x_recv;
|
||||
|
||||
tmp1 = (u64)x_recv * (u64)rtt;
|
||||
do_div(tmp1,10000000);
|
||||
tmp2 = (u32)tmp1;
|
||||
|
||||
if (!tmp2) {
|
||||
DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt);
|
||||
return ~0;
|
||||
x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
|
||||
if (x_recv == 0) { /* would also trigger divide-by-zero */
|
||||
DCCP_WARN("X_recv==0\n");
|
||||
if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) {
|
||||
DCCP_BUG("stored value of X_recv is zero");
|
||||
return 1000000;
|
||||
}
|
||||
}
|
||||
|
||||
fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
|
||||
/* do not alter order above or you will get overflow on 32 bit */
|
||||
fval = scaled_div(hcrx->ccid3hcrx_s, rtt);
|
||||
fval = scaled_div32(fval, x_recv);
|
||||
p = tfrc_calc_x_reverse_lookup(fval);
|
||||
ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
|
||||
|
||||
ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
|
||||
"loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
|
||||
|
||||
if (p == 0)
|
||||
return ~0;
|
||||
else
|
||||
return 1000000 / p;
|
||||
return 1000000 / p;
|
||||
}
|
||||
|
||||
static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
|
||||
@ -913,7 +926,8 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
|
||||
struct dccp_rx_hist_entry *packet)
|
||||
{
|
||||
struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
|
||||
struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
|
||||
struct dccp_rx_hist_entry *rx_hist =
|
||||
dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
|
||||
u64 seqno = packet->dccphrx_seqno;
|
||||
u64 tmp_seqno;
|
||||
int loss = 0;
|
||||
@ -941,7 +955,7 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
|
||||
dccp_inc_seqno(&tmp_seqno);
|
||||
while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
|
||||
tmp_seqno, &ccval)) {
|
||||
hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
|
||||
hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
|
||||
hcrx->ccid3hcrx_ccval_nonloss = ccval;
|
||||
dccp_inc_seqno(&tmp_seqno);
|
||||
}
|
||||
@ -967,7 +981,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
const struct dccp_options_received *opt_recv;
|
||||
struct dccp_rx_hist_entry *packet;
|
||||
struct timeval now;
|
||||
u32 p_prev, rtt_prev, r_sample, t_elapsed;
|
||||
u32 p_prev, rtt_prev;
|
||||
suseconds_t r_sample, t_elapsed;
|
||||
int loss, payload_size;
|
||||
|
||||
BUG_ON(hcrx == NULL);
|
||||
@ -987,11 +1002,13 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
r_sample = timeval_usecs(&now);
|
||||
t_elapsed = opt_recv->dccpor_elapsed_time * 10;
|
||||
|
||||
DCCP_BUG_ON(r_sample < 0);
|
||||
if (unlikely(r_sample <= t_elapsed))
|
||||
DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
|
||||
DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
|
||||
r_sample, t_elapsed);
|
||||
else
|
||||
r_sample -= t_elapsed;
|
||||
CCID3_RTT_SANITY_CHECK(r_sample);
|
||||
|
||||
if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
|
||||
hcrx->ccid3hcrx_rtt = r_sample;
|
||||
@ -1000,8 +1017,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
r_sample / 10;
|
||||
|
||||
if (rtt_prev != hcrx->ccid3hcrx_rtt)
|
||||
ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
|
||||
dccp_role(sk), hcrx->ccid3hcrx_rtt,
|
||||
ccid3_pr_debug("%s(%p), New RTT=%uus, elapsed time=%u\n",
|
||||
dccp_role(sk), sk, hcrx->ccid3hcrx_rtt,
|
||||
opt_recv->dccpor_elapsed_time);
|
||||
break;
|
||||
case DCCP_PKT_DATA:
|
||||
@ -1013,7 +1030,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
|
||||
skb, GFP_ATOMIC);
|
||||
if (unlikely(packet == NULL)) {
|
||||
DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
|
||||
DCCP_WARN("%s(%p), Not enough mem to add rx packet "
|
||||
"to history, consider it lost!\n", dccp_role(sk), sk);
|
||||
return;
|
||||
}
|
||||
@ -1028,9 +1045,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
|
||||
switch (hcrx->ccid3hcrx_state) {
|
||||
case TFRC_RSTATE_NO_DATA:
|
||||
ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
|
||||
"feedback\n",
|
||||
dccp_role(sk), sk,
|
||||
ccid3_pr_debug("%s(%p, state=%s), skb=%p, sending initial "
|
||||
"feedback\n", dccp_role(sk), sk,
|
||||
dccp_state_name(sk->sk_state), skb);
|
||||
ccid3_hc_rx_send_feedback(sk);
|
||||
ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
|
||||
@ -1041,19 +1057,19 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
|
||||
break;
|
||||
|
||||
dccp_timestamp(sk, &now);
|
||||
if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
|
||||
hcrx->ccid3hcrx_rtt) {
|
||||
if ((timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) -
|
||||
(suseconds_t)hcrx->ccid3hcrx_rtt) >= 0) {
|
||||
hcrx->ccid3hcrx_tstamp_last_ack = now;
|
||||
ccid3_hc_rx_send_feedback(sk);
|
||||
}
|
||||
return;
|
||||
case TFRC_RSTATE_TERM:
|
||||
DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
|
||||
DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Dealing with packet loss */
|
||||
ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
|
||||
ccid3_pr_debug("%s(%p, state=%s), data loss! Reacting...\n",
|
||||
dccp_role(sk), sk, dccp_state_name(sk->sk_state));
|
||||
|
||||
p_prev = hcrx->ccid3hcrx_p;
|
||||
@ -1078,7 +1094,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
|
||||
{
|
||||
struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
|
||||
|
||||
ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
|
||||
ccid3_pr_debug("entry\n");
|
||||
|
||||
hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
|
||||
INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
|
||||
@ -1086,7 +1102,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
|
||||
dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
|
||||
hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
|
||||
hcrx->ccid3hcrx_s = 0;
|
||||
hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */
|
||||
hcrx->ccid3hcrx_rtt = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1115,9 +1131,9 @@ static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
|
||||
|
||||
BUG_ON(hcrx == NULL);
|
||||
|
||||
info->tcpi_ca_state = hcrx->ccid3hcrx_state;
|
||||
info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
|
||||
info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
|
||||
info->tcpi_ca_state = hcrx->ccid3hcrx_state;
|
||||
info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
|
||||
info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
|
||||
}
|
||||
|
||||
static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
|
||||
@ -1198,7 +1214,6 @@ static struct ccid_operations ccid3 = {
|
||||
.ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet,
|
||||
.ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent,
|
||||
.ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv,
|
||||
.ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
|
||||
.ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options,
|
||||
.ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock),
|
||||
.ccid_hc_rx_init = ccid3_hc_rx_init,
|
||||
@ -1210,7 +1225,7 @@ static struct ccid_operations ccid3 = {
|
||||
.ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt,
|
||||
.ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt,
|
||||
};
|
||||
|
||||
|
||||
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
|
||||
module_param(ccid3_debug, int, 0444);
|
||||
MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
|
||||
@ -1233,7 +1248,7 @@ static __init int ccid3_module_init(void)
|
||||
goto out_free_tx;
|
||||
|
||||
rc = ccid_register(&ccid3);
|
||||
if (rc != 0)
|
||||
if (rc != 0)
|
||||
goto out_free_loss_interval_history;
|
||||
out:
|
||||
return rc;
|
||||
|
@ -51,6 +51,16 @@
|
||||
/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
|
||||
#define TFRC_T_MBI 64
|
||||
|
||||
/* What we think is a reasonable upper limit on RTT values */
|
||||
#define CCID3_SANE_RTT_MAX ((suseconds_t)(4 * USEC_PER_SEC))
|
||||
|
||||
#define CCID3_RTT_SANITY_CHECK(rtt) do { \
|
||||
if (rtt > CCID3_SANE_RTT_MAX) { \
|
||||
DCCP_CRIT("RTT (%d) too large, substituting %d", \
|
||||
(int)rtt, (int)CCID3_SANE_RTT_MAX); \
|
||||
rtt = CCID3_SANE_RTT_MAX; \
|
||||
} } while (0)
|
||||
|
||||
enum ccid3_options {
|
||||
TFRC_OPT_LOSS_EVENT_RATE = 192,
|
||||
TFRC_OPT_LOSS_INTERVALS = 193,
|
||||
@ -67,7 +77,7 @@ struct ccid3_options_received {
|
||||
|
||||
/* TFRC sender states */
|
||||
enum ccid3_hc_tx_states {
|
||||
TFRC_SSTATE_NO_SENT = 1,
|
||||
TFRC_SSTATE_NO_SENT = 1,
|
||||
TFRC_SSTATE_NO_FBACK,
|
||||
TFRC_SSTATE_FBACK,
|
||||
TFRC_SSTATE_TERM,
|
||||
@ -75,23 +85,23 @@ enum ccid3_hc_tx_states {
|
||||
|
||||
/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
|
||||
*
|
||||
* @ccid3hctx_x - Current sending rate
|
||||
* @ccid3hctx_x_recv - Receive rate
|
||||
* @ccid3hctx_x_calc - Calculated send rate (RFC 3448, 3.1)
|
||||
* @ccid3hctx_x - Current sending rate in 64 * bytes per second
|
||||
* @ccid3hctx_x_recv - Receive rate in 64 * bytes per second
|
||||
* @ccid3hctx_x_calc - Calculated rate in bytes per second
|
||||
* @ccid3hctx_rtt - Estimate of current round trip time in usecs
|
||||
* @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000
|
||||
* @ccid3hctx_s - Packet size
|
||||
* @ccid3hctx_t_rto - Retransmission Timeout (RFC 3448, 3.1)
|
||||
* @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6)
|
||||
* @ccid3hctx_s - Packet size in bytes
|
||||
* @ccid3hctx_t_rto - Nofeedback Timer setting in usecs
|
||||
* @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) in usecs
|
||||
* @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states
|
||||
* @ccid3hctx_last_win_count - Last window counter sent
|
||||
* @ccid3hctx_t_last_win_count - Timestamp of earliest packet
|
||||
* with last_win_count value sent
|
||||
* with last_win_count value sent
|
||||
* @ccid3hctx_no_feedback_timer - Handle to no feedback timer
|
||||
* @ccid3hctx_idle - Flag indicating that sender is idling
|
||||
* @ccid3hctx_t_ld - Time last doubled during slow start
|
||||
* @ccid3hctx_t_nom - Nominal send time of next packet
|
||||
* @ccid3hctx_delta - Send timer delta
|
||||
* @ccid3hctx_delta - Send timer delta (RFC 3448, 4.6) in usecs
|
||||
* @ccid3hctx_hist - Packet history
|
||||
* @ccid3hctx_options_received - Parsed set of retrieved options
|
||||
*/
|
||||
@ -105,7 +115,7 @@ struct ccid3_hc_tx_sock {
|
||||
#define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto
|
||||
#define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi
|
||||
u16 ccid3hctx_s;
|
||||
enum ccid3_hc_tx_states ccid3hctx_state:8;
|
||||
enum ccid3_hc_tx_states ccid3hctx_state:8;
|
||||
u8 ccid3hctx_last_win_count;
|
||||
u8 ccid3hctx_idle;
|
||||
struct timeval ccid3hctx_t_last_win_count;
|
||||
@ -119,7 +129,7 @@ struct ccid3_hc_tx_sock {
|
||||
|
||||
/* TFRC receiver states */
|
||||
enum ccid3_hc_rx_states {
|
||||
TFRC_RSTATE_NO_DATA = 1,
|
||||
TFRC_RSTATE_NO_DATA = 1,
|
||||
TFRC_RSTATE_DATA,
|
||||
TFRC_RSTATE_TERM = 127,
|
||||
};
|
||||
@ -147,18 +157,18 @@ struct ccid3_hc_rx_sock {
|
||||
#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv
|
||||
#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt
|
||||
#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p
|
||||
u64 ccid3hcrx_seqno_nonloss:48,
|
||||
u64 ccid3hcrx_seqno_nonloss:48,
|
||||
ccid3hcrx_ccval_nonloss:4,
|
||||
ccid3hcrx_ccval_last_counter:4;
|
||||
enum ccid3_hc_rx_states ccid3hcrx_state:8;
|
||||
u32 ccid3hcrx_bytes_recv;
|
||||
struct timeval ccid3hcrx_tstamp_last_feedback;
|
||||
struct timeval ccid3hcrx_tstamp_last_ack;
|
||||
u32 ccid3hcrx_bytes_recv;
|
||||
struct timeval ccid3hcrx_tstamp_last_feedback;
|
||||
struct timeval ccid3hcrx_tstamp_last_ack;
|
||||
struct list_head ccid3hcrx_hist;
|
||||
struct list_head ccid3hcrx_li_hist;
|
||||
u16 ccid3hcrx_s;
|
||||
u32 ccid3hcrx_pinv;
|
||||
u32 ccid3hcrx_elapsed_time;
|
||||
u16 ccid3hcrx_s;
|
||||
u32 ccid3hcrx_pinv;
|
||||
u32 ccid3hcrx_elapsed_time;
|
||||
};
|
||||
|
||||
static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
|
||||
|
@ -36,9 +36,100 @@

#include <linux/module.h>
#include <linux/string.h>

#include "packet_history.h"

/*
 * Transmitter History Routines
 */
struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
{
struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
static const char dccp_tx_hist_mask[] = "tx_hist_%s";
char *slab_name;

if (hist == NULL)
goto out;

slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
GFP_ATOMIC);
if (slab_name == NULL)
goto out_free_hist;

sprintf(slab_name, dccp_tx_hist_mask, name);
hist->dccptxh_slab = kmem_cache_create(slab_name,
sizeof(struct dccp_tx_hist_entry),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (hist->dccptxh_slab == NULL)
goto out_free_slab_name;
out:
return hist;
out_free_slab_name:
kfree(slab_name);
out_free_hist:
kfree(hist);
hist = NULL;
goto out;
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_new);

void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
{
const char* name = kmem_cache_name(hist->dccptxh_slab);

kmem_cache_destroy(hist->dccptxh_slab);
kfree(name);
kfree(hist);
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);

struct dccp_tx_hist_entry *
dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
{
struct dccp_tx_hist_entry *packet = NULL, *entry;

list_for_each_entry(entry, list, dccphtx_node)
if (entry->dccphtx_seqno == seq) {
packet = entry;
break;
}

return packet;
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);

void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
{
struct dccp_tx_hist_entry *entry, *next;

list_for_each_entry_safe(entry, next, list, dccphtx_node) {
list_del_init(&entry->dccphtx_node);
dccp_tx_hist_entry_delete(hist, entry);
}
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);

void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
struct list_head *list,
struct dccp_tx_hist_entry *packet)
{
struct dccp_tx_hist_entry *next;

list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) {
list_del_init(&packet->dccphtx_node);
dccp_tx_hist_entry_delete(hist, packet);
}
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older);

/*
 * Receiver History Routines
 */
struct dccp_rx_hist *dccp_rx_hist_new(const char *name)
{
struct dccp_rx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
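A note on the slab-name sizing above: sizeof("tx_hist_%s") already counts the terminating NUL, and the two-byte "%s" placeholder is what the CCID name replaces, so strlen(name) + sizeof(mask) - 1 always leaves enough room for the expanded name. A minimal user-space sketch of that arithmetic (malloc/snprintf stand in for the kernel allocators; the CCID name is an assumed example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	static const char mask[] = "tx_hist_%s";	/* same format mask as above */
	const char *name = "ccid3";			/* illustrative name only */

	/* "%s" (2 bytes) is replaced by name; sizeof(mask) includes the NUL,
	 * so this length can never be too small for the expanded string. */
	size_t len = strlen(name) + sizeof(mask) - 1;
	char *slab_name = malloc(len);

	if (slab_name == NULL)
		return 1;
	snprintf(slab_name, len, mask, name);
	printf("%s (allocated %zu, used %zu)\n", slab_name, len, strlen(slab_name) + 1);
	free(slab_name);
	return 0;
}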
@ -83,18 +174,24 @@ void dccp_rx_hist_delete(struct dccp_rx_hist *hist)

EXPORT_SYMBOL_GPL(dccp_rx_hist_delete);

void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list)
int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
u8 *ccval)
{
struct dccp_rx_hist_entry *entry, *next;
struct dccp_rx_hist_entry *packet = NULL, *entry;

list_for_each_entry_safe(entry, next, list, dccphrx_node) {
list_del_init(&entry->dccphrx_node);
kmem_cache_free(hist->dccprxh_slab, entry);
}
list_for_each_entry(entry, list, dccphrx_node)
if (entry->dccphrx_seqno == seq) {
packet = entry;
break;
}

if (packet)
*ccval = packet->dccphrx_ccval;

return packet != NULL;
}

EXPORT_SYMBOL_GPL(dccp_rx_hist_purge);

EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
struct dccp_rx_hist_entry *
dccp_rx_hist_find_data_packet(const struct list_head *list)
{
@ -184,110 +281,18 @@ void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,

EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);

struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list)
{
struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
static const char dccp_tx_hist_mask[] = "tx_hist_%s";
char *slab_name;
struct dccp_rx_hist_entry *entry, *next;

if (hist == NULL)
goto out;

slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
GFP_ATOMIC);
if (slab_name == NULL)
goto out_free_hist;

sprintf(slab_name, dccp_tx_hist_mask, name);
hist->dccptxh_slab = kmem_cache_create(slab_name,
sizeof(struct dccp_tx_hist_entry),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (hist->dccptxh_slab == NULL)
goto out_free_slab_name;
out:
return hist;
out_free_slab_name:
kfree(slab_name);
out_free_hist:
kfree(hist);
hist = NULL;
goto out;
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_new);

void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
{
const char* name = kmem_cache_name(hist->dccptxh_slab);

kmem_cache_destroy(hist->dccptxh_slab);
kfree(name);
kfree(hist);
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);

struct dccp_tx_hist_entry *
dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
{
struct dccp_tx_hist_entry *packet = NULL, *entry;

list_for_each_entry(entry, list, dccphtx_node)
if (entry->dccphtx_seqno == seq) {
packet = entry;
break;
}

return packet;
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);

int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
u8 *ccval)
{
struct dccp_rx_hist_entry *packet = NULL, *entry;

list_for_each_entry(entry, list, dccphrx_node)
if (entry->dccphrx_seqno == seq) {
packet = entry;
break;
}

if (packet)
*ccval = packet->dccphrx_ccval;

return packet != NULL;
}

EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);

void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
struct list_head *list,
struct dccp_tx_hist_entry *packet)
{
struct dccp_tx_hist_entry *next;

list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) {
list_del_init(&packet->dccphtx_node);
dccp_tx_hist_entry_delete(hist, packet);
list_for_each_entry_safe(entry, next, list, dccphrx_node) {
list_del_init(&entry->dccphrx_node);
kmem_cache_free(hist->dccprxh_slab, entry);
}
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older);
EXPORT_SYMBOL_GPL(dccp_rx_hist_purge);

void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
{
struct dccp_tx_hist_entry *entry, *next;

list_for_each_entry_safe(entry, next, list, dccphtx_node) {
list_del_init(&entry->dccphtx_node);
dccp_tx_hist_entry_delete(hist, entry);
}
}

EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);

MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
"Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
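The purge helpers above depend on list_for_each_entry_safe(), which remembers the next node before the current entry is handed back to the slab. A stripped-down user-space sketch of that "safe" deletion pattern on a plain singly linked list (hypothetical types, standard C only, not the kernel list API):

#include <stdint.h>
#include <stdlib.h>

struct entry {
	uint64_t seqno;
	struct entry *next;
};

/* Free every entry; the successor is saved before free(), mirroring
 * the safe list walk used by dccp_tx_hist_purge(). */
static void purge(struct entry **head)
{
	struct entry *e = *head, *next;

	while (e != NULL) {
		next = e->next;		/* must be read before e is freed */
		free(e);
		e = next;
	}
	*head = NULL;
}

int main(void)
{
	struct entry *head = NULL;

	for (uint64_t s = 0; s < 3; s++) {
		struct entry *e = malloc(sizeof(*e));
		if (e == NULL)
			return 1;
		e->seqno = s;
		e->next = head;
		head = e;
	}
	purge(&head);
	return 0;
}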
@ -49,43 +49,27 @@
#define TFRC_WIN_COUNT_PER_RTT 4
#define TFRC_WIN_COUNT_LIMIT 16

/*
 * Transmitter History data structures and declarations
 */
struct dccp_tx_hist_entry {
struct list_head dccphtx_node;
u64 dccphtx_seqno:48,
dccphtx_ccval:4,
dccphtx_sent:1;
u32 dccphtx_rtt;
struct timeval dccphtx_tstamp;
};

struct dccp_rx_hist_entry {
struct list_head dccphrx_node;
u64 dccphrx_seqno:48,
dccphrx_ccval:4,
dccphrx_type:4;
u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */
struct timeval dccphrx_tstamp;
};

struct dccp_tx_hist {
struct kmem_cache *dccptxh_slab;
};

extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);

struct dccp_rx_hist {
struct kmem_cache *dccprxh_slab;
};

extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
extern struct dccp_rx_hist_entry *
dccp_rx_hist_find_data_packet(const struct list_head *list);
extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);

static inline struct dccp_tx_hist_entry *
dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
const gfp_t prio)
dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
const gfp_t prio)
{
struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab,
prio);
@ -96,34 +80,8 @@ static inline struct dccp_tx_hist_entry *
return entry;
}

static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
struct dccp_tx_hist_entry *entry)
{
if (entry != NULL)
kmem_cache_free(hist->dccptxh_slab, entry);
}

extern struct dccp_tx_hist_entry *
dccp_tx_hist_find_entry(const struct list_head *list,
const u64 seq);
extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
u8 *ccval);

static inline void dccp_tx_hist_add_entry(struct list_head *list,
struct dccp_tx_hist_entry *entry)
{
list_add(&entry->dccphtx_node, list);
}

extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
struct list_head *list,
struct dccp_tx_hist_entry *next);

extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist,
struct list_head *list);

static inline struct dccp_tx_hist_entry *
dccp_tx_hist_head(struct list_head *list)
dccp_tx_hist_head(struct list_head *list)
{
struct dccp_tx_hist_entry *head = NULL;
@ -133,12 +91,55 @@ static inline struct dccp_tx_hist_entry *
return head;
}

extern struct dccp_tx_hist_entry *
dccp_tx_hist_find_entry(const struct list_head *list,
const u64 seq);

static inline void dccp_tx_hist_add_entry(struct list_head *list,
struct dccp_tx_hist_entry *entry)
{
list_add(&entry->dccphtx_node, list);
}

static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
struct dccp_tx_hist_entry *entry)
{
if (entry != NULL)
kmem_cache_free(hist->dccptxh_slab, entry);
}

extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist,
struct list_head *list);

extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
struct list_head *list,
struct dccp_tx_hist_entry *next);

/*
 * Receiver History data structures and declarations
 */
struct dccp_rx_hist_entry {
struct list_head dccphrx_node;
u64 dccphrx_seqno:48,
dccphrx_ccval:4,
dccphrx_type:4;
u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */
struct timeval dccphrx_tstamp;
};

struct dccp_rx_hist {
struct kmem_cache *dccprxh_slab;
};

extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);

static inline struct dccp_rx_hist_entry *
dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
const struct sock *sk,
const u32 ndp,
const struct sk_buff *skb,
const gfp_t prio)
dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
const struct sock *sk,
const u32 ndp,
const struct sk_buff *skb,
const gfp_t prio)
{
struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab,
prio);
@ -156,6 +157,28 @@ static inline struct dccp_rx_hist_entry *
return entry;
}

static inline struct dccp_rx_hist_entry *
dccp_rx_hist_head(struct list_head *list)
{
struct dccp_rx_hist_entry *head = NULL;

if (!list_empty(list))
head = list_entry(list->next, struct dccp_rx_hist_entry,
dccphrx_node);
return head;
}

extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
u8 *ccval);
extern struct dccp_rx_hist_entry *
dccp_rx_hist_find_data_packet(const struct list_head *list);

extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
struct list_head *rx_list,
struct list_head *li_list,
struct dccp_rx_hist_entry *packet,
u64 nonloss_seqno);

static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
struct dccp_rx_hist_entry *entry)
{
@ -166,17 +189,6 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
struct list_head *list);

static inline struct dccp_rx_hist_entry *
dccp_rx_hist_head(struct list_head *list)
{
struct dccp_rx_hist_entry *head = NULL;

if (!list_empty(list))
head = list_entry(list->next, struct dccp_rx_hist_entry,
dccphrx_node);
return head;
}

static inline int
dccp_rx_hist_entry_data_packet(const struct dccp_rx_hist_entry *entry)
{
@ -184,12 +196,6 @@ static inline int
entry->dccphrx_type == DCCP_PKT_DATAACK;
}

extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
struct list_head *rx_list,
struct list_head *li_list,
struct dccp_rx_hist_entry *packet,
u64 nonloss_seqno);

extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
struct list_head *li_list, u8 *win_loss);
@ -13,8 +13,29 @@
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <asm/div64.h>

/* integer-arithmetic divisions of type (a * 1000000)/b */
static inline u64 scaled_div(u64 a, u32 b)
{
BUG_ON(b==0);
a *= 1000000;
do_div(a, b);
return a;
}

static inline u32 scaled_div32(u64 a, u32 b)
{
u64 result = scaled_div(a, b);

if (result > UINT_MAX) {
DCCP_CRIT("Overflow: a(%llu)/b(%u) > ~0U",
(unsigned long long)a, b);
return UINT_MAX;
}
return result;
}

extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
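The two helpers added above compute (a * 1000000) / b in 64-bit arithmetic; scaled_div32() additionally clamps to UINT_MAX instead of silently truncating into a u32. A small user-space model of that behaviour, a sketch only, with assert() standing in for BUG_ON() and fprintf() for DCCP_CRIT():

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t scaled_div(uint64_t a, uint32_t b)
{
	assert(b != 0);			/* stands in for BUG_ON(b == 0) */
	return a * 1000000 / b;
}

static uint32_t scaled_div32(uint64_t a, uint32_t b)
{
	uint64_t result = scaled_div(a, b);

	if (result > UINT_MAX) {	/* clamp rather than overflow a 32-bit value */
		fprintf(stderr, "Overflow: a(%llu)/b(%u) > ~0U\n",
			(unsigned long long)a, b);
		return UINT_MAX;
	}
	return (uint32_t)result;
}

int main(void)
{
	/* s = 1460 bytes, R = 100000 us: s * 1e6 / R = 14600 */
	printf("%llu\n", (unsigned long long)scaled_div(1460, 100000));
	/* an oversized numerator is clamped to UINT_MAX */
	printf("%u\n", scaled_div32(UINT64_C(1) << 40, 3));
	return 0;
}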
@ -13,7 +13,6 @@
 */

#include <linux/module.h>
#include <asm/div64.h>
#include "../../dccp.h"
#include "tfrc.h"
@ -616,15 +615,12 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
 * @R: RTT scaled by 1000000 (i.e., microseconds)
 * @p: loss ratio estimate scaled by 1000000
 * Returns X_calc in bytes per second (not scaled).
 *
 * Note: DO NOT alter this code unless you run test cases against it,
 * as the code has been optimized to stop underflow/overflow.
 */
u32 tfrc_calc_x(u16 s, u32 R, u32 p)
{
int index;
u16 index;
u32 f;
u64 tmp1, tmp2;
u64 result;

/* check against invalid parameters and divide-by-zero */
BUG_ON(p > 1000000); /* p must not exceed 100% */
@ -650,15 +646,17 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
f = tfrc_calc_x_lookup[index][0];
}

/* The following computes X = s/(R*f(p)) in bytes per second. Since f(p)
 * and R are both scaled by 1000000, we need to multiply by 1000000^2.
 * ==> DO NOT alter this unless you test against overflow on 32 bit */
tmp1 = ((u64)s * 100000000);
tmp2 = ((u64)R * (u64)f);
do_div(tmp2, 10000);
do_div(tmp1, tmp2);

return (u32)tmp1;
/*
 * Compute X = s/(R*f(p)) in bytes per second.
 * Since f(p) and R are both scaled by 1000000, we need to multiply by
 * 1000000^2. To avoid overflow, the result is computed in two stages.
 * This works under almost all reasonable operational conditions, for a
 * wide range of parameters. Yet, should some strange combination of
 * parameters result in overflow, the use of scaled_div32 will catch
 * this and return UINT_MAX - which is a logically adequate consequence.
 */
result = scaled_div(s, R);
return scaled_div32(result, f);
}

EXPORT_SYMBOL_GPL(tfrc_calc_x);
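The rewritten tail of tfrc_calc_x() computes X = s/(R*f(p)) in two stages: scaled_div(s, R) carries one factor of 1e6, and dividing that by the 1e6-scaled f(p) cancels the scaling, leaving plain bytes per second. A short user-space check of that arithmetic with assumed example values (this is a sketch of the scaling, not the kernel routine, which also does the lookup of f(p)):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t scaled_div(uint64_t a, uint32_t b) { return a * 1000000 / b; }

static uint32_t scaled_div32(uint64_t a, uint32_t b)
{
	uint64_t r = scaled_div(a, b);
	return r > UINT_MAX ? UINT_MAX : (uint32_t)r;
}

int main(void)
{
	uint16_t s = 1460;	/* segment size in bytes */
	uint32_t R = 200000;	/* RTT of 200 ms, scaled by 1e6 (microseconds) */
	uint32_t f = 2000000;	/* f(p) = 2.0, scaled by 1e6 */

	/* X = s / (R * f(p)); two stages keep the intermediate inside 64 bits */
	uint32_t x = scaled_div32(scaled_div(s, R), f);

	/* expected: 1460 / (0.2 * 2.0) = 3650 bytes per second */
	printf("X_calc = %u bytes/s\n", x);
	return 0;
}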
@ -80,8 +80,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);

#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */

#define DCCP_XMIT_TIMEO 30000 /* Time/msecs for blocking transmit per packet */

/* sysctl variables for DCCP */
extern int sysctl_dccp_request_retries;
extern int sysctl_dccp_retries1;
@ -434,6 +432,7 @@ static inline void timeval_sub_usecs(struct timeval *tv,
tv->tv_sec--;
tv->tv_usec += USEC_PER_SEC;
}
DCCP_BUG_ON(tv->tv_sec < 0);
}

#ifdef CONFIG_SYSCTL
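The hunk above adds a DCCP_BUG_ON() after the borrow in timeval_sub_usecs(), flagging subtractions that would drive the timestamp negative. A user-space sketch of the same borrow-and-check logic (struct timeval from <sys/time.h>, assert() in place of DCCP_BUG_ON; the loop form is an assumption for the sketch):

#include <assert.h>
#include <stdio.h>
#include <sys/time.h>

#define USEC_PER_SEC 1000000L

static void timeval_sub_usecs(struct timeval *tv, long usecs)
{
	tv->tv_usec -= usecs;
	while (tv->tv_usec < 0) {	/* borrow whole seconds until usec is back in range */
		tv->tv_sec--;
		tv->tv_usec += USEC_PER_SEC;
	}
	assert(tv->tv_sec >= 0);	/* mirrors DCCP_BUG_ON(tv->tv_sec < 0) */
}

int main(void)
{
	struct timeval tv = { .tv_sec = 2, .tv_usec = 100000 };

	timeval_sub_usecs(&tv, 300000);	/* borrows one second */
	printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);	/* prints 1.800000 */
	return 0;
}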
@ -329,7 +329,7 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
switch (type) {
case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break;
case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break;
default: DCCP_WARN("invalid type %d\n", type); return;
default: DCCP_WARN("invalid type %d\n", type); return;

}
opt->dccpop_feat = feature;
@ -427,7 +427,7 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature,
switch (type) {
case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break;
case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break;
default: DCCP_WARN("invalid type %d\n", type);
default: DCCP_WARN("invalid type %d\n", type);
return 1;

}
@ -610,7 +610,7 @@ const char *dccp_feat_typename(const u8 type)
case DCCPO_CHANGE_R: return("ChangeR");
case DCCPO_CONFIRM_R: return("ConfirmR");
/* the following case must not appear in feature negotation */
default: dccp_pr_debug("unknown type %d [BUG!]\n", type);
default: dccp_pr_debug("unknown type %d [BUG!]\n", type);
}
return NULL;
}
@ -1,6 +1,6 @@
/*
 * net/dccp/input.c
 *
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
@ -82,7 +82,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
 * Otherwise,
 * Drop packet and return
 */
if (dh->dccph_type == DCCP_PKT_SYNC ||
if (dh->dccph_type == DCCP_PKT_SYNC ||
dh->dccph_type == DCCP_PKT_SYNCACK) {
if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dp->dccps_awl, dp->dccps_awh) &&
@ -185,8 +185,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
dccp_rcv_close(sk, skb);
return 0;
case DCCP_PKT_REQUEST:
/* Step 7
 * or (S.is_server and P.type == Response)
/* Step 7
 * or (S.is_server and P.type == Response)
 * or (S.is_client and P.type == Request)
 * or (S.state >= OPEN and P.type == Request
 * and P.seqno >= S.OSR)
@ -248,8 +248,18 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
DCCP_ACKVEC_STATE_RECEIVED))
goto discard;

ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
/*
 * Deliver to the CCID module in charge.
 * FIXME: Currently DCCP operates one-directional only, i.e. a listening
 * server is not at the same time a connecting client. There is
 * not much sense in delivering to both rx/tx sides at the moment
 * (only one is active at a time); when moving to bidirectional
 * service, this needs to be revised.
 */
if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
else
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

return __dccp_rcv_established(sk, skb, dh, len);
discard:
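The change above stops feeding every packet to both CCID halves: a server socket drives only its receiver-side CCID, a client only its sender side. A toy dispatch sketch of that decision (hypothetical types and callbacks, not the kernel CCID API):

#include <stdio.h>

enum dccp_role { DCCP_ROLE_CLIENT, DCCP_ROLE_SERVER };

struct ccid {
	const char *name;
	void (*rx_packet_recv)(struct ccid *c);	/* receiver half */
	void (*tx_packet_recv)(struct ccid *c);	/* sender half */
};

static void rx_recv(struct ccid *c) { printf("%s: rx side handled packet\n", c->name); }
static void tx_recv(struct ccid *c) { printf("%s: tx side handled packet\n", c->name); }

/* Deliver to the half that is in charge for this role, mirroring the
 * if (role == DCCP_ROLE_SERVER) branch added above. */
static void deliver(enum dccp_role role, struct ccid *rx_ccid, struct ccid *tx_ccid)
{
	if (role == DCCP_ROLE_SERVER)
		rx_ccid->rx_packet_recv(rx_ccid);
	else
		tx_ccid->tx_packet_recv(tx_ccid);
}

int main(void)
{
	struct ccid ccid3 = { "ccid3", rx_recv, tx_recv };

	deliver(DCCP_ROLE_SERVER, &ccid3, &ccid3);
	deliver(DCCP_ROLE_CLIENT, &ccid3, &ccid3);
	return 0;
}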
@ -264,7 +274,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
const struct dccp_hdr *dh,
const unsigned len)
{
/*
/*
 * Step 4: Prepare sequence numbers in REQUEST
 * If S.state == REQUEST,
 * If (P.type == Response or P.type == Reset)
@ -332,7 +342,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
 * from the Response * /
 * S.state := PARTOPEN
 * Set PARTOPEN timer
 * Continue with S.state == PARTOPEN
 * Continue with S.state == PARTOPEN
 * / * Step 12 will send the Ack completing the
 * three-way handshake * /
 */
@ -363,7 +373,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
 */
__kfree_skb(skb);
return 0;
}
}
dccp_send_ack(sk);
return -1;
}
@ -371,7 +381,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
out_invalid_packet:
/* dccp_v4_do_rcv will send a reset */
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
return 1;
return 1;
}

static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
@ -478,14 +488,17 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
dccp_event_ack_recv(sk, skb);

if (dccp_msk(sk)->dccpms_send_ack_vector &&
if (dccp_msk(sk)->dccpms_send_ack_vector &&
dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_ACKVEC_STATE_RECEIVED))
goto discard;
DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_ACKVEC_STATE_RECEIVED))
goto discard;

ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
/* XXX see the comments in dccp_rcv_established about this */
if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
else
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
}

/*
@ -567,7 +580,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
}

if (!queued) {
if (!queued) {
discard:
__kfree_skb(skb);
}
@ -157,7 +157,7 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
/* We don't check in the destentry if pmtu discovery is forbidden
 * on this route. We just assume that no packet_to_big packets
 * are send back when pmtu discovery is not active.
 * There is a small race when the user changes this flag in the
 * There is a small race when the user changes this flag in the
 * route, but I think that's acceptable.
 */
if ((dst = __sk_dst_check(sk, 0)) == NULL)
@ -467,7 +467,7 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
.uli_u = { .ports =
{ .sport = dccp_hdr(skb)->dccph_dport,
.dport = dccp_hdr(skb)->dccph_sport }
}
}
};

security_skb_classify_flow(skb, &fl);
@ -595,7 +595,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
struct inet_request_sock *ireq;
struct request_sock *req;
struct dccp_request_sock *dreq;
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

@ -609,7 +609,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_bad_service_code(sk, service)) {
reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
}
/*
 * TW buckets are converted to open requests without
 * limitations, they conserve resources and peer is
@ -644,7 +644,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
ireq->rmt_addr = skb->nh.iph->saddr;
ireq->opt = NULL;

/*
/*
 * Step 3: Process LISTEN state
 *
 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
@ -846,15 +846,15 @@ static int dccp_v4_rcv(struct sk_buff *skb)
}

/* Step 2:
 * Look up flow ID in table and get corresponding socket */
 * Look up flow ID in table and get corresponding socket */
sk = __inet_lookup(&dccp_hashinfo,
skb->nh.iph->saddr, dh->dccph_sport,
skb->nh.iph->daddr, dh->dccph_dport,
inet_iif(skb));

/*
/*
 * Step 2:
 * If no socket ...
 * If no socket ...
 */
if (sk == NULL) {
dccp_pr_debug("failed to look up flow ID in table and "
@ -862,9 +862,9 @@ static int dccp_v4_rcv(struct sk_buff *skb)
goto no_dccp_socket;
}

/*
/*
 * Step 2:
 * ... or S.state == TIMEWAIT,
 * ... or S.state == TIMEWAIT,
 * Generate Reset(No Connection) unless P.type == Reset
 * Drop packet and return
 */
@ -876,8 +876,8 @@ static int dccp_v4_rcv(struct sk_buff *skb)

/*
 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
 */
min_cov = dccp_sk(sk)->dccps_pcrlen;
if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
@ -900,7 +900,7 @@ no_dccp_socket:
goto discard_it;
/*
 * Step 2:
 * If no socket ...
 * If no socket ...
 * Generate Reset(No Connection) unless P.type == Reset
 * Drop packet and return
 */
@ -77,7 +77,7 @@ static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
}

static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
__be16 sport, __be16 dport )
__be16 sport, __be16 dport )
{
return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}
@ -329,7 +329,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
GFP_ATOMIC);
if (skb == NULL)
return;
return;

skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

@ -353,7 +353,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)

dccp_csum_outgoing(skb);
dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr,
&rxskb->nh.ipv6h->daddr);
&rxskb->nh.ipv6h->daddr);

memset(&fl, 0, sizeof(fl));
ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
@ -424,7 +424,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct dccp_request_sock *dreq;
struct inet6_request_sock *ireq6;
struct ipv6_pinfo *np = inet6_sk(sk);
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

@ -437,7 +437,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_bad_service_code(sk, service)) {
reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
}
/*
 * There are no SYN attacks on IPv6, yet...
 */
@ -787,7 +787,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 * otherwise we just shortcircuit this and continue with
 * the new socket..
 */
if (nsk != sk) {
if (nsk != sk) {
if (dccp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb != NULL)
@ -843,14 +843,14 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

/* Step 2:
 * Look up flow ID in table and get corresponding socket */
 * Look up flow ID in table and get corresponding socket */
sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
dh->dccph_sport,
&skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
inet6_iif(skb));
/*
 * Step 2:
 * If no socket ...
 * If no socket ...
 */
if (sk == NULL) {
dccp_pr_debug("failed to look up flow ID in table and "
@ -860,7 +860,7 @@ static int dccp_v6_rcv(struct sk_buff **pskb)

/*
 * Step 2:
 * ... or S.state == TIMEWAIT,
 * ... or S.state == TIMEWAIT,
 * Generate Reset(No Connection) unless P.type == Reset
 * Drop packet and return
 */
@ -872,8 +872,8 @@ static int dccp_v6_rcv(struct sk_buff **pskb)

/*
 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
 */
min_cov = dccp_sk(sk)->dccps_pcrlen;
if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
@ -893,7 +893,7 @@ no_dccp_socket:
goto discard_it;
/*
 * Step 2:
 * If no socket ...
 * If no socket ...
 * Generate Reset(No Connection) unless P.type == Reset
 * Drop packet and return
 */
@ -182,7 +182,7 @@ out_free:

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);

/*
/*
 * Process an incoming packet for RESPOND sockets represented
 * as an request_sock.
 */
@ -557,11 +557,6 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
return -1;
dp->dccps_hc_rx_insert_options = 0;
}
if (dp->dccps_hc_tx_insert_options) {
if (ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb))
return -1;
dp->dccps_hc_tx_insert_options = 0;
}

/* Feature negotiation */
/* Data packets can't do feat negotiation */
@ -1,6 +1,6 @@
/*
 * net/dccp/output.c
 *
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
@ -175,14 +175,12 @@ void dccp_write_space(struct sock *sk)
/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk: socket to wait for
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
long *timeo)
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
{
struct dccp_sock *dp = dccp_sk(sk);
DEFINE_WAIT(wait);
long delay;
unsigned long delay;
int rc;

while (1) {
@ -190,8 +188,6 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,

if (sk->sk_err)
goto do_error;
if (!*timeo)
goto do_nonblock;
if (signal_pending(current))
goto do_interrupted;

@ -199,12 +195,9 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
if (rc <= 0)
break;
delay = msecs_to_jiffies(rc);
if (delay > *timeo || delay < 0)
goto do_nonblock;

sk->sk_write_pending++;
release_sock(sk);
*timeo -= schedule_timeout(delay);
schedule_timeout(delay);
lock_sock(sk);
sk->sk_write_pending--;
}
@ -215,11 +208,8 @@ out:
do_error:
rc = -EPIPE;
goto out;
do_nonblock:
rc = -EAGAIN;
goto out;
do_interrupted:
rc = sock_intr_errno(*timeo);
rc = -EINTR;
goto out;
}

@ -240,8 +230,6 @@ void dccp_write_xmit(struct sock *sk, int block)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
long timeo = DCCP_XMIT_TIMEO; /* If a packet is taking longer than
this we have other issues */

while ((skb = skb_peek(&sk->sk_write_queue))) {
int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
@ -251,11 +239,9 @@ void dccp_write_xmit(struct sock *sk, int block)
sk_reset_timer(sk, &dp->dccps_xmit_timer,
msecs_to_jiffies(err)+jiffies);
break;
} else {
err = dccp_wait_for_ccid(sk, skb, &timeo);
timeo = DCCP_XMIT_TIMEO;
}
if (err)
} else
err = dccp_wait_for_ccid(sk, skb);
if (err && err != -EINTR)
DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
}

@ -281,8 +267,10 @@ void dccp_write_xmit(struct sock *sk, int block)
if (err)
DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
err);
} else
} else {
dccp_pr_debug("packet discarded\n");
kfree(skb);
}
}
}

@ -350,7 +338,6 @@ EXPORT_SYMBOL_GPL(dccp_make_response);

static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
const enum dccp_reset_codes code)

{
struct dccp_hdr *dh;
struct dccp_sock *dp = dccp_sk(sk);
@ -431,14 +418,14 @@ static inline void dccp_connect_init(struct sock *sk)

dccp_sync_mss(sk, dst_mtu(dst));

/*
/*
 * SWL and AWL are initially adjusted so that they are not less than
 * the initial Sequence Numbers received and sent, respectively:
 * SWL := max(GSR + 1 - floor(W/4), ISR),
 * AWL := max(GSS - W' + 1, ISS).
 * These adjustments MUST be applied only at the beginning of the
 * connection.
 */
 */
dccp_update_gss(sk, dp->dccps_iss);
dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
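After this change dccp_wait_for_ccid() no longer tracks a DCCP_XMIT_TIMEO budget; it simply sleeps for whatever delay the CCID reports and retries until the CCID accepts the packet or a signal/error ends the wait. A user-space caricature of that loop (the send hook is a stub standing in for ccid_hc_tx_send_packet(): 0 means "send now", a positive value is a delay in milliseconds):

#include <stdio.h>
#include <time.h>

/* Stub: ask for a 10 ms delay twice, then allow sending. */
static int ccid_send_packet_hook(int attempt)
{
	return attempt < 3 ? 10 : 0;
}

static int wait_for_ccid(void)
{
	int attempt = 0;

	for (;;) {
		int rc = ccid_send_packet_hook(++attempt);

		if (rc <= 0)
			return rc;	/* 0: ok to transmit */

		struct timespec ts = { rc / 1000, (rc % 1000) * 1000000L };
		nanosleep(&ts, NULL);	/* stands in for schedule_timeout(delay) */
	}
}

int main(void)
{
	printf("wait_for_ccid() -> %d\n", wait_for_ccid());
	return 0;
}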
@ -196,7 +196,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
sk, GFP_KERNEL);
dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
sk, GFP_KERNEL);
if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
dp->dccps_hc_tx_ccid == NULL)) {
ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
@ -390,7 +390,7 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_service_list *sl = NULL;

if (service == DCCP_SERVICE_INVALID_VALUE ||
if (service == DCCP_SERVICE_INVALID_VALUE ||
optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
return -EINVAL;

@ -830,7 +830,7 @@ EXPORT_SYMBOL_GPL(inet_dccp_listen);
static const unsigned char dccp_new_state[] = {
/* current state: new state: action: */
[0] = DCCP_CLOSED,
[DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
[DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
[DCCP_REQUESTING] = DCCP_CLOSED,
[DCCP_PARTOPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
[DCCP_LISTEN] = DCCP_CLOSED,
@ -1,6 +1,6 @@
/*
 * net/dccp/timer.c
 *
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
@ -102,13 +102,13 @@ static void dccp_retransmit_timer(struct sock *sk)
 * sk->sk_send_head has to have one skb with
 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
 * packet types. The only packets eligible for retransmission are:
 * -- Requests in client-REQUEST state (sec. 8.1.1)
 * -- Acks in client-PARTOPEN state (sec. 8.1.5)
 * -- CloseReq in server-CLOSEREQ state (sec. 8.3)
 * -- Close in node-CLOSING state (sec. 8.3) */
 * -- Requests in client-REQUEST state (sec. 8.1.1)
 * -- Acks in client-PARTOPEN state (sec. 8.1.5)
 * -- CloseReq in server-CLOSEREQ state (sec. 8.3)
 * -- Close in node-CLOSING state (sec. 8.3) */
BUG_TRAP(sk->sk_send_head != NULL);

/*
/*
 * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was
 * sent, no need to retransmit, this sock is dead.
 */
@ -200,7 +200,7 @@ static void dccp_keepalive_timer(unsigned long data)
/* Only process if socket is not in use. */
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
/* Try again later. */
/* Try again later. */
inet_csk_reset_keepalive_timer(sk, HZ / 20);
goto out;
}
@ -657,7 +657,7 @@ static void sync_master_loop(void)
if (stop_master_sync)
break;

ssleep(1);
msleep_interruptible(1000);
}

/* clean up the sync_buff queue */
@ -714,7 +714,7 @@ static void sync_backup_loop(void)
if (stop_backup_sync)
break;

ssleep(1);
msleep_interruptible(1000);
}

/* release the sending multicast socket */
@ -826,7 +826,7 @@ static int fork_sync_thread(void *startup)
if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) {
IP_VS_ERR("could not create sync_thread due to %d... "
"retrying.\n", pid);
ssleep(1);
msleep_interruptible(1000);
goto repeat;
}

@ -849,10 +849,12 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)

ip_vs_sync_state |= state;
if (state == IP_VS_STATE_MASTER) {
strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, sizeof(ip_vs_master_mcast_ifn));
strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
sizeof(ip_vs_master_mcast_ifn));
ip_vs_master_syncid = syncid;
} else {
strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, sizeof(ip_vs_backup_mcast_ifn));
strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
sizeof(ip_vs_backup_mcast_ifn));
ip_vs_backup_syncid = syncid;
}

@ -860,7 +862,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
IP_VS_ERR("could not create fork_sync_thread due to %d... "
"retrying.\n", pid);
ssleep(1);
msleep_interruptible(1000);
goto repeat;
}

@ -880,7 +882,8 @@ int stop_sync_thread(int state)

IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
IP_VS_INFO("stopping sync thread %d ...\n",
(state == IP_VS_STATE_MASTER) ? sync_master_pid : sync_backup_pid);
(state == IP_VS_STATE_MASTER) ?
sync_master_pid : sync_backup_pid);

__set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&stop_sync_wait, &wait);
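The ssleep(1) to msleep_interruptible(1000) change lets the IPVS sync threads be woken early (for instance when they are told to stop) instead of always sleeping out the full second. A rough user-space analogue: nanosleep() returns early when a signal arrives, so a stop flag set by a handler is noticed without waiting the whole interval:

#include <signal.h>
#include <stdio.h>
#include <time.h>

static volatile sig_atomic_t stop_sync;

static void on_term(int sig) { (void)sig; stop_sync = 1; }

int main(void)
{
	signal(SIGTERM, on_term);

	while (!stop_sync) {
		/* ... one round of sync work would go here ... */
		struct timespec ts = { 1, 0 };
		nanosleep(&ts, NULL);	/* cut short by SIGTERM, like msleep_interruptible() */
	}
	puts("sync loop stopped");
	return 0;
}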