net: add rb_to_skb() and other rb tree helpers

Generalize private netem_rb_to_skb().

TCP rtx queue will soon be converted to rb-tree, so we will need
skb_rbtree_walk() helpers.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 18a4c0eab2
parent f5333f80c3
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3158,6 +3158,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
 	return __skb_grow(skb, len);
 }
 
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root)  rb_to_skb(rb_last(root))
+#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
+
 #define skb_queue_walk(queue, skb) \
 		for (skb = (queue)->next; \
 		     skb != (struct sk_buff *)(queue); \
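As a usage sketch (not part of this commit): because rb_to_skb() is built on rb_entry_safe(), these accessors are NULL-safe, so an empty tree yields NULL rather than a bogus pointer. Assuming a hypothetical rb-tree of skbs named t_root:

	struct rb_root t_root = RB_ROOT;	/* hypothetical tree of skbs */
	struct sk_buff *first, *last;

	first = skb_rb_first(&t_root);	/* NULL when the tree is empty */
	last = skb_rb_last(&t_root);	/* likewise NULL-safe */
	if (first && !skb_rb_next(first)) {
		/* exactly one skb queued; this is the pattern the
		 * tcp_fastopen hunk below switches to.
		 */
	}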
@@ -3172,6 +3178,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
 		for (; skb != (struct sk_buff *)(queue); \
 		     skb = skb->next)
 
+#define skb_rbtree_walk(skb, root) \
+		for (skb = skb_rb_first(root); skb != NULL; \
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb) \
+		for (; skb != NULL; \
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp) \
+		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
+		     skb = tmp)
+
 #define skb_queue_walk_from_safe(queue, skb, tmp) \
 		for (tmp = skb->next; \
 		     skb != (struct sk_buff *)(queue); \
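And a sketch of the new walkers (again hypothetical names, not from this commit): skb_rbtree_walk() iterates in key order, while skb_rbtree_walk_from_safe() caches the successor in tmp before the loop body runs, so the body may erase and free the current skb:

	struct sk_buff *skb, *tmp;

	skb_rbtree_walk(skb, &t_root) {
		/* read-only visit; must not erase or free skb here */
	}

	skb = skb_rb_first(&t_root);
	skb_rbtree_walk_from_safe(skb, tmp) {
		/* tmp already points at the successor, so this is safe */
		rb_erase(&skb->rbnode, &t_root);
		kfree_skb(skb);
	}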
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -465,17 +465,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node *p;
-	struct sk_buff *skb;
 	struct dst_entry *dst;
+	struct sk_buff *skb;
 
 	if (!tp->syn_fastopen)
 		return;
 
 	if (!tp->data_segs_in) {
-		p = rb_first(&tp->out_of_order_queue);
-		if (p && !rb_next(p)) {
-			skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = skb_rb_first(&tp->out_of_order_queue);
+		if (skb && !skb_rb_next(skb)) {
 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
 				tcp_fastopen_active_disable(sk);
 				return;
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4335,7 +4335,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
 	p = rb_first(&tp->out_of_order_queue);
 	while (p) {
-		skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = rb_to_skb(p);
 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
 			break;
 
@@ -4399,7 +4399,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node **p, *q, *parent;
+	struct rb_node **p, *parent;
 	struct sk_buff *skb1;
 	u32 seq, end_seq;
 	bool fragstolen;
@@ -4458,7 +4458,7 @@ coalesce_done:
 	parent = NULL;
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(seq, TCP_SKB_CB(skb1)->seq)) {
 			p = &parent->rb_left;
 			continue;
@@ -4503,9 +4503,7 @@ insert:
 
 merge_right:
 	/* Remove other segments covered by skb. */
-	while ((q = rb_next(&skb->rbnode)) != NULL) {
-		skb1 = rb_entry(q, struct sk_buff, rbnode);
-
+	while ((skb1 = skb_rb_next(skb)) != NULL) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
 			break;
 		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4520,7 +4518,7 @@ merge_right:
 		tcp_drop(sk, skb1);
 	}
 	/* If there is no skb after us, we are the last_skb ! */
-	if (!q)
+	if (!skb1)
 		tp->ooo_last_skb = skb;
 
 add_sack:
@@ -4706,7 +4704,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
 	if (list)
 		return !skb_queue_is_last(list, skb) ? skb->next : NULL;
 
-	return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+	return skb_rb_next(skb);
 }
 
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4735,7 +4733,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
 
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
 			p = &parent->rb_left;
 		else
@@ -4854,26 +4852,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb, *head;
-	struct rb_node *p;
 	u32 start, end;
 
-	p = rb_first(&tp->out_of_order_queue);
-	skb = rb_entry_safe(p, struct sk_buff, rbnode);
+	skb = skb_rb_first(&tp->out_of_order_queue);
 new_range:
 	if (!skb) {
-		p = rb_last(&tp->out_of_order_queue);
-		/* Note: This is possible p is NULL here. We do not
-		 * use rb_entry_safe(), as ooo_last_skb is valid only
-		 * if rbtree is not empty.
-		 */
-		tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
 		return;
 	}
 	start = TCP_SKB_CB(skb)->seq;
 	end = TCP_SKB_CB(skb)->end_seq;
 
 	for (head = skb;;) {
-		skb = tcp_skb_next(skb, NULL);
+		skb = skb_rb_next(skb);
 
 		/* Range is terminated when we see a gap or when
 		 * we are at the queue end.
@@ -4916,14 +4907,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
-		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+		tcp_drop(sk, rb_to_skb(node));
 		sk_mem_reclaim(sk);
 		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 		    !tcp_under_memory_pressure(sk))
 			break;
 		node = prev;
 	} while (node);
-	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+	tp->ooo_last_skb = rb_to_skb(prev);
 
 	/* Reset SACK state. A conforming SACK implementation will
 	 * do the same at a timeout based retransmit. When a connection
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -148,12 +148,6 @@ struct netem_skb_cb {
 	psched_time_t	time_to_send;
 };
 
-
-static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
-{
-	return rb_entry(rb, struct sk_buff, rbnode);
-}
-
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
 	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
@@ -364,7 +358,7 @@ static void tfifo_reset(struct Qdisc *sch)
 	struct rb_node *p = rb_first(&q->t_root);
 
 	while (p) {
-		struct sk_buff *skb = netem_rb_to_skb(p);
+		struct sk_buff *skb = rb_to_skb(p);
 
 		p = rb_next(p);
 		rb_erase(&skb->rbnode, &q->t_root);
@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		struct sk_buff *skb;
 
 		parent = *p;
-		skb = netem_rb_to_skb(parent);
+		skb = rb_to_skb(parent);
 		if (tnext >= netem_skb_cb(skb)->time_to_send)
 			p = &parent->rb_right;
 		else
@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			struct sk_buff *t_skb;
 			struct netem_skb_cb *t_last;
 
-			t_skb = netem_rb_to_skb(rb_last(&q->t_root));
+			t_skb = skb_rb_last(&q->t_root);
 			t_last = netem_skb_cb(t_skb);
 			if (!last ||
 			    t_last->time_to_send > last->time_to_send) {
@@ -617,7 +611,7 @@ deliver:
 		if (p) {
 			psched_time_t time_to_send;
 
-			skb = netem_rb_to_skb(p);
+			skb = rb_to_skb(p);
 
 			/* if more time remaining? */
 			time_to_send = netem_skb_cb(skb)->time_to_send;