Skip to content

Commit 18a4c0e

Browse files
Eric Dumazet authored and davem330 committed
net: add rb_to_skb() and other rb tree helpers
Generalize private netem_rb_to_skb(). TCP rtx queue will soon be converted to rb-tree, so we will need skb_rbtree_walk() helpers. Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent f5333f8 commit 18a4c0e

File tree

4 files changed

+37
-36
lines changed

4 files changed

+37
-36
lines changed

include/linux/skbuff.h

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3158,6 +3158,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
31583158
return __skb_grow(skb, len);
31593159
}
31603160

3161+
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3162+
#define skb_rb_first(root) rb_to_skb(rb_first(root))
3163+
#define skb_rb_last(root) rb_to_skb(rb_last(root))
3164+
#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3165+
#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3166+
31613167
#define skb_queue_walk(queue, skb) \
31623168
for (skb = (queue)->next; \
31633169
skb != (struct sk_buff *)(queue); \
@@ -3172,6 +3178,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
31723178
for (; skb != (struct sk_buff *)(queue); \
31733179
skb = skb->next)
31743180

3181+
#define skb_rbtree_walk(skb, root) \
3182+
for (skb = skb_rb_first(root); skb != NULL; \
3183+
skb = skb_rb_next(skb))
3184+
3185+
#define skb_rbtree_walk_from(skb) \
3186+
for (; skb != NULL; \
3187+
skb = skb_rb_next(skb))
3188+
3189+
#define skb_rbtree_walk_from_safe(skb, tmp) \
3190+
for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3191+
skb = tmp)
3192+
31753193
#define skb_queue_walk_from_safe(queue, skb, tmp) \
31763194
for (tmp = skb->next; \
31773195
skb != (struct sk_buff *)(queue); \

net/ipv4/tcp_fastopen.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -465,17 +465,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
465465
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
466466
{
467467
struct tcp_sock *tp = tcp_sk(sk);
468-
struct rb_node *p;
469-
struct sk_buff *skb;
470468
struct dst_entry *dst;
469+
struct sk_buff *skb;
471470

472471
if (!tp->syn_fastopen)
473472
return;
474473

475474
if (!tp->data_segs_in) {
476-
p = rb_first(&tp->out_of_order_queue);
477-
if (p && !rb_next(p)) {
478-
skb = rb_entry(p, struct sk_buff, rbnode);
475+
skb = skb_rb_first(&tp->out_of_order_queue);
476+
if (skb && !skb_rb_next(skb)) {
479477
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
480478
tcp_fastopen_active_disable(sk);
481479
return;

net/ipv4/tcp_input.c

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -4335,7 +4335,7 @@ static void tcp_ofo_queue(struct sock *sk)
43354335

43364336
p = rb_first(&tp->out_of_order_queue);
43374337
while (p) {
4338-
skb = rb_entry(p, struct sk_buff, rbnode);
4338+
skb = rb_to_skb(p);
43394339
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
43404340
break;
43414341

@@ -4399,7 +4399,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
43994399
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
44004400
{
44014401
struct tcp_sock *tp = tcp_sk(sk);
4402-
struct rb_node **p, *q, *parent;
4402+
struct rb_node **p, *parent;
44034403
struct sk_buff *skb1;
44044404
u32 seq, end_seq;
44054405
bool fragstolen;
@@ -4458,7 +4458,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
44584458
parent = NULL;
44594459
while (*p) {
44604460
parent = *p;
4461-
skb1 = rb_entry(parent, struct sk_buff, rbnode);
4461+
skb1 = rb_to_skb(parent);
44624462
if (before(seq, TCP_SKB_CB(skb1)->seq)) {
44634463
p = &parent->rb_left;
44644464
continue;
@@ -4503,9 +4503,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45034503

45044504
merge_right:
45054505
/* Remove other segments covered by skb. */
4506-
while ((q = rb_next(&skb->rbnode)) != NULL) {
4507-
skb1 = rb_entry(q, struct sk_buff, rbnode);
4508-
4506+
while ((skb1 = skb_rb_next(skb)) != NULL) {
45094507
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
45104508
break;
45114509
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4520,7 +4518,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45204518
tcp_drop(sk, skb1);
45214519
}
45224520
/* If there is no skb after us, we are the last_skb ! */
4523-
if (!q)
4521+
if (!skb1)
45244522
tp->ooo_last_skb = skb;
45254523

45264524
add_sack:
@@ -4706,7 +4704,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
47064704
if (list)
47074705
return !skb_queue_is_last(list, skb) ? skb->next : NULL;
47084706

4709-
return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
4707+
return skb_rb_next(skb);
47104708
}
47114709

47124710
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4735,7 +4733,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
47354733

47364734
while (*p) {
47374735
parent = *p;
4738-
skb1 = rb_entry(parent, struct sk_buff, rbnode);
4736+
skb1 = rb_to_skb(parent);
47394737
if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
47404738
p = &parent->rb_left;
47414739
else
@@ -4854,26 +4852,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
48544852
{
48554853
struct tcp_sock *tp = tcp_sk(sk);
48564854
struct sk_buff *skb, *head;
4857-
struct rb_node *p;
48584855
u32 start, end;
48594856

4860-
p = rb_first(&tp->out_of_order_queue);
4861-
skb = rb_entry_safe(p, struct sk_buff, rbnode);
4857+
skb = skb_rb_first(&tp->out_of_order_queue);
48624858
new_range:
48634859
if (!skb) {
4864-
p = rb_last(&tp->out_of_order_queue);
4865-
/* Note: This is possible p is NULL here. We do not
4866-
* use rb_entry_safe(), as ooo_last_skb is valid only
4867-
* if rbtree is not empty.
4868-
*/
4869-
tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
4860+
tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
48704861
return;
48714862
}
48724863
start = TCP_SKB_CB(skb)->seq;
48734864
end = TCP_SKB_CB(skb)->end_seq;
48744865

48754866
for (head = skb;;) {
4876-
skb = tcp_skb_next(skb, NULL);
4867+
skb = skb_rb_next(skb);
48774868

48784869
/* Range is terminated when we see a gap or when
48794870
* we are at the queue end.
@@ -4916,14 +4907,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
49164907
do {
49174908
prev = rb_prev(node);
49184909
rb_erase(node, &tp->out_of_order_queue);
4919-
tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
4910+
tcp_drop(sk, rb_to_skb(node));
49204911
sk_mem_reclaim(sk);
49214912
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
49224913
!tcp_under_memory_pressure(sk))
49234914
break;
49244915
node = prev;
49254916
} while (node);
4926-
tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
4917+
tp->ooo_last_skb = rb_to_skb(prev);
49274918

49284919
/* Reset SACK state. A conforming SACK implementation will
49294920
* do the same at a timeout based retransmit. When a connection

net/sched/sch_netem.c

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -148,12 +148,6 @@ struct netem_skb_cb {
148148
psched_time_t time_to_send;
149149
};
150150

151-
152-
static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
153-
{
154-
return rb_entry(rb, struct sk_buff, rbnode);
155-
}
156-
157151
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
158152
{
159153
/* we assume we can use skb next/prev/tstamp as storage for rb_node */
@@ -364,7 +358,7 @@ static void tfifo_reset(struct Qdisc *sch)
364358
struct rb_node *p = rb_first(&q->t_root);
365359

366360
while (p) {
367-
struct sk_buff *skb = netem_rb_to_skb(p);
361+
struct sk_buff *skb = rb_to_skb(p);
368362

369363
p = rb_next(p);
370364
rb_erase(&skb->rbnode, &q->t_root);
@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
382376
struct sk_buff *skb;
383377

384378
parent = *p;
385-
skb = netem_rb_to_skb(parent);
379+
skb = rb_to_skb(parent);
386380
if (tnext >= netem_skb_cb(skb)->time_to_send)
387381
p = &parent->rb_right;
388382
else
@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
538532
struct sk_buff *t_skb;
539533
struct netem_skb_cb *t_last;
540534

541-
t_skb = netem_rb_to_skb(rb_last(&q->t_root));
535+
t_skb = skb_rb_last(&q->t_root);
542536
t_last = netem_skb_cb(t_skb);
543537
if (!last ||
544538
t_last->time_to_send > last->time_to_send) {
@@ -617,7 +611,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
617611
if (p) {
618612
psched_time_t time_to_send;
619613

620-
skb = netem_rb_to_skb(p);
614+
skb = rb_to_skb(p);
621615

622616
/* if more time remaining? */
623617
time_to_send = netem_skb_cb(skb)->time_to_send;

0 commit comments

Comments
 (0)