|
57 | 57 | */
|
58 | 58 | static const char ip_frag_cache_name[] = "ip4-frags";
|
59 | 59 |
|
/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	/* NOTE(review): kept as the first member, presumably so the generic
	 * inet control-block layout in skb->cb stays intact — TODO confirm.
	 */
	struct inet_skb_parm	h;
	/* Next fragment in the same run; NULL at the tail of a run. */
	struct sk_buff		*next_frag;
	/* Meaningful only at the head of a run: total length (sum of
	 * skb->len) of all fragments in the run.
	 */
	int			frag_run_len;
};

/* View the skb control buffer as our per-fragment bookkeeping struct. */
#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
| 76 | +static void ip4_frag_init_run(struct sk_buff *skb) |
| 77 | +{ |
| 78 | + BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb)); |
| 79 | + |
| 80 | + FRAG_CB(skb)->next_frag = NULL; |
| 81 | + FRAG_CB(skb)->frag_run_len = skb->len; |
| 82 | +} |
| 83 | + |
| 84 | +/* Append skb to the last "run". */ |
| 85 | +static void ip4_frag_append_to_last_run(struct inet_frag_queue *q, |
| 86 | + struct sk_buff *skb) |
| 87 | +{ |
| 88 | + RB_CLEAR_NODE(&skb->rbnode); |
| 89 | + FRAG_CB(skb)->next_frag = NULL; |
| 90 | + |
| 91 | + FRAG_CB(q->last_run_head)->frag_run_len += skb->len; |
| 92 | + FRAG_CB(q->fragments_tail)->next_frag = skb; |
| 93 | + q->fragments_tail = skb; |
| 94 | +} |
| 95 | + |
| 96 | +/* Create a new "run" with the skb. */ |
| 97 | +static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb) |
| 98 | +{ |
| 99 | + if (q->last_run_head) |
| 100 | + rb_link_node(&skb->rbnode, &q->last_run_head->rbnode, |
| 101 | + &q->last_run_head->rbnode.rb_right); |
| 102 | + else |
| 103 | + rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node); |
| 104 | + rb_insert_color(&skb->rbnode, &q->rb_fragments); |
| 105 | + |
| 106 | + ip4_frag_init_run(skb); |
| 107 | + q->fragments_tail = skb; |
| 108 | + q->last_run_head = skb; |
| 109 | +} |
| 110 | + |
60 | 111 | /* Describe an entry in the "incomplete datagrams" queue. */
|
61 | 112 | struct ipq {
|
62 | 113 | struct inet_frag_queue q;
|
@@ -654,6 +705,28 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
|
654 | 705 | }
|
655 | 706 | EXPORT_SYMBOL(ip_check_defrag);
|
656 | 707 |
|
| 708 | +unsigned int inet_frag_rbtree_purge(struct rb_root *root) |
| 709 | +{ |
| 710 | + struct rb_node *p = rb_first(root); |
| 711 | + unsigned int sum = 0; |
| 712 | + |
| 713 | + while (p) { |
| 714 | + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); |
| 715 | + |
| 716 | + p = rb_next(p); |
| 717 | + rb_erase(&skb->rbnode, root); |
| 718 | + while (skb) { |
| 719 | + struct sk_buff *next = FRAG_CB(skb)->next_frag; |
| 720 | + |
| 721 | + sum += skb->truesize; |
| 722 | + kfree_skb(skb); |
| 723 | + skb = next; |
| 724 | + } |
| 725 | + } |
| 726 | + return sum; |
| 727 | +} |
| 728 | +EXPORT_SYMBOL(inet_frag_rbtree_purge); |
| 729 | + |
657 | 730 | #ifdef CONFIG_SYSCTL
|
658 | 731 | static int dist_min;
|
659 | 732 |
|
|
0 commit comments