// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/interval_tree_generic.h>
#include <linux/slab.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/bpf.h>
#include "range_tree.h"

/*
 * struct range_tree is a data structure used to allocate contiguous memory
 * ranges in bpf arena. Conceptually it is a large bitmap. A contiguous
 * sequence of set bits is represented by struct range_node, or 'rn' for short.
 * rn->rn_rbnode links the node into an interval tree, while
 * rn->rb_range_size links it into a second rbtree sorted by the size of the
 * range.
 * __find_range() does a best-fit binary search over the size rbtree and
 * returns the smallest range whose size is greater than or equal to the
 * requested size.
 * range_tree_clear/set() clears or sets a range of bits in this bitmap.
 * Adjacent ranges are split or merged as part of the same operation.
 *
 * The split/merge logic is based on/borrowed from XFS's xbitmap32 added
 * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
 *
 * The implementation relies on an external lock to protect the rbtrees.
 * range_node-s are allocated and freed via bpf_mem_alloc.
 *
 * bpf arena uses range_tree to represent unallocated slots.
 * At init time:
 *   range_tree_set(rt, 0, max);
 * Then:
 *   start = range_tree_find(rt, len);
 *   if (start >= 0)
 *     range_tree_clear(rt, start, len);
 * to find a free range and mark the slots as allocated, and later:
 *   range_tree_set(rt, start, len);
 * to mark them as unallocated again after use.
 */
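
/*
 * Example (for illustration): with bits 0..2 and 5..6 set, the tree holds
 * two nodes, [0, 2] and [5, 6]. Set ranges are kept maximal: nodes never
 * overlap and are never adjacent, because range_tree_set() merges
 * neighbouring nodes and range_tree_clear() trims or splits them.
 */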
struct range_node {
	struct rb_node rn_rbnode;
	struct rb_node rb_range_size;
	u32 rn_start;
	u32 rn_last; /* inclusive */
	u32 __rn_subtree_last;
};

static struct range_node *rb_to_range_node(struct rb_node *rb)
{
	return rb_entry(rb, struct range_node, rb_range_size);
}

static u32 rn_size(struct range_node *rn)
{
	return rn->rn_last - rn->rn_start + 1;
}

/*
 * Find the best-fitting range for the requested size. The size rbtree is
 * kept in descending order (see __range_size_insert()), so walking right
 * moves towards smaller ranges; the last node that still fits is the
 * smallest range with rn_size() >= len.
 */
static inline struct range_node *__find_range(struct range_tree *rt, u32 len)
{
	struct rb_node *rb = rt->range_size_root.rb_root.rb_node;
	struct range_node *best = NULL;

	while (rb) {
		struct range_node *rn = rb_to_range_node(rb);

		if (len <= rn_size(rn)) {
			best = rn;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return best;
}
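
/*
 * Example (hypothetical sizes, for illustration only): if the size tree
 * holds set ranges of 16, 8 and 2 bits, __find_range(rt, 3) returns the
 * size-8 node: going right whenever the current node still fits lands on
 * the smallest range that can satisfy the request.
 */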

/*
 * Return the start of the smallest set range of at least @len bits, or
 * -ENOENT if no such range exists. The return type is s64 so that an error
 * can be distinguished from any valid u32 start.
 */
s64 range_tree_find(struct range_tree *rt, u32 len)
{
	struct range_node *rn;

	rn = __find_range(rt, len);
	if (!rn)
		return -ENOENT;
	return rn->rn_start;
}

/* Insert the range into the rbtree sorted by range size, in descending order */
static inline void __range_size_insert(struct range_node *rn,
				       struct rb_root_cached *root)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 size = rn_size(rn);
	bool leftmost = true;

	while (*link) {
		rb = *link;
		if (size > rn_size(rb_to_range_node(rb))) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&rn->rb_range_size, rb, link);
	rb_insert_color_cached(&rn->rb_range_size, root, leftmost);
}

#define START(node) ((node)->rn_start)
#define LAST(node) ((node)->rn_last)

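/*
 * INTERVAL_TREE_DEFINE() generates the __range_it_insert(),
 * __range_it_remove(), __range_it_iter_first() and __range_it_iter_next()
 * helpers that keep the interval tree keyed by [rn_start, rn_last] and
 * maintain __rn_subtree_last.
 */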
INTERVAL_TREE_DEFINE(struct range_node, rn_rbnode, u32,
		     __rn_subtree_last, START, LAST,
		     static inline __maybe_unused,
		     __range_it)

/*
 * Each range_node lives in both trees, so insertion and removal always
 * update the size rbtree and the interval tree together.
 */
static inline __maybe_unused void
range_it_insert(struct range_node *rn, struct range_tree *rt)
{
	__range_size_insert(rn, &rt->range_size_root);
	__range_it_insert(rn, &rt->it_root);
}

static inline __maybe_unused void
range_it_remove(struct range_node *rn, struct range_tree *rt)
{
	rb_erase_cached(&rn->rb_range_size, &rt->range_size_root);
	RB_CLEAR_NODE(&rn->rb_range_size);
	__range_it_remove(rn, &rt->it_root);
}

static inline __maybe_unused struct range_node *
range_it_iter_first(struct range_tree *rt, u32 start, u32 last)
{
	return __range_it_iter_first(&rt->it_root, start, last);
}

/* Clear the range of bits [start, start + len - 1] in this range tree */
int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *new_rn;
	struct range_node *rn;

	while ((rn = range_it_iter_first(rt, start, last))) {
		if (rn->rn_start < start && rn->rn_last > last) {
			u32 old_last = rn->rn_last;

			/* The node covers the entire clearing range: split it */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);

			/* Add a range for the part to the right of the cleared bits */
			new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
			if (!new_rn)
				return -ENOMEM;
			new_rn->rn_start = last + 1;
			new_rn->rn_last = old_last;
			range_it_insert(new_rn, rt);
		} else if (rn->rn_start < start) {
			/* Overlaps with the left side of the clearing range: trim it */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);
		} else if (rn->rn_last > last) {
			/*
			 * Overlaps with the right side of the clearing range:
			 * trim it. No further node can overlap, so stop.
			 */
			range_it_remove(rn, rt);
			rn->rn_start = last + 1;
			range_it_insert(rn, rt);
			break;
		} else {
			/* Lies entirely within the clearing range: drop it */
			range_it_remove(rn, rt);
			bpf_mem_free(&bpf_global_ma, rn);
		}
	}
	return 0;
}
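
/*
 * Example (hypothetical values, for illustration only): with a single node
 * [0, 99], range_tree_clear(rt, 40, 10) hits the split case above and
 * leaves two nodes, [0, 39] and [50, 99]. Clearing a range that covers a
 * node entirely simply frees that node.
 */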

/*
 * Check if the whole range [start, start + len - 1] is set. Because set
 * ranges are kept merged into maximal nodes, it is enough to check whether
 * a single node covers the query. Returns 0 if fully set, -ESRCH otherwise.
 */
int is_range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *left;

	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;
	return -ESRCH;
}

/* Set the range of bits [start, start + len - 1] in this range tree */
int range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *right;
	struct range_node *left;
	int err;

	/* Is this whole range already set ? */
	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	err = range_tree_clear(rt, start, len);
	if (err)
		return err;

	/* Do we have a left-adjacent range ? */
	left = range_it_iter_first(rt, start - 1, start - 1);
	if (left && left->rn_last + 1 != start)
		return -EFAULT;

	/* Do we have a right-adjacent range ? */
	right = range_it_iter_first(rt, last + 1, last + 1);
	if (right && right->rn_start != last + 1)
		return -EFAULT;

	if (left && right) {
		/* Combine left and right adjacent ranges */
		range_it_remove(left, rt);
		range_it_remove(right, rt);
		left->rn_last = right->rn_last;
		range_it_insert(left, rt);
		bpf_mem_free(&bpf_global_ma, right);
	} else if (left) {
		/* Combine with the left range */
		range_it_remove(left, rt);
		left->rn_last = last;
		range_it_insert(left, rt);
	} else if (right) {
		/* Combine with the right range */
		range_it_remove(right, rt);
		right->rn_start = start;
		range_it_insert(right, rt);
	} else {
		/* No adjacent range to merge with: insert a new node */
		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
		if (!left)
			return -ENOMEM;
		left->rn_start = start;
		left->rn_last = last;
		range_it_insert(left, rt);
	}
	return 0;
}
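
/*
 * Example (hypothetical values, for illustration only): continuing the
 * range_tree_clear() example above, with nodes [0, 39] and [50, 99],
 * range_tree_set(rt, 40, 10) finds both a left- and a right-adjacent node
 * and merges everything back into a single node [0, 99].
 */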

/*
 * Free every remaining node. Like the rest of the API, this relies on the
 * caller's external lock for protection.
 */
void range_tree_destroy(struct range_tree *rt)
{
	struct range_node *rn;

	while ((rn = range_it_iter_first(rt, 0, -1U))) {
		range_it_remove(rn, rt);
		bpf_mem_free(&bpf_global_ma, rn);
	}
}

/* Initialize an empty tree: every bit starts out cleared */
void range_tree_init(struct range_tree *rt)
{
	rt->it_root = RB_ROOT_CACHED;
	rt->range_size_root = RB_ROOT_CACHED;
}
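
/*
 * End-to-end example (hypothetical values, error handling omitted),
 * mirroring the usage sketched in the header comment:
 *
 *	range_tree_init(rt);
 *	range_tree_set(rt, 0, 1024);         all 1024 slots are free
 *	start = range_tree_find(rt, 16);     returns 0
 *	range_tree_clear(rt, start, 16);     slots 0..15 are now allocated
 *	...
 *	range_tree_set(rt, start, 16);       slots 0..15 are free again
 *	range_tree_destroy(rt);
 */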