
Commit b795379

Alexei Starovoitov authored and Andrii Nakryiko committed
bpf: Introduce range_tree data structure and use it in bpf arena
Introduce range_tree data structure and use it in bpf arena to track
ranges of allocated pages. range_tree is a large bitmap that is
implemented as interval tree plus rbtree. The contiguous sequence of
bits represents unallocated pages.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Kumar Kartikeya Dwivedi <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent 8714381 commit b795379
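
Not part of the commit, but to make the lifecycle described above concrete: set everything free at init, find + clear to allocate, set to free again. The sketch below models that contract in plain userspace C; the names toy_set/toy_clear/toy_find are made up for this illustration, and a byte-per-page array with a linear first-fit scan stands in for the real interval tree plus size-sorted rbtree, which does best-fit.

/* toy_range.c - illustration-only userspace model of the range_tree contract.
 * A set entry means "page is free", mirroring how bpf arena uses range_tree.
 */
#include <stdio.h>
#include <string.h>

#define MAX_PAGES 16

static unsigned char free_map[MAX_PAGES];   /* 1 = free, 0 = allocated */

static void toy_set(int start, int len)     /* like range_tree_set(): mark pages free */
{
	memset(free_map + start, 1, len);
}

static void toy_clear(int start, int len)   /* like range_tree_clear(): mark pages allocated */
{
	memset(free_map + start, 0, len);
}

static int toy_find(int len)                /* like range_tree_find(): start offset or -1 */
{
	for (int start = 0; start + len <= MAX_PAGES; start++) {
		int i;

		for (i = 0; i < len && free_map[start + i]; i++)
			;
		if (i == len)
			return start;
	}
	return -1;
}

int main(void)
{
	int start;

	toy_set(0, MAX_PAGES);          /* init: every page starts out free */

	start = toy_find(4);            /* find 4 contiguous free pages ... */
	if (start < 0) {
		printf("no free range\n");
		return 1;
	}
	toy_clear(start, 4);            /* ... and mark them allocated */
	printf("allocated 4 pages at offset %d\n", start);

	toy_set(start, 4);              /* later: mark them free again */
	return 0;
}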

File tree: 4 files changed, +304, -15 lines


kernel/bpf/Makefile

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o
 obj-$(CONFIG_BPF_JIT) += trampoline.o
 obj-$(CONFIG_BPF_SYSCALL) += btf.o memalloc.o
 ifeq ($(CONFIG_MMU)$(CONFIG_64BIT),yy)
-obj-$(CONFIG_BPF_SYSCALL) += arena.o
+obj-$(CONFIG_BPF_SYSCALL) += arena.o range_tree.o
 endif
 obj-$(CONFIG_BPF_JIT) += dispatcher.o
 ifeq ($(CONFIG_NET),y)

kernel/bpf/arena.c

Lines changed: 20 additions & 14 deletions
@@ -6,6 +6,7 @@
 #include <linux/btf_ids.h>
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
+#include "range_tree.h"
 
 /*
  * bpf_arena is a sparsely populated shared memory region between bpf program and
@@ -45,7 +46,7 @@ struct bpf_arena {
 	u64 user_vm_start;
 	u64 user_vm_end;
 	struct vm_struct *kern_vm;
-	struct maple_tree mt;
+	struct range_tree rt;
 	struct list_head vma_list;
 	struct mutex lock;
 };
@@ -132,7 +133,8 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
 
 	INIT_LIST_HEAD(&arena->vma_list);
 	bpf_map_init_from_attr(&arena->map, attr);
-	mt_init_flags(&arena->mt, MT_FLAGS_ALLOC_RANGE);
+	range_tree_init(&arena->rt);
+	range_tree_set(&arena->rt, 0, attr->max_entries);
 	mutex_init(&arena->lock);
 
 	return &arena->map;
@@ -183,7 +185,7 @@ static void arena_map_free(struct bpf_map *map)
 	apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena),
 				     KERN_VM_SZ - GUARD_SZ, existing_page_cb, NULL);
 	free_vm_area(arena->kern_vm);
-	mtree_destroy(&arena->mt);
+	range_tree_destroy(&arena->rt);
 	bpf_map_area_free(arena);
 }
 
@@ -274,20 +276,20 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
 		/* User space requested to segfault when page is not allocated by bpf prog */
 		return VM_FAULT_SIGSEGV;
 
-	ret = mtree_insert(&arena->mt, vmf->pgoff, MT_ENTRY, GFP_KERNEL);
+	ret = range_tree_clear(&arena->rt, vmf->pgoff, 1);
 	if (ret)
 		return VM_FAULT_SIGSEGV;
 
 	/* Account into memcg of the process that created bpf_arena */
 	ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);
 	if (ret) {
-		mtree_erase(&arena->mt, vmf->pgoff);
+		range_tree_set(&arena->rt, vmf->pgoff, 1);
 		return VM_FAULT_SIGSEGV;
 	}
 
 	ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page);
 	if (ret) {
-		mtree_erase(&arena->mt, vmf->pgoff);
+		range_tree_set(&arena->rt, vmf->pgoff, 1);
 		__free_page(page);
 		return VM_FAULT_SIGSEGV;
 	}
@@ -444,12 +446,16 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
 
 	guard(mutex)(&arena->lock);
 
-	if (uaddr)
-		ret = mtree_insert_range(&arena->mt, pgoff, pgoff + page_cnt - 1,
-					 MT_ENTRY, GFP_KERNEL);
-	else
-		ret = mtree_alloc_range(&arena->mt, &pgoff, MT_ENTRY,
-					page_cnt, 0, page_cnt_max - 1, GFP_KERNEL);
+	if (uaddr) {
+		ret = is_range_tree_set(&arena->rt, pgoff, page_cnt);
+		if (ret)
+			goto out_free_pages;
+		ret = range_tree_clear(&arena->rt, pgoff, page_cnt);
+	} else {
+		ret = pgoff = range_tree_find(&arena->rt, page_cnt);
+		if (pgoff >= 0)
+			ret = range_tree_clear(&arena->rt, pgoff, page_cnt);
+	}
 	if (ret)
 		goto out_free_pages;
 
@@ -476,7 +482,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
 	kvfree(pages);
 	return clear_lo32(arena->user_vm_start) + uaddr32;
 out:
-	mtree_erase(&arena->mt, pgoff);
+	range_tree_set(&arena->rt, pgoff, page_cnt);
 out_free_pages:
 	kvfree(pages);
 	return 0;
@@ -516,7 +522,7 @@ static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
 
 	pgoff = compute_pgoff(arena, uaddr);
 	/* clear range */
-	mtree_store_range(&arena->mt, pgoff, pgoff + page_cnt - 1, NULL, GFP_KERNEL);
+	range_tree_set(&arena->rt, pgoff, page_cnt);
 
 	if (page_cnt > 1)
 		/* bulk zap if multiple pages being freed */
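
The arena_alloc_pages() hunk is the heart of the conversion: the maple tree stored allocated ranges, while range_tree stores free ones, so reserving pages now means clearing bits. Below is a condensed, illustration-only sketch of just that reservation step, using only helpers added by this commit; reserve_pages and its signature are invented for the sketch and are not code from the patch.

/* Illustration-only condensation of the reservation logic in
 * arena_alloc_pages(); the arena lock must already be held, since
 * range_tree relies on an external lock.
 */
static long reserve_pages(struct range_tree *rt, long pgoff, long page_cnt,
			  bool fixed_addr)
{
	long ret;

	if (fixed_addr) {
		/* Caller asked for a specific offset: the whole range must be free */
		ret = is_range_tree_set(rt, pgoff, page_cnt);	/* 0 if fully free, -ESRCH otherwise */
		if (ret)
			return ret;
		ret = range_tree_clear(rt, pgoff, page_cnt);	/* mark pages as allocated */
		return ret ? ret : pgoff;
	}

	/* No fixed address: best-fit search for page_cnt contiguous free pages */
	ret = pgoff = range_tree_find(rt, page_cnt);		/* start offset or -ENOENT */
	if (pgoff >= 0)
		ret = range_tree_clear(rt, pgoff, page_cnt);	/* may fail with -ENOMEM */
	return ret < 0 ? ret : pgoff;
}

If a later step fails, the real code undoes the reservation with range_tree_set(&arena->rt, pgoff, page_cnt), as the out: label in the hunk above shows.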

kernel/bpf/range_tree.c

Lines changed: 262 additions & 0 deletions
@@ -0,0 +1,262 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/interval_tree_generic.h>
#include <linux/slab.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/bpf.h>
#include "range_tree.h"

/*
 * struct range_tree is a data structure used to allocate contiguous memory
 * ranges in bpf arena. It's a large bitmap. The contiguous sequence of bits is
 * represented by struct range_node or 'rn' for short.
 * rn->rn_rbnode links it into an interval tree while
 * rn->rb_range_size links it into a second rbtree sorted by size of the range.
 * __find_range() performs binary search and best fit algorithm to find the
 * range less or equal requested size.
 * range_tree_clear/set() clears or sets a range of bits in this bitmap. The
 * adjacent ranges are merged or split at the same time.
 *
 * The split/merge logic is based/borrowed from XFS's xbitmap32 added
 * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
 *
 * The implementation relies on external lock to protect rbtree-s.
 * The alloc/free of range_node-s is done via bpf_mem_alloc.
 *
 * bpf arena is using range_tree to represent unallocated slots.
 * At init time:
 *   range_tree_set(rt, 0, max);
 * Then:
 *   start = range_tree_find(rt, len);
 *   if (start >= 0)
 *     range_tree_clear(rt, start, len);
 * to find free range and mark slots as allocated and later:
 *   range_tree_set(rt, start, len);
 * to mark as unallocated after use.
 */
struct range_node {
	struct rb_node rn_rbnode;
	struct rb_node rb_range_size;
	u32 rn_start;
	u32 rn_last; /* inclusive */
	u32 __rn_subtree_last;
};

static struct range_node *rb_to_range_node(struct rb_node *rb)
{
	return rb_entry(rb, struct range_node, rb_range_size);
}

static u32 rn_size(struct range_node *rn)
{
	return rn->rn_last - rn->rn_start + 1;
}

/* Find range that fits best to requested size */
static inline struct range_node *__find_range(struct range_tree *rt, u32 len)
{
	struct rb_node *rb = rt->range_size_root.rb_root.rb_node;
	struct range_node *best = NULL;

	while (rb) {
		struct range_node *rn = rb_to_range_node(rb);

		if (len <= rn_size(rn)) {
			best = rn;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return best;
}

s64 range_tree_find(struct range_tree *rt, u32 len)
{
	struct range_node *rn;

	rn = __find_range(rt, len);
	if (!rn)
		return -ENOENT;
	return rn->rn_start;
}

/* Insert the range into rbtree sorted by the range size */
static inline void __range_size_insert(struct range_node *rn,
				       struct rb_root_cached *root)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 size = rn_size(rn);
	bool leftmost = true;

	while (*link) {
		rb = *link;
		if (size > rn_size(rb_to_range_node(rb))) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&rn->rb_range_size, rb, link);
	rb_insert_color_cached(&rn->rb_range_size, root, leftmost);
}

#define START(node) ((node)->rn_start)
#define LAST(node)  ((node)->rn_last)

INTERVAL_TREE_DEFINE(struct range_node, rn_rbnode, u32,
		     __rn_subtree_last, START, LAST,
		     static inline __maybe_unused,
		     __range_it)

static inline __maybe_unused void
range_it_insert(struct range_node *rn, struct range_tree *rt)
{
	__range_size_insert(rn, &rt->range_size_root);
	__range_it_insert(rn, &rt->it_root);
}

static inline __maybe_unused void
range_it_remove(struct range_node *rn, struct range_tree *rt)
{
	rb_erase_cached(&rn->rb_range_size, &rt->range_size_root);
	RB_CLEAR_NODE(&rn->rb_range_size);
	__range_it_remove(rn, &rt->it_root);
}

static inline __maybe_unused struct range_node *
range_it_iter_first(struct range_tree *rt, u32 start, u32 last)
{
	return __range_it_iter_first(&rt->it_root, start, last);
}

/* Clear the range in this range tree */
int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *new_rn;
	struct range_node *rn;

	while ((rn = range_it_iter_first(rt, start, last))) {
		if (rn->rn_start < start && rn->rn_last > last) {
			u32 old_last = rn->rn_last;

			/* Overlaps with the entire clearing range */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);

			/* Add a range */
			new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
			if (!new_rn)
				return -ENOMEM;
			new_rn->rn_start = last + 1;
			new_rn->rn_last = old_last;
			range_it_insert(new_rn, rt);
		} else if (rn->rn_start < start) {
			/* Overlaps with the left side of the clearing range */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);
		} else if (rn->rn_last > last) {
			/* Overlaps with the right side of the clearing range */
			range_it_remove(rn, rt);
			rn->rn_start = last + 1;
			range_it_insert(rn, rt);
			break;
		} else {
			/* in the middle of the clearing range */
			range_it_remove(rn, rt);
			bpf_mem_free(&bpf_global_ma, rn);
		}
	}
	return 0;
}

/* Is the whole range set ? */
int is_range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *left;

	/* Is this whole range set ? */
	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;
	return -ESRCH;
}

/* Set the range in this range tree */
int range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *right;
	struct range_node *left;
	int err;

	/* Is this whole range already set ? */
	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	err = range_tree_clear(rt, start, len);
	if (err)
		return err;

	/* Do we have a left-adjacent range ? */
	left = range_it_iter_first(rt, start - 1, start - 1);
	if (left && left->rn_last + 1 != start)
		return -EFAULT;

	/* Do we have a right-adjacent range ? */
	right = range_it_iter_first(rt, last + 1, last + 1);
	if (right && right->rn_start != last + 1)
		return -EFAULT;

	if (left && right) {
		/* Combine left and right adjacent ranges */
		range_it_remove(left, rt);
		range_it_remove(right, rt);
		left->rn_last = right->rn_last;
		range_it_insert(left, rt);
		bpf_mem_free(&bpf_global_ma, right);
	} else if (left) {
		/* Combine with the left range */
		range_it_remove(left, rt);
		left->rn_last = last;
		range_it_insert(left, rt);
	} else if (right) {
		/* Combine with the right range */
		range_it_remove(right, rt);
		right->rn_start = start;
		range_it_insert(right, rt);
	} else {
		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
		if (!left)
			return -ENOMEM;
		left->rn_start = start;
		left->rn_last = last;
		range_it_insert(left, rt);
	}
	return 0;
}

void range_tree_destroy(struct range_tree *rt)
{
	struct range_node *rn;

	while ((rn = range_it_iter_first(rt, 0, -1U))) {
		range_it_remove(rn, rt);
		bpf_mem_free(&bpf_global_ma, rn);
	}
}

void range_tree_init(struct range_tree *rt)
{
	rt->it_root = RB_ROOT_CACHED;
	rt->range_size_root = RB_ROOT_CACHED;
}
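
For readers new to the XFS-style split/merge logic, here is a small worked trace (illustrative only, with made-up values) of how the free ranges evolve under range_tree_clear() and range_tree_set():

/* Suppose the tree holds one free range covering pages 0..9: {[0,9]}.
 *
 * range_tree_clear(rt, 3, 4):
 *   [0,9] straddles the cleared span [3,6], so the node is trimmed to
 *   [0,2] and a new node [7,9] is allocated -> tree = {[0,2], [7,9]}
 *
 * range_tree_set(rt, 3, 4):
 *   [0,2] is left-adjacent and [7,9] is right-adjacent, so both are
 *   removed, [0,2] is extended to [0,9] and reinserted, and the [7,9]
 *   node is freed -> tree = {[0,9]} again
 */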

kernel/bpf/range_tree.h

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#ifndef _RANGE_TREE_H
#define _RANGE_TREE_H 1

struct range_tree {
	/* root of interval tree */
	struct rb_root_cached it_root;
	/* root of rbtree of interval sizes */
	struct rb_root_cached range_size_root;
};

void range_tree_init(struct range_tree *rt);
void range_tree_destroy(struct range_tree *rt);

int range_tree_clear(struct range_tree *rt, u32 start, u32 len);
int range_tree_set(struct range_tree *rt, u32 start, u32 len);
int is_range_tree_set(struct range_tree *rt, u32 start, u32 len);
s64 range_tree_find(struct range_tree *rt, u32 len);

#endif
