 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include <linux/ftrace.h>
+#include <linux/rbtree_latch.h>
 
 /* dummy _ops. The verifier will operate on target program's ops. */
 const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -16,11 +17,12 @@ const struct bpf_prog_ops bpf_extension_prog_ops = {
 
 #define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)
 
 static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
+static struct latch_tree_root image_tree __cacheline_aligned;
 
-/* serializes access to trampoline_table */
+/* serializes access to trampoline_table and image_tree */
 static DEFINE_MUTEX(trampoline_mutex);
 
-void *bpf_jit_alloc_exec_page(void)
+static void *bpf_jit_alloc_exec_page(void)
 {
 	void *image;
 
@@ -36,6 +38,64 @@ void *bpf_jit_alloc_exec_page(void)
 	return image;
 }
 
+static __always_inline bool image_tree_less(struct latch_tree_node *a,
+					    struct latch_tree_node *b)
+{
+	struct bpf_image *ia = container_of(a, struct bpf_image, tnode);
+	struct bpf_image *ib = container_of(b, struct bpf_image, tnode);
+
+	return ia < ib;
+}
+
+static __always_inline int image_tree_comp(void *addr, struct latch_tree_node *n)
+{
+	void *image = container_of(n, struct bpf_image, tnode);
+
+	if (addr < image)
+		return -1;
+	if (addr >= image + PAGE_SIZE)
+		return 1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops image_tree_ops = {
+	.less	= image_tree_less,
+	.comp	= image_tree_comp,
+};
+
+static void *__bpf_image_alloc(bool lock)
+{
+	struct bpf_image *image;
+
+	image = bpf_jit_alloc_exec_page();
+	if (!image)
+		return NULL;
+
+	if (lock)
+		mutex_lock(&trampoline_mutex);
+	latch_tree_insert(&image->tnode, &image_tree, &image_tree_ops);
+	if (lock)
+		mutex_unlock(&trampoline_mutex);
+	return image->data;
+}
+
+void *bpf_image_alloc(void)
+{
+	return __bpf_image_alloc(true);
+}
+
+bool is_bpf_image_address(unsigned long addr)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = latch_tree_find((void *) addr, &image_tree, &image_tree_ops) != NULL;
+	rcu_read_unlock();
+
+	return ret;
+}
+
 struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 {
 	struct bpf_trampoline *tr;
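
The helpers above rely on struct bpf_image and, further down, BPF_IMAGE_SIZE, neither of which is defined in this file; they come from a companion include/linux/bpf.h change that is not part of this diff. Reconstructed from how the fields are used here (the container_of() calls on tnode/data and the [image, image + PAGE_SIZE) check in image_tree_comp()), that addition is presumably along these lines; this is a sketch, not the verbatim hunk:

/* Sketch of the companion include/linux/bpf.h addition, reconstructed from
 * the usage in this file rather than copied from the patch.  The latch-tree
 * node sits at the start of the allocated page and the executable image
 * follows it, so BPF_IMAGE_SIZE is what remains of the page for code.
 */
struct bpf_image {
	struct latch_tree_node tnode;
	u8 data[];
};
#define BPF_IMAGE_SIZE	(PAGE_SIZE - sizeof(struct bpf_image))

void *bpf_image_alloc(void);
bool is_bpf_image_address(unsigned long address);

With that layout, any address inside the allocated page is considered part of the image, which is exactly what the latch-tree lookup in is_bpf_image_address() reports. The remaining hunks switch the trampoline code over to this layout:
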
@@ -56,7 +116,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 		goto out;
 
 	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
-	image = bpf_jit_alloc_exec_page();
+	image = __bpf_image_alloc(false);
 	if (!image) {
 		kfree(tr);
 		tr = NULL;
@@ -131,14 +191,14 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
 }
 
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
- * bytes on x86. Pick a number to fit into PAGE_SIZE / 2
+ * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
  */
 #define BPF_MAX_TRAMP_PROGS 40
 
 static int bpf_trampoline_update(struct bpf_trampoline *tr)
 {
-	void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
-	void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
+	void *old_image = tr->image + ((tr->selector + 1) & 1) * BPF_IMAGE_SIZE/2;
+	void *new_image = tr->image + (tr->selector & 1) * BPF_IMAGE_SIZE/2;
 	struct bpf_prog *progs_to_run[BPF_MAX_TRAMP_PROGS];
 	int fentry_cnt = tr->progs_cnt[BPF_TRAMP_FENTRY];
 	int fexit_cnt = tr->progs_cnt[BPF_TRAMP_FEXIT];
@@ -174,7 +234,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	 */
 	synchronize_rcu_tasks();
 
-	err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
+	err = arch_prepare_bpf_trampoline(new_image, new_image + BPF_IMAGE_SIZE / 2,
 					  &tr->func.model, flags,
 					  fentry, fentry_cnt,
 					  fexit, fexit_cnt,
@@ -284,6 +344,8 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
 
 void bpf_trampoline_put(struct bpf_trampoline *tr)
 {
+	struct bpf_image *image;
+
 	if (!tr)
 		return;
 	mutex_lock(&trampoline_mutex);
@@ -294,9 +356,11 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 		goto out;
 	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
 		goto out;
+	image = container_of(tr->image, struct bpf_image, data);
+	latch_tree_erase(&image->tnode, &image_tree, &image_tree_ops);
 	/* wait for tasks to get out of trampoline before freeing it */
 	synchronize_rcu_tasks();
-	bpf_jit_free_exec(tr->image);
+	bpf_jit_free_exec(image);
 	hlist_del(&tr->hlist);
 	kfree(tr);
 out:
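
For context, the non-static pair bpf_image_alloc()/is_bpf_image_address() is meant to be consumed outside this file (bpf_jit_alloc_exec_page() becomes static here, so other users such as the BPF dispatcher presumably switch to bpf_image_alloc() elsewhere in the same change). A minimal, hypothetical sketch of such a caller; the names are illustrative only and not from the patch:

/* Hypothetical consumer of the new API (illustrative names).  It allocates a
 * tracked executable page, emits code into it, and lets unwind/symbol code
 * recognize addresses inside that page via is_bpf_image_address().
 */
#include <linux/bpf.h>
#include <linux/errno.h>

static void *example_image;	/* illustrative, not part of the patch */

static int example_image_init(void)
{
	example_image = bpf_image_alloc();
	if (!example_image)
		return -ENOMEM;
	/* up to BPF_IMAGE_SIZE bytes of generated code may live here */
	return 0;
}

static bool example_addr_in_image(unsigned long addr)
{
	/* lockless: latch-tree lookups only need the RCU read-side section
	 * that is_bpf_image_address() takes internally
	 */
	return is_bpf_image_address(addr);
}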