3
3
#ifndef __SKEL_INTERNAL_H
4
4
#define __SKEL_INTERNAL_H
5
5
6
+ #ifdef __KERNEL__
7
+ #include <linux/fdtable.h>
8
+ #include <linux/mm.h>
9
+ #include <linux/mman.h>
10
+ #include <linux/slab.h>
11
+ #include <linux/bpf.h>
12
+ #else
6
13
#include <unistd.h>
7
14
#include <sys/syscall.h>
8
15
#include <sys/mman.h>
16
+ #include <stdlib.h>
17
+ #include "bpf.h"
18
+ #endif
9
19
10
20
#ifndef __NR_bpf
11
21
# if defined(__mips__ ) && defined(_ABIO32 )
25
35
* requested during loader program generation.
26
36
*/
27
37
/* Map slot shared between the generated skeleton and the loader prog.
 * max_entries/initial_value are inputs consumed by the loader prog;
 * it reports the created map back through map_fd.
 */
struct bpf_map_desc {
	/* output of the loader prog */
	int map_fd;
	/* input for the loader prog */
	__u32 max_entries;
	__aligned_u64 initial_value;
};
40
44
/* Per-program slot: the loader prog stores the loaded program's FD here. */
struct bpf_prog_desc {
	int prog_fd;
};
43
47
48
/* bpf_loader_ctx::flags bits */
enum {
	/* Set when the skeleton context was allocated in kernel context
	 * (see skel_alloc()); the loader prog keys off this flag to read
	 * initial map values via bpf_probe_read_kernel() instead of
	 * bpf_copy_from_user().
	 */
	BPF_SKEL_KERNEL = (1ULL << 0),
};
51
+
44
52
struct bpf_loader_ctx {
45
- size_t sz ;
53
+ __u32 sz ;
54
+ __u32 flags ;
46
55
__u32 log_level ;
47
56
__u32 log_size ;
48
57
__u64 log_buf ;
@@ -57,12 +66,144 @@ struct bpf_load_and_run_opts {
57
66
const char * errstr ;
58
67
};
59
68
69
/* In-kernel entry point for bpf commands (defined in kernel/bpf/syscall.c). */
long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);

/* Issue a bpf command from either context: a direct call when compiled
 * into the kernel, a raw syscall in userspace (keeps lskels free of a
 * libbpf dependency).
 */
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			       unsigned int size)
{
#ifdef __KERNEL__
	return bpf_sys_bpf(cmd, attr, size);
#else
	return syscall(__NR_bpf, cmd, attr, size);
#endif
}
80
+
81
#ifdef __KERNEL__
/* Map the userspace-style close() emitted by generated lskels onto the
 * kernel's close_fd().
 */
static inline int close(int fd)
{
	return close_fd(fd);
}
86
+
87
+ static inline void * skel_alloc (size_t size )
88
+ {
89
+ struct bpf_loader_ctx * ctx = kzalloc (size , GFP_KERNEL );
90
+
91
+ if (!ctx )
92
+ return NULL ;
93
+ ctx -> flags |= BPF_SKEL_KERNEL ;
94
+ return ctx ;
95
+ }
96
+
97
/* Free a context obtained from skel_alloc(). */
static inline void skel_free(const void *p)
{
	kfree(p);
}
101
+
102
+ /* skel->bss/rodata maps are populated the following way:
103
+ *
104
+ * For kernel use:
105
+ * skel_prep_map_data() allocates kernel memory that kernel module can directly access.
106
+ * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
107
+ * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
108
+ * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
109
+ * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
110
 * is not necessary.
111
+ *
112
+ * For user space:
113
+ * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
114
+ * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
115
+ * The loader program will perform copy_from_user() from maps.rodata.initial_value.
116
+ * skel_finalize_map_data() remaps bpf array map value from the kernel memory into
117
+ * skel->rodata address.
118
+ *
119
+ * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for
120
+ * both kernel and user space. The generated loader program does
121
+ * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
122
+ * depending on bpf_loader_ctx->flags.
123
+ */
124
+ static inline void skel_free_map_data (void * p , __u64 addr , size_t sz )
125
+ {
126
+ if (addr != ~0ULL )
127
+ kvfree (p );
128
+ /* When addr == ~0ULL the 'p' points to
129
+ * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
130
+ */
64
131
}
65
132
133
+ static inline void * skel_prep_map_data (const void * val , size_t mmap_sz , size_t val_sz )
134
+ {
135
+ void * addr ;
136
+
137
+ addr = kvmalloc (val_sz , GFP_KERNEL );
138
+ if (!addr )
139
+ return NULL ;
140
+ memcpy (addr , val , val_sz );
141
+ return addr ;
142
+ }
143
+
144
/* Kernel-side finalize: drop the kvmalloc'd initial-value copy, mark it
 * consumed (~0ULL) so skel_free_map_data() skips kvfree(), and return a
 * pointer to the array map's value so skel->rodata/bss reference live
 * map memory.
 */
static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	struct bpf_map *map;
	void *addr = NULL;

	/* Free the copy made by skel_prep_map_data(); it has already been
	 * consumed by the loader prog.
	 */
	kvfree((void *) (long) *init_val);
	/* Signal skel_free_map_data() that kvfree() must not run. */
	*init_val = ~0ULL;

	/* At this point bpf_load_and_run() finished without error and
	 * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
	 */
	map = bpf_map_get(fd);
	if (IS_ERR(map))
		return NULL;
	if (map->map_type != BPF_MAP_TYPE_ARRAY)
		goto out;
	addr = ((struct bpf_array *)map)->value;
	/* the addr stays valid, since FD is not closed */
out:
	bpf_map_put(map);
	return addr;
}
166
+
167
+ #else
168
+
169
/* Userspace: allocate a zero-initialized skeleton context. */
static inline void *skel_alloc(size_t size)
{
	void *mem;

	mem = calloc(1, size);
	return mem;
}
173
+
174
/* Userspace counterpart: release a context obtained from skel_alloc(). */
static inline void skel_free(void *p)
{
	free(p);
}
178
+
179
/* Unmap memory produced by skel_prep_map_data()/skel_finalize_map_data();
 * 'addr' is only used by the kernel variant and is ignored here.
 */
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	munmap(p, sz);
}
183
+
184
+ static inline void * skel_prep_map_data (const void * val , size_t mmap_sz , size_t val_sz )
185
+ {
186
+ void * addr ;
187
+
188
+ addr = mmap (NULL , mmap_sz , PROT_READ | PROT_WRITE ,
189
+ MAP_SHARED | MAP_ANONYMOUS , -1 , 0 );
190
+ if (addr == (void * ) -1 )
191
+ return NULL ;
192
+ memcpy (addr , val , val_sz );
193
+ return addr ;
194
+ }
195
+
196
+ static inline void * skel_finalize_map_data (__u64 * init_val , size_t mmap_sz , int flags , int fd )
197
+ {
198
+ void * addr ;
199
+
200
+ addr = mmap ((void * ) (long ) * init_val , mmap_sz , flags , MAP_SHARED | MAP_FIXED , fd , 0 );
201
+ if (addr == (void * ) -1 )
202
+ return NULL ;
203
+ return addr ;
204
+ }
205
+ #endif
206
+
66
207
static inline int skel_closenz (int fd )
67
208
{
68
209
if (fd > 0 )
@@ -136,22 +277,28 @@ static inline int skel_link_create(int prog_fd, int target_fd,
136
277
return skel_sys_bpf (BPF_LINK_CREATE , & attr , attr_sz );
137
278
}
138
279
280
#ifdef __KERNEL__
/* In-kernel, skel_sys_bpf() already returns the negative error code;
 * there is no errno to translate.
 */
#define set_err
#else
/* Userspace: capture the failing syscall's errno as a negative error code. */
#define set_err err = -errno
#endif
139
286
static inline int bpf_load_and_run (struct bpf_load_and_run_opts * opts )
140
287
{
141
288
int map_fd = -1 , prog_fd = -1 , key = 0 , err ;
142
289
union bpf_attr attr ;
143
290
144
- map_fd = skel_map_create (BPF_MAP_TYPE_ARRAY , "__loader.map" , 4 , opts -> data_sz , 1 );
291
+ err = map_fd = skel_map_create (BPF_MAP_TYPE_ARRAY , "__loader.map" , 4 , opts -> data_sz , 1 );
145
292
if (map_fd < 0 ) {
146
293
opts -> errstr = "failed to create loader map" ;
147
- err = - errno ;
294
+ set_err ;
148
295
goto out ;
149
296
}
150
297
151
298
err = skel_map_update_elem (map_fd , & key , opts -> data , 0 );
152
299
if (err < 0 ) {
153
300
opts -> errstr = "failed to update loader map" ;
154
- err = - errno ;
301
+ set_err ;
155
302
goto out ;
156
303
}
157
304
@@ -166,10 +313,10 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
166
313
attr .log_size = opts -> ctx -> log_size ;
167
314
attr .log_buf = opts -> ctx -> log_buf ;
168
315
attr .prog_flags = BPF_F_SLEEPABLE ;
169
- prog_fd = skel_sys_bpf (BPF_PROG_LOAD , & attr , sizeof (attr ));
316
+ err = prog_fd = skel_sys_bpf (BPF_PROG_LOAD , & attr , sizeof (attr ));
170
317
if (prog_fd < 0 ) {
171
318
opts -> errstr = "failed to load loader prog" ;
172
- err = - errno ;
319
+ set_err ;
173
320
goto out ;
174
321
}
175
322
@@ -181,10 +328,12 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
181
328
if (err < 0 || (int )attr .test .retval < 0 ) {
182
329
opts -> errstr = "failed to execute loader prog" ;
183
330
if (err < 0 ) {
184
- err = - errno ;
331
+ set_err ;
185
332
} else {
186
333
err = (int )attr .test .retval ;
334
+ #ifndef __KERNEL__
187
335
errno = - err ;
336
+ #endif
188
337
}
189
338
goto out ;
190
339
}
0 commit comments