Commit 6fe65f1

Alexei Starovoitov authored and Daniel Borkmann committed
libbpf: Prepare light skeleton for the kernel.
Prepare the light skeleton to be used in a kernel module and in user space.

The look and feel of lskel.h is mostly the same, with the difference that in
user space skel->rodata is the same pointer before and after the skel_load
operation, while in the kernel the skel->rodata after skel_open and the
skel->rodata after skel_load are different pointers.

Typical usage of the skeleton remains the same for kernel and user space:

skel = my_bpf__open();
skel->rodata->my_global_var = init_val;
err = my_bpf__load(skel);
err = my_bpf__attach(skel);
// access skel->rodata->my_global_var;
// access skel->bss->another_var;

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
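To make the flow above concrete, here is a minimal sketch of a kernel-module-flavored caller, assuming a hypothetical my_bpf.lskel.h generated by "bpftool gen skeleton -L"; my_bpf, my_global_var, and another_var are the illustrative names from the message above, and my_bpf__destroy() is the usual skeleton teardown. It is a reading aid, not code from this commit:

#include "my_bpf.lskel.h"	/* hypothetical lskel generated by "bpftool gen skeleton -L" */

static int run_my_bpf(void)
{
	struct my_bpf *skel;
	int err;

	skel = my_bpf__open();
	if (!skel)
		return -ENOMEM;

	/* Before load, skel->rodata points at writable initial-value memory
	 * (anonymous mmap in user space, kvmalloc-ed memory in the kernel).
	 */
	skel->rodata->my_global_var = 42;

	err = my_bpf__load(skel);
	if (err)
		goto out;
	err = my_bpf__attach(skel);
	if (err)
		goto out;

	/* After load, user space still sees the same skel->rodata pointer;
	 * in the kernel skel->rodata now points at the bpf array map value.
	 */
	if (skel->rodata->my_global_var != 42 || skel->bss->another_var < 0)
		err = -EINVAL;
out:
	my_bpf__destroy(skel);
	return err;
}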
1 parent b1d18a7 commit 6fe65f1


2 files changed: +179 additions, -21 deletions


tools/lib/bpf/gen_loader.c

Lines changed: 12 additions & 3 deletions
@@ -1043,18 +1043,27 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
 	value = add_data(gen, pvalue, value_size);
 	key = add_data(gen, &zero, sizeof(zero));
 
-	/* if (map_desc[map_idx].initial_value)
-	 *	copy_from_user(value, initial_value, value_size);
+	/* if (map_desc[map_idx].initial_value) {
+	 *	if (ctx->flags & BPF_SKEL_KERNEL)
+	 *		bpf_probe_read_kernel(value, value_size, initial_value);
+	 *	else
+	 *		bpf_copy_from_user(value, value_size, initial_value);
+	 * }
 	 */
 	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
 			      sizeof(struct bpf_loader_ctx) +
 			      sizeof(struct bpf_map_desc) * map_idx +
 			      offsetof(struct bpf_map_desc, initial_value)));
-	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
+	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
 	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
 					 0, 0, 0, value));
 	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
+	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+			      offsetof(struct bpf_loader_ctx, flags)));
+	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
 	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
+	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
+	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
 
 	map_update_attr = add_data(gen, &attr, attr_size);
 	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
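For readers counting jump offsets: the conditional initial-value copy now spans eight BPF instructions (BPF_LD_IMM64_RAW_FULL expands to two), which is why the BPF_JEQ skip offset above changes from 4 to 8. A rough C-level sketch of what the emitted sequence does (a reading aid derived from the hunk above, not generated code):

/* Loader-prog behavior sketched as C; registers as used in the emitted code. */
if (map_desc[map_idx].initial_value) {		/* r3 test: BPF_JEQ skips the 8 insns below */
	void *dst  = value_blob;		/* r1: BPF_LD_IMM64_RAW_FULL, 2 insns */
	__u32 size = value_size;		/* r2: BPF_MOV64_IMM, 1 insn */
	__u32 fl   = ctx->flags;		/* r0: BPF_LDX_MEM, 1 insn */
	if (fl & BPF_SKEL_KERNEL)		/* BPF_JSET jumps over the user-space path, 1 insn */
		bpf_probe_read_kernel(dst, size, initial_value);	/* 1 insn */
	else
		bpf_copy_from_user(dst, size, initial_value);		/* 1 insn, then BPF_JA over the kernel path, 1 insn */
}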

tools/lib/bpf/skel_internal.h

Lines changed: 167 additions & 18 deletions
@@ -3,9 +3,19 @@
 #ifndef __SKEL_INTERNAL_H
 #define __SKEL_INTERNAL_H
 
+#ifdef __KERNEL__
+#include <linux/fdtable.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#else
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <sys/mman.h>
+#include <stdlib.h>
+#include "bpf.h"
+#endif
 
 #ifndef __NR_bpf
 # if defined(__mips__) && defined(_ABIO32)
@@ -25,24 +35,23 @@
  * requested during loader program generation.
  */
 struct bpf_map_desc {
-	union {
-		/* input for the loader prog */
-		struct {
-			__aligned_u64 initial_value;
-			__u32 max_entries;
-		};
-		/* output of the loader prog */
-		struct {
-			int map_fd;
-		};
-	};
+	/* output of the loader prog */
+	int map_fd;
+	/* input for the loader prog */
+	__u32 max_entries;
+	__aligned_u64 initial_value;
 };
 struct bpf_prog_desc {
 	int prog_fd;
 };
 
+enum {
+	BPF_SKEL_KERNEL = (1ULL << 0),
+};
+
 struct bpf_loader_ctx {
-	size_t sz;
+	__u32 sz;
+	__u32 flags;
 	__u32 log_level;
 	__u32 log_size;
 	__u64 log_buf;
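For orientation: the loader program addresses these descriptors as a flat blob that begins with struct bpf_loader_ctx and is followed by the map descriptors, which matches the sizeof(struct bpf_loader_ctx) + sizeof(struct bpf_map_desc) * map_idx offset computation in the gen_loader.c hunk earlier in this commit. A hypothetical generated lskel would therefore lay out its context roughly like this (a sketch; names are illustrative, not part of this commit):

struct my_bpf {
	struct bpf_loader_ctx ctx;		/* sz, flags, log_level, log_size, log_buf */
	struct {
		struct bpf_map_desc rodata;	/* map_idx 0: initial_value/max_entries in, map_fd out */
		struct bpf_map_desc bss;	/* map_idx 1 */
	} maps;
	struct {
		struct bpf_prog_desc my_prog;	/* prog_fd out */
	} progs;
	/* the real lskel also carries the skel->rodata / skel->bss pointers, etc. */
};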
@@ -57,12 +66,144 @@ struct bpf_load_and_run_opts {
 	const char *errstr;
 };
 
+long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
+
 static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 			       unsigned int size)
 {
+#ifdef __KERNEL__
+	return bpf_sys_bpf(cmd, attr, size);
+#else
 	return syscall(__NR_bpf, cmd, attr, size);
+#endif
+}
+
+#ifdef __KERNEL__
+static inline int close(int fd)
+{
+	return close_fd(fd);
+}
+
+static inline void *skel_alloc(size_t size)
+{
+	struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);
+
+	if (!ctx)
+		return NULL;
+	ctx->flags |= BPF_SKEL_KERNEL;
+	return ctx;
+}
+
+static inline void skel_free(const void *p)
+{
+	kfree(p);
+}
+
+/* skel->bss/rodata maps are populated the following way:
+ *
+ * For kernel use:
+ * skel_prep_map_data() allocates kernel memory that kernel module can directly access.
+ * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
+ * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
+ * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
+ * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
+ * is not necessary.
+ *
+ * For user space:
+ * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
+ * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
+ * The loader program will perform copy_from_user() from maps.rodata.initial_value.
+ * skel_finalize_map_data() remaps bpf array map value from the kernel memory into
+ * skel->rodata address.
+ *
+ * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for
+ * both kernel and user space. The generated loader program does
+ * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
+ * depending on bpf_loader_ctx->flags.
+ */
+static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
+{
+	if (addr != ~0ULL)
+		kvfree(p);
+	/* When addr == ~0ULL the 'p' points to
+	 * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
+	 */
 }
 
+static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
+{
+	void *addr;
+
+	addr = kvmalloc(val_sz, GFP_KERNEL);
+	if (!addr)
+		return NULL;
+	memcpy(addr, val, val_sz);
+	return addr;
+}
+
+static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
+{
+	struct bpf_map *map;
+	void *addr = NULL;
+
+	kvfree((void *) (long) *init_val);
+	*init_val = ~0ULL;
+
+	/* At this point bpf_load_and_run() finished without error and
+	 * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
+	 */
+	map = bpf_map_get(fd);
+	if (IS_ERR(map))
+		return NULL;
+	if (map->map_type != BPF_MAP_TYPE_ARRAY)
+		goto out;
+	addr = ((struct bpf_array *)map)->value;
+	/* the addr stays valid, since FD is not closed */
+out:
+	bpf_map_put(map);
+	return addr;
+}
+
+#else
+
+static inline void *skel_alloc(size_t size)
+{
+	return calloc(1, size);
+}
+
+static inline void skel_free(void *p)
+{
+	free(p);
+}
+
+static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
+{
+	munmap(p, sz);
+}
+
+static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
+{
+	void *addr;
+
+	addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (addr == (void *) -1)
+		return NULL;
+	memcpy(addr, val, val_sz);
+	return addr;
+}
+
+static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
+{
+	void *addr;
+
+	addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
+	if (addr == (void *) -1)
+		return NULL;
+	return addr;
+}
+#endif
+
 static inline int skel_closenz(int fd)
 {
 	if (fd > 0)
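Tying the helpers above together, here is a hedged sketch of how a generated lskel's load step is expected to drive them; my_bpf, rodata_init, and mmap_sz are stand-ins, and the real sequence is emitted by "bpftool gen skeleton -L":

/* Illustrative only, not code from this commit. */
static int my_bpf__load_sketch(struct my_bpf *skel, struct bpf_load_and_run_opts *opts,
			       const void *rodata_init, size_t mmap_sz)
{
	int err;

	/* Stage the initial .rodata where the loader prog can read it:
	 * kvmalloc()+memcpy() in the kernel, anonymous mmap() in user space. */
	skel->rodata = skel_prep_map_data(rodata_init, mmap_sz, sizeof(*skel->rodata));
	if (!skel->rodata)
		return -ENOMEM;
	skel->maps.rodata.initial_value = (__u64)(long)skel->rodata;

	/* Run the embedded loader program; it copies initial_value into the map
	 * via bpf_copy_from_user() or bpf_probe_read_kernel(), per ctx->flags. */
	err = bpf_load_and_run(opts);
	if (err)
		return err;

	/* Re-point skel->rodata at the live map value: a MAP_FIXED remap of the
	 * array map in user space, the bpf_array value pointer in the kernel
	 * (initial_value becomes ~0ULL so skel_free_map_data() skips kvfree()). */
	skel->rodata = skel_finalize_map_data(&skel->maps.rodata.initial_value,
					      mmap_sz, PROT_READ, skel->maps.rodata.map_fd);
	if (!skel->rodata)
		return -ENOMEM;
	return 0;
}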
@@ -136,22 +277,28 @@ static inline int skel_link_create(int prog_fd, int target_fd,
 	return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
 }
 
+#ifdef __KERNEL__
+#define set_err
+#else
+#define set_err err = -errno
+#endif
+
 static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
 {
 	int map_fd = -1, prog_fd = -1, key = 0, err;
 	union bpf_attr attr;
 
-	map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
+	err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
 	if (map_fd < 0) {
 		opts->errstr = "failed to create loader map";
-		err = -errno;
+		set_err;
 		goto out;
 	}
 
 	err = skel_map_update_elem(map_fd, &key, opts->data, 0);
 	if (err < 0) {
 		opts->errstr = "failed to update loader map";
-		err = -errno;
+		set_err;
 		goto out;
 	}
 
@@ -166,10 +313,10 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
 	attr.log_size = opts->ctx->log_size;
 	attr.log_buf = opts->ctx->log_buf;
 	attr.prog_flags = BPF_F_SLEEPABLE;
-	prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 	if (prog_fd < 0) {
 		opts->errstr = "failed to load loader prog";
-		err = -errno;
+		set_err;
 		goto out;
 	}
 
@@ -181,10 +328,12 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
 	if (err < 0 || (int)attr.test.retval < 0) {
 		opts->errstr = "failed to execute loader prog";
 		if (err < 0) {
-			err = -errno;
+			set_err;
 		} else {
 			err = (int)attr.test.retval;
+#ifndef __KERNEL__
 			errno = -err;
+#endif
 		}
 		goto out;
 	}
