@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "test_attach_kprobe_sleepable.skel.h"
 #include "test_attach_probe.skel.h"
 
 /* this is how USDT semaphore is actually defined, except volatile modifier */
@@ -23,52 +24,24 @@ static noinline void trigger_func3(void)
 	asm volatile ("");
 }
 
+/* attach point for ref_ctr */
+static noinline void trigger_func4(void)
+{
+	asm volatile ("");
+}
+
 static char test_data[] = "test_data";
 
-void test_attach_probe(void)
+/* manual attach kprobe/kretprobe/uprobe/uretprobe testings */
+static void test_attach_probe_manual(struct test_attach_probe *skel)
 {
 	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
 	struct bpf_link *kprobe_link, *kretprobe_link;
 	struct bpf_link *uprobe_link, *uretprobe_link;
-	struct test_attach_probe *skel;
-	ssize_t uprobe_offset, ref_ctr_offset;
-	struct bpf_link *uprobe_err_link;
-	FILE *devnull;
-	bool legacy;
-
-	/* Check if new-style kprobe/uprobe API is supported.
-	 * Kernels that support new FD-based kprobe and uprobe BPF attachment
-	 * through perf_event_open() syscall expose
-	 * /sys/bus/event_source/devices/kprobe/type and
-	 * /sys/bus/event_source/devices/uprobe/type files, respectively. They
-	 * contain magic numbers that are passed as "type" field of
-	 * perf_event_attr. Lack of such file in the system indicates legacy
-	 * kernel with old-style kprobe/uprobe attach interface through
-	 * creating per-probe event through tracefs. For such cases
-	 * ref_ctr_offset feature is not supported, so we don't test it.
-	 */
-	legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0;
+	ssize_t uprobe_offset;
 
 	uprobe_offset = get_uprobe_offset(&trigger_func);
 	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
-		return;
-
-	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
-	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
-		return;
-
-	skel = test_attach_probe__open();
-	if (!ASSERT_OK_PTR(skel, "skel_open"))
-		return;
-
-	/* sleepable kprobe test case needs flags set before loading */
-	if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
-				       BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
-		goto cleanup;
-
-	if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
-		goto cleanup;
-	if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
 		goto cleanup;
 
 	/* manual-attach kprobe/kretprobe */
@@ -86,18 +59,9 @@ void test_attach_probe(void)
 		goto cleanup;
 	skel->links.handle_kretprobe = kretprobe_link;
 
-	/* auto-attachable kprobe and kretprobe */
-	skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
-	ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
-
-	skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
-	ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
-
-	if (!legacy)
-		ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
-
+	/* manual-attach uprobe/uretprobe */
+	uprobe_opts.ref_ctr_offset = 0;
 	uprobe_opts.retprobe = false;
-	uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
 	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      "/proc/self/exe",
@@ -107,12 +71,7 @@ void test_attach_probe(void)
 		goto cleanup;
 	skel->links.handle_uprobe = uprobe_link;
 
-	if (!legacy)
-		ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
-
-	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
 	uprobe_opts.retprobe = true;
-	uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
 	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 "/proc/self/exe",
@@ -121,12 +80,7 @@ void test_attach_probe(void)
 		goto cleanup;
 	skel->links.handle_uretprobe = uretprobe_link;
 
-	/* verify auto-attach fails for old-style uprobe definition */
-	uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
-	if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
-		       "auto-attach should fail for old-style name"))
-		goto cleanup;
-
+	/* attach uprobe by function name manually */
 	uprobe_opts.func_name = "trigger_func2";
 	uprobe_opts.retprobe = false;
 	uprobe_opts.ref_ctr_offset = 0;
@@ -138,11 +92,62 @@ void test_attach_probe(void)
 	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
 		goto cleanup;
 
+	/* trigger & validate kprobe && kretprobe */
+	usleep(1);
+
+	/* trigger & validate uprobe & uretprobe */
+	trigger_func();
+
+	/* trigger & validate uprobe attached by name */
+	trigger_func2();
+
+	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+	ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
+	ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
+	ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
+
+cleanup:
+}
+
+static void test_attach_probe_auto(struct test_attach_probe *skel)
+{
+	struct bpf_link *uprobe_err_link;
+
+	/* auto-attachable kprobe and kretprobe */
+	skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
+	ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
+
+	skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
+	ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
+
+	/* verify auto-attach fails for old-style uprobe definition */
+	uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
+	if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
+		       "auto-attach should fail for old-style name"))
+		return;
+
 	/* verify auto-attach works */
 	skel->links.handle_uretprobe_byname =
 			bpf_program__attach(skel->progs.handle_uretprobe_byname);
 	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
-		goto cleanup;
+		return;
+
+	/* trigger & validate kprobe && kretprobe */
+	usleep(1);
+
+	/* trigger & validate uprobe attached by name */
+	trigger_func2();
+
+	ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
+	ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
+	ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
+}
+
+static void test_uprobe_lib(struct test_attach_probe *skel)
+{
+	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+	FILE *devnull;
 
 	/* test attach by name for a library function, using the library
 	 * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
@@ -155,7 +160,7 @@ void test_attach_probe(void)
						      "libc.so.6",
						      0, &uprobe_opts);
 	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
-		goto cleanup;
+		return;
 
 	uprobe_opts.func_name = "fclose";
 	uprobe_opts.retprobe = true;
@@ -165,62 +170,137 @@ void test_attach_probe(void)
							 "libc.so.6",
							 0, &uprobe_opts);
 	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
+		return;
+
+	/* trigger & validate shared library u[ret]probes attached by name */
+	devnull = fopen("/dev/null", "r");
+	fclose(devnull);
+
+	ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
+	ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
+}
+
+static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
+{
+	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+	struct bpf_link *uprobe_link, *uretprobe_link;
+	ssize_t uprobe_offset, ref_ctr_offset;
+
+	uprobe_offset = get_uprobe_offset(&trigger_func4);
+	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
+		return;
+
+	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
+	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
+		return;
+
+	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
+
+	uprobe_opts.retprobe = false;
+	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
+	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
+		return;
+	skel->links.handle_uprobe_ref_ctr = uprobe_link;
+
+	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
+
+	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
+	uprobe_opts.retprobe = true;
+	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
+	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
+		return;
+	skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
+}
+
+static void test_kprobe_sleepable(void)
+{
+	struct test_attach_kprobe_sleepable *skel;
+
+	skel = test_attach_kprobe_sleepable__open();
+	if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
+		return;
+
+	/* sleepable kprobe test case needs flags set before loading */
+	if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
+					      BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
+		goto cleanup;
+
+	if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
+		       "skel_kprobe_sleepable_load"))
 		goto cleanup;
 
 	/* sleepable kprobes should not attach successfully */
 	skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
-	if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable"))
-		goto cleanup;
+	ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");
 
+cleanup:
+	test_attach_kprobe_sleepable__destroy(skel);
+}
+
+static void test_uprobe_sleepable(struct test_attach_probe *skel)
+{
 	/* test sleepable uprobe and uretprobe variants */
 	skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
 	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
-		goto cleanup;
+		return;
 
 	skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
 	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
-		goto cleanup;
+		return;
 
 	skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
 	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
-		goto cleanup;
+		return;
 
 	skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
 	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
-		goto cleanup;
+		return;
 
 	skel->bss->user_ptr = test_data;
 
-	/* trigger & validate kprobe && kretprobe */
-	usleep(1);
-
-	/* trigger & validate shared library u[ret]probes attached by name */
-	devnull = fopen("/dev/null", "r");
-	fclose(devnull);
-
-	/* trigger & validate uprobe & uretprobe */
-	trigger_func();
-
-	/* trigger & validate uprobe attached by name */
-	trigger_func2();
-
 	/* trigger & validate sleepable uprobe attached by name */
 	trigger_func3();
 
-	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
-	ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
-	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
-	ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
-	ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
-	ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
-	ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
-	ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
-	ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
-	ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
 	ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
 	ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
 	ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
 	ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
+}
+
+void test_attach_probe(void)
+{
+	struct test_attach_probe *skel;
+
+	skel = test_attach_probe__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
+		goto cleanup;
+	if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
+		goto cleanup;
+
+	if (test__start_subtest("manual"))
+		test_attach_probe_manual(skel);
+	if (test__start_subtest("auto"))
+		test_attach_probe_auto(skel);
+	if (test__start_subtest("kprobe-sleepable"))
+		test_kprobe_sleepable();
+	if (test__start_subtest("uprobe-lib"))
+		test_uprobe_lib(skel);
+	if (test__start_subtest("uprobe-sleepable"))
+		test_uprobe_sleepable(skel);
+	if (test__start_subtest("uprobe-ref_ctr"))
+		test_uprobe_ref_ctr(skel);
 
 cleanup:
 	test_attach_probe__destroy(skel);