
Commit 3ced9b6

yonghong-song authored and borkmann committed

tools/bpf: add a bpf selftest for stacktrace

Added a bpf selftest in test_progs in the tools directory for stacktrace.
The test populates a hashtable map and a stacktrace map at the same time
with the same key, the stackid. User space then compares both maps, using
the BPF_MAP_LOOKUP_ELEM and BPF_MAP_GET_NEXT_KEY commands, to ensure that
both have the same set of keys.

Signed-off-by: Yonghong Song <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>

1 parent 16f07c5 commit 3ced9b6
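The comparison described above reduces to walking every key of one map with BPF_MAP_GET_NEXT_KEY and probing the other map with BPF_MAP_LOOKUP_ELEM; the patch implements this in the compare_map_keys() helper shown in the test_progs.c hunk below. As a minimal standalone sketch of that pattern (the name keys_subset_of and the val_sz parameter are placeholders of mine, not part of the patch; the libbpf wrappers are the same ones the patch itself uses, and the headers test_progs.c already pulls in are assumed):

/* Sketch: succeed iff every key in map_a also exists in map_b. */
static int keys_subset_of(int map_a_fd, int map_b_fd, size_t val_sz)
{
	__u32 key, next_key;
	char val[val_sz];	/* scratch buffer big enough for map_b's value */
	int err;

	/* a NULL start key asks the kernel for the first key */
	err = bpf_map_get_next_key(map_a_fd, NULL, &key);
	while (!err) {
		if (bpf_map_lookup_elem(map_b_fd, &key, val))
			return -1;	/* key missing from map_b */
		err = bpf_map_get_next_key(map_a_fd, &key, &next_key);
		if (!err)
			key = next_key;
	}
	/* ENOENT just means iteration ran off the end of map_a */
	return errno == ENOENT ? 0 : -1;
}

Calling this both ways, keys_subset_of(a, b, ...) and keys_subset_of(b, a, ...), gives the same "same set of keys" check the test performs.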

3 files changed, +190 -1 lines changed

tools/testing/selftests/bpf/Makefile

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
 	test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
 	sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
-	test_l4lb_noinline.o test_xdp_noinline.o
+	test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o
 
 TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
 	test_offload.py

tools/testing/selftests/bpf/test_progs.c

Lines changed: 127 additions & 0 deletions

@@ -837,6 +837,132 @@ static void test_tp_attach_query(void)
 	free(query);
 }
 
+static int compare_map_keys(int map1_fd, int map2_fd)
+{
+	__u32 key, next_key;
+	char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+	int err;
+
+	err = bpf_map_get_next_key(map1_fd, NULL, &key);
+	if (err)
+		return err;
+	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
+	if (err)
+		return err;
+
+	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
+		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
+		if (err)
+			return err;
+
+		key = next_key;
+	}
+	if (errno != ENOENT)
+		return -1;
+
+	return 0;
+}
+
+static void test_stacktrace_map()
+{
+	int control_map_fd, stackid_hmap_fd, stackmap_fd;
+	const char *file = "./test_stacktrace_map.o";
+	int bytes, efd, err, pmu_fd, prog_fd;
+	struct perf_event_attr attr = {};
+	__u32 key, val, duration = 0;
+	struct bpf_object *obj;
+	char buf[256];
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+		goto out;
+
+	/* Get the ID for the sched/sched_switch tracepoint */
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		goto close_prog;
+
+	/* Open the perf event and attach bpf program */
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+		  pmu_fd, errno))
+		goto close_prog;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+		  err, errno))
+		goto close_pmu;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* find map fds */
+	control_map_fd = bpf_find_map(__func__, obj, "control_map");
+	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* give some time for bpf program run */
+	sleep(1);
+
+	/* disable stack trace collection */
+	key = 0;
+	val = 1;
+	bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+	/* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+	 */
+	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		; /* fall through */
+
+disable_pmu:
+	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+	close(pmu_fd);
+
+close_prog:
+	bpf_object__close(obj);
+
+out:
+	return;
+}
+
 int main(void)
 {
 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -852,6 +978,7 @@ int main(void)
 	test_pkt_md_access();
 	test_obj_name();
 	test_tp_attach_query();
+	test_stacktrace_map();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
tools/testing/selftests/bpf/test_stacktrace_map.c

Lines changed: 62 additions & 0 deletions

@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
+
+struct bpf_map_def SEC("maps") control_map = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") stackid_hmap = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+	.type = BPF_MAP_TYPE_STACK_TRACE,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
+	.max_entries = 10000,
+};
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+	unsigned long long pad;
+	char prev_comm[16];
+	int prev_pid;
+	int prev_prio;
+	long long prev_state;
+	char next_comm[16];
+	int next_pid;
+	int next_prio;
+};
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+	__u32 key = 0, val = 0, *value_p;
+
+	value_p = bpf_map_lookup_elem(&control_map, &key);
+	if (value_p && *value_p)
+		return 0; /* skip if non-zero *value_p */
+
+	/* The size of stackmap and stackid_hmap should be the same */
+	key = bpf_get_stackid(ctx, &stackmap, 0);
+	if ((int)key >= 0)
+		bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
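Not part of this patch, but worth noting for readers of the map definitions above: because stackmap's value_size is sizeof(__u64) * PERF_MAX_STACK_DEPTH, a stackid returned by bpf_get_stackid() can also be looked up from user space to recover the captured stack as an array of kernel instruction pointers (the kernel zero-fills the unused tail of the value). A hedged sketch, assuming a stackmap_fd obtained the same way test_stacktrace_map() obtains it; dump_one_stack is my name, not something in the patch:

/* Sketch: print the raw kernel stack stored behind one stackid. */
static void dump_one_stack(int stackmap_fd, __u32 stackid)
{
	__u64 ips[PERF_MAX_STACK_DEPTH];
	int i;

	if (bpf_map_lookup_elem(stackmap_fd, &stackid, ips))
		return;	/* no such stackid */

	/* entries past the real stack depth are zero-filled by the kernel */
	for (i = 0; i < PERF_MAX_STACK_DEPTH && ips[i]; i++)
		printf("  ip[%d] = 0x%llx\n", i, (unsigned long long)ips[i]);
}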
