Commit bbaf602

Maciej Fijalkowski authored and borkmann committed
samples/bpf: Convert XDP samples to libbpf usage
Some of the XDP samples that attach the BPF program to the interface via libbpf's bpf_set_link_xdp_fd() are still using bpf_load.c for loading and manipulating the eBPF program and maps. Convert them to do this through libbpf and remove bpf_load from the picture. While at it, remove what looks like a debug leftover in xdp_redirect_map_user.c.

In xdp_redirect_cpu, change the way the program to be loaded onto the interface is chosen - the user now needs to pass the program's section name instead of the relative number. In case of a typo, print out the section names to choose from.

Signed-off-by: Maciej Fijalkowski <[email protected]>
Reviewed-by: Jakub Kicinski <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
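To make the conversion concrete, here is a minimal sketch (not part of the commit) of the libbpf flow the converted samples now follow: load the object with bpf_prog_load_xattr(), resolve maps by name via bpf_object__find_map_fd_by_name(), select the XDP program by its ELF section title, and attach its fd with bpf_set_link_xdp_fd(). The object file path and the device name "eth0" are placeholder assumptions, and the include paths assume the samples/bpf build setup.

/* Minimal sketch, assuming a sample object built from a *_kern.c file and a
 * placeholder device "eth0"; error handling is trimmed to the essentials.
 */
#include <errno.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>

#include <bpf/bpf.h>
#include "bpf/libbpf.h"

int main(void)
{
        struct bpf_prog_load_attr prog_load_attr = {
                .prog_type = BPF_PROG_TYPE_UNSPEC,
                .file      = "xdp_redirect_cpu_kern.o", /* placeholder path */
        };
        struct bpf_program *prog;
        struct bpf_object *obj;
        int prog_fd, cpu_map_fd, ifindex;

        /* Load and verify all programs/maps found in the ELF object */
        if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
                return 1;

        /* Maps are resolved by name instead of bpf_load's map_fd[] indexes */
        cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
        if (cpu_map_fd < 0)
                return 1;

        /* Programs are selected by section title (what --progname now takes) */
        prog = bpf_object__find_program_by_title(obj,
                                                 "xdp_cpu_map5_lb_hash_ip_pairs");
        if (!prog)
                return 1;
        prog_fd = bpf_program__fd(prog);

        ifindex = if_nametoindex("eth0");       /* placeholder device */
        if (!ifindex || bpf_set_link_xdp_fd(ifindex, prog_fd, 0) < 0) {
                fprintf(stderr, "attach failed: %s\n", strerror(errno));
                return 1;
        }
        return 0;
}

Selecting by section title is what backs the new --progname option in xdp_redirect_cpu; usage() now lists the titles of all XDP programs in the object via print_avail_progs().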
1 parent 7313798 commit bbaf602

File tree: 6 files changed (+253, -103 lines)


samples/bpf/Makefile

Lines changed: 4 additions & 4 deletions
@@ -87,18 +87,18 @@ test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o
 xdp1-objs := xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := xdp1_user.o
-xdp_router_ipv4-objs := bpf_load.o xdp_router_ipv4_user.o
+xdp_router_ipv4-objs := xdp_router_ipv4_user.o
 test_current_task_under_cgroup-objs := bpf_load.o $(CGROUP_HELPERS) \
                                        test_current_task_under_cgroup_user.o
 trace_event-objs := bpf_load.o trace_event_user.o $(TRACE_HELPERS)
 sampleip-objs := bpf_load.o sampleip_user.o $(TRACE_HELPERS)
 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
-xdp_tx_iptunnel-objs := bpf_load.o xdp_tx_iptunnel_user.o
+xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
 test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
-xdp_redirect-objs := bpf_load.o xdp_redirect_user.o
-xdp_redirect_map-objs := bpf_load.o xdp_redirect_map_user.o
+xdp_redirect-objs := xdp_redirect_user.o
+xdp_redirect_map-objs := xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
 xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o

samples/bpf/xdp_redirect_cpu_user.c

Lines changed: 102 additions & 43 deletions
@@ -24,12 +24,8 @@ static const char *__doc__ =
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 6
 
-/* Wanted to get rid of bpf_load.h and fake-"libbpf.h" (and instead
- * use bpf/libbpf.h), but cannot as (currently) needed for XDP
- * attaching to a device via bpf_set_link_xdp_fd()
- */
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
 
 #include "bpf_util.h"
 
@@ -38,6 +34,15 @@ static char ifname_buf[IF_NAMESIZE];
 static char *ifname;
 
 static __u32 xdp_flags;
+static int cpu_map_fd;
+static int rx_cnt_map_fd;
+static int redirect_err_cnt_map_fd;
+static int cpumap_enqueue_cnt_map_fd;
+static int cpumap_kthread_cnt_map_fd;
+static int cpus_available_map_fd;
+static int cpus_count_map_fd;
+static int cpus_iterator_map_fd;
+static int exception_cnt_map_fd;
 
 /* Exit return codes */
 #define EXIT_OK 0
@@ -52,7 +57,7 @@ static const struct option long_options[] = {
         {"dev", required_argument, NULL, 'd' },
         {"skb-mode", no_argument, NULL, 'S' },
         {"sec", required_argument, NULL, 's' },
-        {"prognum", required_argument, NULL, 'p' },
+        {"progname", required_argument, NULL, 'p' },
         {"qsize", required_argument, NULL, 'q' },
         {"cpu", required_argument, NULL, 'c' },
         {"stress-mode", no_argument, NULL, 'x' },
@@ -70,7 +75,17 @@ static void int_exit(int sig)
         exit(EXIT_OK);
 }
 
-static void usage(char *argv[])
+static void print_avail_progs(struct bpf_object *obj)
+{
+        struct bpf_program *pos;
+
+        bpf_object__for_each_program(pos, obj) {
+                if (bpf_program__is_xdp(pos))
+                        printf(" %s\n", bpf_program__title(pos, false));
+        }
+}
+
+static void usage(char *argv[], struct bpf_object *obj)
 {
         int i;
 
@@ -88,6 +103,8 @@ static void usage(char *argv[])
                        long_options[i].val);
                 printf("\n");
         }
+        printf("\n Programs to be used for --progname:\n");
+        print_avail_progs(obj);
         printf("\n");
 }
 
@@ -262,7 +279,7 @@ static __u64 calc_errs_pps(struct datarec *r,
 
 static void stats_print(struct stats_record *stats_rec,
                         struct stats_record *stats_prev,
-                        int prog_num)
+                        char *prog_name)
 {
         unsigned int nr_cpus = bpf_num_possible_cpus();
         double pps = 0, drop = 0, err = 0;
@@ -272,7 +289,7 @@ static void stats_print(struct stats_record *stats_rec,
         int i;
 
         /* Header */
-        printf("Running XDP/eBPF prog_num:%d\n", prog_num);
+        printf("Running XDP/eBPF prog_name:%s\n", prog_name);
         printf("%-15s %-7s %-14s %-11s %-9s\n",
                "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");
 
@@ -423,20 +440,20 @@ static void stats_collect(struct stats_record *rec)
 {
         int fd, i;
 
-        fd = map_fd[1]; /* map: rx_cnt */
+        fd = rx_cnt_map_fd;
         map_collect_percpu(fd, 0, &rec->rx_cnt);
 
-        fd = map_fd[2]; /* map: redirect_err_cnt */
+        fd = redirect_err_cnt_map_fd;
         map_collect_percpu(fd, 1, &rec->redir_err);
 
-        fd = map_fd[3]; /* map: cpumap_enqueue_cnt */
+        fd = cpumap_enqueue_cnt_map_fd;
         for (i = 0; i < MAX_CPUS; i++)
                 map_collect_percpu(fd, i, &rec->enq[i]);
 
-        fd = map_fd[4]; /* map: cpumap_kthread_cnt */
+        fd = cpumap_kthread_cnt_map_fd;
         map_collect_percpu(fd, 0, &rec->kthread);
 
-        fd = map_fd[8]; /* map: exception_cnt */
+        fd = exception_cnt_map_fd;
         map_collect_percpu(fd, 0, &rec->exception);
 }
 
@@ -461,7 +478,7 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
         /* Add a CPU entry to cpumap, as this allocate a cpu entry in
          * the kernel for the cpu.
          */
-        ret = bpf_map_update_elem(map_fd[0], &cpu, &queue_size, 0);
+        ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
         if (ret) {
                 fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
                 exit(EXIT_FAIL_BPF);
@@ -470,23 +487,22 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
         /* Inform bpf_prog's that a new CPU is available to select
          * from via some control maps.
          */
-        /* map_fd[5] = cpus_available */
-        ret = bpf_map_update_elem(map_fd[5], &avail_idx, &cpu, 0);
+        ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
         if (ret) {
                 fprintf(stderr, "Add to avail CPUs failed\n");
                 exit(EXIT_FAIL_BPF);
         }
 
         /* When not replacing/updating existing entry, bump the count */
-        /* map_fd[6] = cpus_count */
-        ret = bpf_map_lookup_elem(map_fd[6], &key, &curr_cpus_count);
+        ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
         if (ret) {
                 fprintf(stderr, "Failed reading curr cpus_count\n");
                 exit(EXIT_FAIL_BPF);
         }
         if (new) {
                 curr_cpus_count++;
-                ret = bpf_map_update_elem(map_fd[6], &key, &curr_cpus_count, 0);
+                ret = bpf_map_update_elem(cpus_count_map_fd, &key,
+                                          &curr_cpus_count, 0);
                 if (ret) {
                         fprintf(stderr, "Failed write curr cpus_count\n");
                         exit(EXIT_FAIL_BPF);
@@ -509,8 +525,8 @@ static void mark_cpus_unavailable(void)
         int ret, i;
 
         for (i = 0; i < MAX_CPUS; i++) {
-                /* map_fd[5] = cpus_available */
-                ret = bpf_map_update_elem(map_fd[5], &i, &invalid_cpu, 0);
+                ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+                                          &invalid_cpu, 0);
                 if (ret) {
                         fprintf(stderr, "Failed marking CPU unavailable\n");
                         exit(EXIT_FAIL_BPF);
@@ -530,7 +546,7 @@ static void stress_cpumap(void)
         create_cpu_entry(1, 16000, 0, false);
 }
 
-static void stats_poll(int interval, bool use_separators, int prog_num,
+static void stats_poll(int interval, bool use_separators, char *prog_name,
                        bool stress_mode)
 {
         struct stats_record *record, *prev;
@@ -546,7 +562,7 @@ static void stats_poll(int interval, bool use_separators, int prog_num,
         while (1) {
                 swap(&prev, &record);
                 stats_collect(record);
-                stats_print(record, prev, prog_num);
+                stats_print(record, prev, prog_name);
                 sleep(interval);
                 if (stress_mode)
                         stress_cpumap();
@@ -556,17 +572,51 @@ static void stats_poll(int interval, bool use_separators, int prog_num,
         free_stats_record(prev);
 }
 
+static int init_map_fds(struct bpf_object *obj)
+{
+        cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
+        rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
+        redirect_err_cnt_map_fd =
+                bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
+        cpumap_enqueue_cnt_map_fd =
+                bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
+        cpumap_kthread_cnt_map_fd =
+                bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
+        cpus_available_map_fd =
+                bpf_object__find_map_fd_by_name(obj, "cpus_available");
+        cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
+        cpus_iterator_map_fd =
+                bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
+        exception_cnt_map_fd =
+                bpf_object__find_map_fd_by_name(obj, "exception_cnt");
+
+        if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
+            redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
+            cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
+            cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
+            exception_cnt_map_fd < 0)
+                return -ENOENT;
+
+        return 0;
+}
+
 int main(int argc, char **argv)
 {
         struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+        char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
+        struct bpf_prog_load_attr prog_load_attr = {
+                .prog_type = BPF_PROG_TYPE_UNSPEC,
+        };
         bool use_separators = true;
         bool stress_mode = false;
+        struct bpf_program *prog;
+        struct bpf_object *obj;
         char filename[256];
         int added_cpus = 0;
         int longindex = 0;
         int interval = 2;
-        int prog_num = 5;
         int add_cpu = -1;
+        int prog_fd;
         __u32 qsize;
         int opt;
 
@@ -579,22 +629,25 @@ int main(int argc, char **argv)
         qsize = 128+64;
 
         snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+        prog_load_attr.file = filename;
 
         if (setrlimit(RLIMIT_MEMLOCK, &r)) {
                 perror("setrlimit(RLIMIT_MEMLOCK)");
                 return 1;
         }
 
-        if (load_bpf_file(filename)) {
-                fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
+        if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
                 return EXIT_FAIL;
-        }
 
-        if (!prog_fd[0]) {
-                fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+        if (prog_fd < 0) {
+                fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
+                        strerror(errno));
+                return EXIT_FAIL;
+        }
+        if (init_map_fds(obj) < 0) {
+                fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
                 return EXIT_FAIL;
         }
-
         mark_cpus_unavailable();
 
         /* Parse commands line args */
@@ -630,13 +683,7 @@ int main(int argc, char **argv)
                         break;
                 case 'p':
                         /* Selecting eBPF prog to load */
-                        prog_num = atoi(optarg);
-                        if (prog_num < 0 || prog_num >= MAX_PROG) {
-                                fprintf(stderr,
-                                        "--prognum too large err(%d):%s\n",
-                                        errno, strerror(errno));
-                                goto error;
-                        }
+                        prog_name = optarg;
                         break;
                 case 'c':
                         /* Add multiple CPUs */
@@ -656,33 +703,45 @@ int main(int argc, char **argv)
                 case 'h':
                 error:
                 default:
-                        usage(argv);
+                        usage(argv, obj);
                         return EXIT_FAIL_OPTION;
                 }
         }
         /* Required option */
         if (ifindex == -1) {
                 fprintf(stderr, "ERR: required option --dev missing\n");
-                usage(argv);
+                usage(argv, obj);
                 return EXIT_FAIL_OPTION;
         }
         /* Required option */
         if (add_cpu == -1) {
                 fprintf(stderr, "ERR: required option --cpu missing\n");
                 fprintf(stderr, " Specify multiple --cpu option to add more\n");
-                usage(argv);
+                usage(argv, obj);
                 return EXIT_FAIL_OPTION;
         }
 
         /* Remove XDP program when program is interrupted or killed */
         signal(SIGINT, int_exit);
         signal(SIGTERM, int_exit);
 
-        if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
+        prog = bpf_object__find_program_by_title(obj, prog_name);
+        if (!prog) {
+                fprintf(stderr, "bpf_object__find_program_by_title failed\n");
+                return EXIT_FAIL;
+        }
+
+        prog_fd = bpf_program__fd(prog);
+        if (prog_fd < 0) {
+                fprintf(stderr, "bpf_program__fd failed\n");
+                return EXIT_FAIL;
+        }
+
+        if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
                 fprintf(stderr, "link set xdp fd failed\n");
                 return EXIT_FAIL_XDP;
         }
 
-        stats_poll(interval, use_separators, prog_num, stress_mode);
+        stats_poll(interval, use_separators, prog_name, stress_mode);
         return EXIT_OK;
 }
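Usage note (example invocation assumed, not taken from the commit): after this change xdp_redirect_cpu is pointed at a program by its section name, e.g. ./xdp_redirect_cpu --dev eth0 --cpu 0 --progname xdp_cpu_map5_lb_hash_ip_pairs, where eth0 stands in for a real device and xdp_cpu_map5_lb_hash_ip_pairs is the default section name from the patch. The usage() output now lists the available XDP section titles via print_avail_progs().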
