@@ -24,12 +24,8 @@ static const char *__doc__ =
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 6
 
-/* Wanted to get rid of bpf_load.h and fake-"libbpf.h" (and instead
- * use bpf/libbpf.h), but cannot as (currently) needed for XDP
- * attaching to a device via bpf_set_link_xdp_fd()
- */
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
 
 #include "bpf_util.h"
 
@@ -38,6 +34,15 @@ static char ifname_buf[IF_NAMESIZE];
 static char *ifname;
 
 static __u32 xdp_flags;
+static int cpu_map_fd;
+static int rx_cnt_map_fd;
+static int redirect_err_cnt_map_fd;
+static int cpumap_enqueue_cnt_map_fd;
+static int cpumap_kthread_cnt_map_fd;
+static int cpus_available_map_fd;
+static int cpus_count_map_fd;
+static int cpus_iterator_map_fd;
+static int exception_cnt_map_fd;
 
 /* Exit return codes */
 #define EXIT_OK		0
@@ -52,7 +57,7 @@ static const struct option long_options[] = {
 	{"dev",		required_argument,	NULL, 'd' },
 	{"skb-mode",	no_argument,		NULL, 'S' },
 	{"sec",		required_argument,	NULL, 's' },
-	{"prognum",	required_argument,	NULL, 'p' },
+	{"progname",	required_argument,	NULL, 'p' },
 	{"qsize",	required_argument,	NULL, 'q' },
 	{"cpu",		required_argument,	NULL, 'c' },
 	{"stress-mode", no_argument,		NULL, 'x' },
@@ -70,7 +75,17 @@ static void int_exit(int sig)
 	exit(EXIT_OK);
 }
 
-static void usage(char *argv[])
+static void print_avail_progs(struct bpf_object *obj)
+{
+	struct bpf_program *pos;
+
+	bpf_object__for_each_program(pos, obj) {
+		if (bpf_program__is_xdp(pos))
+			printf(" %s\n", bpf_program__title(pos, false));
+	}
+}
+
+static void usage(char *argv[], struct bpf_object *obj)
 {
 	int i;
 
@@ -88,6 +103,8 @@ static void usage(char *argv[])
 				long_options[i].val);
 		printf("\n");
 	}
+	printf("\n Programs to be used for --progname:\n");
+	print_avail_progs(obj);
 	printf("\n");
 }
 
@@ -262,7 +279,7 @@ static __u64 calc_errs_pps(struct datarec *r,
 
 static void stats_print(struct stats_record *stats_rec,
 			struct stats_record *stats_prev,
-			int prog_num)
+			char *prog_name)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
 	double pps = 0, drop = 0, err = 0;
@@ -272,7 +289,7 @@ static void stats_print(struct stats_record *stats_rec,
 	int i;
 
 	/* Header */
-	printf("Running XDP/eBPF prog_num:%d\n", prog_num);
+	printf("Running XDP/eBPF prog_name:%s\n", prog_name);
 	printf("%-15s %-7s %-14s %-11s %-9s\n",
 	       "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");
@@ -423,20 +440,20 @@ static void stats_collect(struct stats_record *rec)
 {
 	int fd, i;
 
-	fd = map_fd[1]; /* map: rx_cnt */
+	fd = rx_cnt_map_fd;
 	map_collect_percpu(fd, 0, &rec->rx_cnt);
 
-	fd = map_fd[2]; /* map: redirect_err_cnt */
+	fd = redirect_err_cnt_map_fd;
 	map_collect_percpu(fd, 1, &rec->redir_err);
 
-	fd = map_fd[3]; /* map: cpumap_enqueue_cnt */
+	fd = cpumap_enqueue_cnt_map_fd;
 	for (i = 0; i < MAX_CPUS; i++)
 		map_collect_percpu(fd, i, &rec->enq[i]);
 
-	fd = map_fd[4]; /* map: cpumap_kthread_cnt */
+	fd = cpumap_kthread_cnt_map_fd;
 	map_collect_percpu(fd, 0, &rec->kthread);
 
-	fd = map_fd[8]; /* map: exception_cnt */
+	fd = exception_cnt_map_fd;
 	map_collect_percpu(fd, 0, &rec->exception);
 }
 
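For context, map_collect_percpu() itself is untouched by this change and not shown in the diff. A helper like it reads one key from a per-CPU map — bpf_map_lookup_elem() on per-CPU map types fills one value slot per possible CPU — and folds the slots into a single record. A minimal sketch, assuming a simplified struct datarec (the field set here is illustrative, not the sample's exact layout):

/* Sketch only -- simplified; the sample's real struct datarec may
 * carry more counters than the two assumed here.
 */
struct datarec {
	__u64 processed;
	__u64 dropped;
};

static bool map_collect_percpu_sketch(int fd, __u32 key, struct datarec *rec)
{
	/* For per-CPU map types, lookup returns one entry per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 processed = 0, dropped = 0;
	unsigned int i;

	if (bpf_map_lookup_elem(fd, &key, values) != 0) {
		fprintf(stderr, "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	for (i = 0; i < nr_cpus; i++) {
		processed += values[i].processed;
		dropped   += values[i].dropped;
	}
	rec->processed = processed;
	rec->dropped   = dropped;
	return true;
}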
@@ -461,7 +478,7 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
 	/* Add a CPU entry to cpumap, as this allocate a cpu entry in
 	 * the kernel for the cpu.
 	 */
-	ret = bpf_map_update_elem(map_fd[0], &cpu, &queue_size, 0);
+	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
 	if (ret) {
 		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
 		exit(EXIT_FAIL_BPF);
@@ -470,23 +487,22 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
 	/* Inform bpf_prog's that a new CPU is available to select
 	 * from via some control maps.
 	 */
-	/* map_fd[5] = cpus_available */
-	ret = bpf_map_update_elem(map_fd[5], &avail_idx, &cpu, 0);
+	ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
 	if (ret) {
 		fprintf(stderr, "Add to avail CPUs failed\n");
 		exit(EXIT_FAIL_BPF);
 	}
 
 	/* When not replacing/updating existing entry, bump the count */
-	/* map_fd[6] = cpus_count */
-	ret = bpf_map_lookup_elem(map_fd[6], &key, &curr_cpus_count);
+	ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
 	if (ret) {
 		fprintf(stderr, "Failed reading curr cpus_count\n");
 		exit(EXIT_FAIL_BPF);
 	}
 	if (new) {
 		curr_cpus_count++;
-		ret = bpf_map_update_elem(map_fd[6], &key, &curr_cpus_count, 0);
+		ret = bpf_map_update_elem(cpus_count_map_fd, &key,
+					  &curr_cpus_count, 0);
 		if (ret) {
 			fprintf(stderr, "Failed write curr cpus_count\n");
 			exit(EXIT_FAIL_BPF);
@@ -509,8 +525,8 @@ static void mark_cpus_unavailable(void)
 	int ret, i;
 
 	for (i = 0; i < MAX_CPUS; i++) {
-		/* map_fd[5] = cpus_available */
-		ret = bpf_map_update_elem(map_fd[5], &i, &invalid_cpu, 0);
+		ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+					  &invalid_cpu, 0);
 		if (ret) {
 			fprintf(stderr, "Failed marking CPU unavailable\n");
 			exit(EXIT_FAIL_BPF);
@@ -530,7 +546,7 @@ static void stress_cpumap(void)
 	create_cpu_entry(1, 16000, 0, false);
 }
 
-static void stats_poll(int interval, bool use_separators, int prog_num,
+static void stats_poll(int interval, bool use_separators, char *prog_name,
 		       bool stress_mode)
 {
 	struct stats_record *record, *prev;
@@ -546,7 +562,7 @@ static void stats_poll(int interval, bool use_separators, int prog_num,
 	while (1) {
 		swap(&prev, &record);
 		stats_collect(record);
-		stats_print(record, prev, prog_num);
+		stats_print(record, prev, prog_name);
 		sleep(interval);
 		if (stress_mode)
 			stress_cpumap();
@@ -556,17 +572,51 @@ static void stats_poll(int interval, bool use_separators, int prog_num,
 	free_stats_record(prev);
 }
 
+static int init_map_fds(struct bpf_object *obj)
+{
+	cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
+	rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
+	redirect_err_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
+	cpumap_enqueue_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
+	cpumap_kthread_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
+	cpus_available_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpus_available");
+	cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
+	cpus_iterator_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
+	exception_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "exception_cnt");
+
+	if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
+	    redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
+	    cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
+	    cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
+	    exception_cnt_map_fd < 0)
+		return -ENOENT;
+
+	return 0;
+}
+
 int main(int argc, char **argv)
 {
 	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
+	struct bpf_prog_load_attr prog_load_attr = {
+		.prog_type	= BPF_PROG_TYPE_UNSPEC,
+	};
 	bool use_separators = true;
 	bool stress_mode = false;
+	struct bpf_program *prog;
+	struct bpf_object *obj;
 	char filename[256];
 	int added_cpus = 0;
 	int longindex = 0;
 	int interval = 2;
-	int prog_num = 5;
 	int add_cpu = -1;
+	int prog_fd;
 	__u32 qsize;
 	int opt;
 
@@ -579,22 +629,25 @@ int main(int argc, char **argv)
 	qsize = 128+64;
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	prog_load_attr.file = filename;
 
 	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
 		perror("setrlimit(RLIMIT_MEMLOCK)");
 		return 1;
 	}
 
-	if (load_bpf_file(filename)) {
-		fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
+	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
 		return EXIT_FAIL;
-	}
 
-	if (!prog_fd[0]) {
-		fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+	if (prog_fd < 0) {
+		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
+			strerror(errno));
+		return EXIT_FAIL;
+	}
+	if (init_map_fds(obj) < 0) {
+		fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
 		return EXIT_FAIL;
 	}
-
 	mark_cpus_unavailable();
 
 	/* Parse commands line args */
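A note on .prog_type = BPF_PROG_TYPE_UNSPEC above: with the type left unspecified, libbpf infers each program's type from its ELF section name (the "xdp" prefix), which is what lets one object file carry several alternative XDP programs selectable by --progname. If the section names were not self-describing, the type could instead be forced before loading — a hypothetical alternative using the plain open/load API, not what this patch does:

/* Hypothetical alternative, not used by this patch: open the object,
 * force every program to BPF_PROG_TYPE_XDP, then load it.
 */
struct bpf_object *obj = bpf_object__open(filename);
struct bpf_program *pos;

if (libbpf_get_error(obj))
	return EXIT_FAIL;
bpf_object__for_each_program(pos, obj)
	bpf_program__set_type(pos, BPF_PROG_TYPE_XDP);
if (bpf_object__load(obj))
	return EXIT_FAIL;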
@@ -630,13 +683,7 @@ int main(int argc, char **argv)
 			break;
 		case 'p':
 			/* Selecting eBPF prog to load */
-			prog_num = atoi(optarg);
-			if (prog_num < 0 || prog_num >= MAX_PROG) {
-				fprintf(stderr,
-					"--prognum too large err(%d):%s\n",
-					errno, strerror(errno));
-				goto error;
-			}
+			prog_name = optarg;
 			break;
 		case 'c':
 			/* Add multiple CPUs */
@@ -656,33 +703,45 @@ int main(int argc, char **argv)
 		case 'h':
 		error:
 		default:
-			usage(argv);
+			usage(argv, obj);
 			return EXIT_FAIL_OPTION;
 		}
 	}
 	/* Required option */
 	if (ifindex == -1) {
 		fprintf(stderr, "ERR: required option --dev missing\n");
-		usage(argv);
+		usage(argv, obj);
 		return EXIT_FAIL_OPTION;
 	}
 	/* Required option */
 	if (add_cpu == -1) {
 		fprintf(stderr, "ERR: required option --cpu missing\n");
 		fprintf(stderr, " Specify multiple --cpu option to add more\n");
-		usage(argv);
+		usage(argv, obj);
 		return EXIT_FAIL_OPTION;
 	}
 
 	/* Remove XDP program when program is interrupted or killed */
 	signal(SIGINT, int_exit);
 	signal(SIGTERM, int_exit);
 
-	if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
+	prog = bpf_object__find_program_by_title(obj, prog_name);
+	if (!prog) {
+		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
+		return EXIT_FAIL;
+	}
+
+	prog_fd = bpf_program__fd(prog);
+	if (prog_fd < 0) {
+		fprintf(stderr, "bpf_program__fd failed\n");
+		return EXIT_FAIL;
+	}
+
+	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
 		fprintf(stderr, "link set xdp fd failed\n");
 		return EXIT_FAIL_XDP;
 	}
 
-	stats_poll(interval, use_separators, prog_num, stress_mode);
+	stats_poll(interval, use_separators, prog_name, stress_mode);
 	return EXIT_OK;
 }
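Taken together, the conversion replaces bpf_load's implicit prog_fd[]/map_fd[] arrays with explicit libbpf handles: bpf_prog_load_xattr() yields the bpf_object, init_map_fds() resolves every map fd by name, and the program chosen via --progname is resolved by title and attached. A condensed sketch of the resulting flow, using only the calls from the diff above, with error handling elided:

/* Condensed recap of the flow above; error handling elided */
struct bpf_prog_load_attr prog_load_attr = {
	.prog_type = BPF_PROG_TYPE_UNSPEC,
	.file      = "xdp_redirect_cpu_kern.o",  /* "%s_kern.o" of argv[0] */
};
struct bpf_object *obj;
struct bpf_program *prog;
int prog_fd;

bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd);
init_map_fds(obj);                         /* map fds resolved by name */
prog = bpf_object__find_program_by_title(obj, prog_name);
prog_fd = bpf_program__fd(prog);           /* fd of the selected XDP prog */
bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags);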