|
6 | 6 | #include "uprobe_multi.skel.h"
|
7 | 7 | #include "uprobe_multi_bench.skel.h"
|
8 | 8 | #include "uprobe_multi_usdt.skel.h"
|
| 9 | +#include "uprobe_multi_consumers.skel.h" |
9 | 10 | #include "bpf/libbpf_internal.h"
|
10 | 11 | #include "testing_helpers.h"
|
11 | 12 | #include "../sdt.h"
|
@@ -516,6 +517,122 @@ static void test_attach_api_fails(void)
|
516 | 517 | uprobe_multi__destroy(skel);
|
517 | 518 | }
|
518 | 519 |
|
#ifdef __x86_64__
/*
 * Target function that deliberately contains an int3 (breakpoint)
 * instruction; used below to check that uprobe attach refuses it.
 */
noinline void uprobe_multi_error_func(void)
{
	/*
	 * If --fcf-protection=branch is enabled the gcc generates endbr as
	 * first instruction, so marking the exact address of int3 with the
	 * symbol to be used in the attach_uprobe_fail_trap test below.
	 */
	asm volatile (
		".globl uprobe_multi_error_func_int3; \n"
		"uprobe_multi_error_func_int3: \n"
		"int3 \n"
	);
}

/*
 * Attaching uprobe on uprobe_multi_error_func results in error
 * because it already starts with int3 instruction.
 */
static void attach_uprobe_fail_trap(struct uprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	/* three valid symbols plus the int3-marked one; the single bad
	 * entry must make the whole multi-attach fail */
	const char *syms[4] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
		"uprobe_multi_error_func_int3",
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);

	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
							      "/proc/self/exe", NULL, &opts);
	/* attach is expected to fail; if it unexpectedly succeeded, detach
	 * so the skeleton can still be destroyed cleanly */
	if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) {
		bpf_link__destroy(skel->links.uprobe);
		skel->links.uprobe = NULL;
	}
}
#else
/* the int3 trick is x86_64 specific, no-op on other archs */
static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { }
#endif
| 562 | + |
/* semaphore-style data objects, resolved by name below via STT_OBJECT */
short sema_1 __used, sema_2 __used;

/*
 * Builds a 3-uprobe attach request where two uprobes share a function
 * but disagree on ref_ctr_offset, which the kernel must reject.
 */
static void attach_uprobe_fail_refctr(struct uprobe_multi *skel)
{
	unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL;
	unsigned long offsets[3], ref_ctr_offsets[3];
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	/* arrays sized 3, but only 2 names are resolved (cnt = 2 below) */
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
	};
	const char *sema[3] = {
		"sema_1",
		"sema_2",
	};
	int prog_fd, link_fd, err;

	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);

	/* resolve function addresses within our own binary */
	err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms,
				       &tmp_offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
		return;

	/* resolve the semaphore (data object) addresses */
	err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema,
				       &tmp_ref_ctr_offsets, STT_OBJECT);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
		goto cleanup;

	/*
	 * We attach to 3 uprobes on 2 functions, so 2 uprobes share single function,
	 * but with different ref_ctr_offset which is not allowed and results in fail.
	 */
	offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */
	offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */
	offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */

	ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */
	ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */
	ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */

	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = (const unsigned long *) &offsets;
	opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets;
	opts.uprobe_multi.cnt = 3;

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	/* expected to fail; close the fd if the attach unexpectedly succeeded */
	if (!ASSERT_ERR(link_fd, "link_fd"))
		close(link_fd);

cleanup:
	free(tmp_ref_ctr_offsets);
	free(tmp_offsets);
}
| 618 | + |
/* Subtest: verify both ways a uprobe-multi attach is expected to fail. */
static void test_attach_uprobe_fails(void)
{
	struct uprobe_multi *skel = uprobe_multi__open_and_load();

	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		return;

	/* expected failure: uprobe placed on a trap (int3) instruction, x86_64 only */
	attach_uprobe_fail_trap(skel);
	/* expected failure: conflicting ref_ctr_offset on a shared function */
	attach_uprobe_fail_refctr(skel);

	uprobe_multi__destroy(skel);
}
| 635 | + |
519 | 636 | static void __test_link_api(struct child *child)
|
520 | 637 | {
|
521 | 638 | int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
|
@@ -615,6 +732,216 @@ static void test_link_api(void)
|
615 | 732 | __test_link_api(child);
|
616 | 733 | }
|
617 | 734 |
|
| 735 | +static struct bpf_program * |
| 736 | +get_program(struct uprobe_multi_consumers *skel, int prog) |
| 737 | +{ |
| 738 | + switch (prog) { |
| 739 | + case 0: |
| 740 | + return skel->progs.uprobe_0; |
| 741 | + case 1: |
| 742 | + return skel->progs.uprobe_1; |
| 743 | + case 2: |
| 744 | + return skel->progs.uprobe_2; |
| 745 | + case 3: |
| 746 | + return skel->progs.uprobe_3; |
| 747 | + default: |
| 748 | + ASSERT_FAIL("get_program"); |
| 749 | + return NULL; |
| 750 | + } |
| 751 | +} |
| 752 | + |
| 753 | +static struct bpf_link ** |
| 754 | +get_link(struct uprobe_multi_consumers *skel, int link) |
| 755 | +{ |
| 756 | + switch (link) { |
| 757 | + case 0: |
| 758 | + return &skel->links.uprobe_0; |
| 759 | + case 1: |
| 760 | + return &skel->links.uprobe_1; |
| 761 | + case 2: |
| 762 | + return &skel->links.uprobe_2; |
| 763 | + case 3: |
| 764 | + return &skel->links.uprobe_3; |
| 765 | + default: |
| 766 | + ASSERT_FAIL("get_link"); |
| 767 | + return NULL; |
| 768 | + } |
| 769 | +} |
| 770 | + |
| 771 | +static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx) |
| 772 | +{ |
| 773 | + struct bpf_program *prog = get_program(skel, idx); |
| 774 | + struct bpf_link **link = get_link(skel, idx); |
| 775 | + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); |
| 776 | + |
| 777 | + if (!prog || !link) |
| 778 | + return -1; |
| 779 | + |
| 780 | + /* |
| 781 | + * bit/prog: 0,1 uprobe entry |
| 782 | + * bit/prog: 2,3 uprobe return |
| 783 | + */ |
| 784 | + opts.retprobe = idx == 2 || idx == 3; |
| 785 | + |
| 786 | + *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", |
| 787 | + "uprobe_consumer_test", |
| 788 | + &opts); |
| 789 | + if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi")) |
| 790 | + return -1; |
| 791 | + return 0; |
| 792 | +} |
| 793 | + |
| 794 | +static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx) |
| 795 | +{ |
| 796 | + struct bpf_link **link = get_link(skel, idx); |
| 797 | + |
| 798 | + bpf_link__destroy(*link); |
| 799 | + *link = NULL; |
| 800 | +} |
| 801 | + |
/* Return true when bit number 'bit' is set in 'val'. */
static bool test_bit(int bit, unsigned long val)
{
	return (val >> bit) & 1;
}
| 806 | + |
| 807 | +noinline int |
| 808 | +uprobe_consumer_test(struct uprobe_multi_consumers *skel, |
| 809 | + unsigned long before, unsigned long after) |
| 810 | +{ |
| 811 | + int idx; |
| 812 | + |
| 813 | + /* detach uprobe for each unset programs in 'before' state ... */ |
| 814 | + for (idx = 0; idx < 4; idx++) { |
| 815 | + if (test_bit(idx, before) && !test_bit(idx, after)) |
| 816 | + uprobe_detach(skel, idx); |
| 817 | + } |
| 818 | + |
| 819 | + /* ... and attach all new programs in 'after' state */ |
| 820 | + for (idx = 0; idx < 4; idx++) { |
| 821 | + if (!test_bit(idx, before) && test_bit(idx, after)) { |
| 822 | + if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after")) |
| 823 | + return -1; |
| 824 | + } |
| 825 | + } |
| 826 | + return 0; |
| 827 | +} |
| 828 | + |
/*
 * Run one before/after combination: attach consumers in 'before', trigger
 * uprobe_consumer_test() (which transitions to 'after'), then check each
 * consumer's hit count against the expected value.
 */
static void consumer_test(struct uprobe_multi_consumers *skel,
			  unsigned long before, unsigned long after)
{
	int err, idx;

	printf("consumer_test before %lu after %lu\n", before, after);

	/* attach uprobe for every idx set in 'before' */
	for (idx = 0; idx < 4; idx++) {
		if (test_bit(idx, before)) {
			if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before"))
				goto cleanup;
		}
	}

	/* trigger the probed function; it flips attachments to 'after' inside */
	err = uprobe_consumer_test(skel, before, after);
	if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
		goto cleanup;

	/* verify each consumer fired the expected number of times */
	for (idx = 0; idx < 4; idx++) {
		const char *fmt = "BUG";
		__u64 val = 0;

		if (idx < 2) {
			/*
			 * uprobe entry
			 * +1 if defined in 'before'
			 */
			if (test_bit(idx, before))
				val++;
			fmt = "prog 0/1: uprobe";
		} else {
			/*
			 * uprobe return is tricky ;-)
			 *
			 * to trigger uretprobe consumer, the uretprobe needs to be installed,
			 * which means one of the 'return' uprobes was alive when probe was hit:
			 *
			 *   idxs: 2/3 uprobe return in 'installed' mask
			 *
			 * in addition if 'after' state removes everything that was installed in
			 * 'before' state, then uprobe kernel object goes away and return uprobe
			 * is not installed and we won't hit it even if it's in 'after' state.
			 */
			unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */
			unsigned long probe_preserved = before & after; /* did uprobe go away */

			if (had_uretprobes && probe_preserved && test_bit(idx, after))
				val++;
			fmt = "idx 2/3: uretprobe";
		}

		/* check and reset the per-consumer counter for the next combination */
		ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt);
		skel->bss->uprobe_result[idx] = 0;
	}

cleanup:
	/* detach everything, including idxs that were never attached */
	for (idx = 0; idx < 4; idx++)
		uprobe_detach(skel, idx);
}
| 889 | + |
static void test_consumers(void)
{
	struct uprobe_multi_consumers *skel;
	int combo;

	skel = uprobe_multi_consumers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
		return;

	/*
	 * The idea of this test is to try all possible combinations of
	 * uprobes consumers attached on single function.
	 *
	 *  - 2 uprobe entry consumers
	 *  - 2 uprobe exit consumers
	 *
	 * The test uses 4 uprobes attached on single function, but that
	 * translates into single uprobe with 4 consumers in kernel.
	 *
	 * The before/after values present the state of attached consumers
	 * before and after the probed function:
	 *
	 *   bit/prog 0,1 : uprobe entry
	 *   bit/prog 2,3 : uprobe return
	 *
	 * For example for:
	 *
	 *   before = 0b0101
	 *   after  = 0b0110
	 *
	 * it means that before we call 'uprobe_consumer_test' we attach
	 * uprobes defined in 'before' value:
	 *
	 *   - bit/prog 0: uprobe entry
	 *   - bit/prog 2: uprobe return
	 *
	 * uprobe_consumer_test is called and inside it we attach and detach
	 * uprobes based on 'after' value:
	 *
	 *   - bit/prog 0: stays untouched
	 *   - bit/prog 2: uprobe return is detached
	 *
	 * uprobe_consumer_test returns and we check counters values increased
	 * by bpf programs on each uprobe to match the expected count based on
	 * before/after bits.
	 *
	 * The single loop below walks all 16 x 16 before/after pairs in the
	 * same order as two nested 0..15 loops would.
	 */
	for (combo = 0; combo < 256; combo++)
		consumer_test(skel, combo >> 4 /* before */, combo & 0xf /* after */);

	uprobe_multi_consumers__destroy(skel);
}
| 944 | + |
618 | 945 | static void test_bench_attach_uprobe(void)
|
619 | 946 | {
|
620 | 947 | long attach_start_ns = 0, attach_end_ns = 0;
|
@@ -703,4 +1030,8 @@ void test_uprobe_multi_test(void)
|
703 | 1030 | test_bench_attach_usdt();
|
704 | 1031 | if (test__start_subtest("attach_api_fails"))
|
705 | 1032 | test_attach_api_fails();
|
| 1033 | + if (test__start_subtest("attach_uprobe_fails")) |
| 1034 | + test_attach_uprobe_fails(); |
| 1035 | + if (test__start_subtest("consumers")) |
| 1036 | + test_consumers(); |
706 | 1037 | }
|
0 commit comments