47 | 47 | #include <linux/kcmp.h>
48 | 48 | #include <sys/resource.h>
49 | 49 | #include <sys/capability.h>
| 50 | +#include <linux/perf_event.h> |
50 | 51 |
51 | 52 | #include <unistd.h>
52 | 53 | #include <sys/syscall.h>
68 | 69 | # define PR_SET_PTRACER 0x59616d61
69 | 70 | #endif
70 | 71 |
| 72 | +#ifndef noinline |
| 73 | +#define noinline __attribute__((noinline)) |
| 74 | +#endif |
| 75 | + |
71 | 76 | #ifndef PR_SET_NO_NEW_PRIVS
72 | 77 | #define PR_SET_NO_NEW_PRIVS 38
73 | 78 | #define PR_GET_NO_NEW_PRIVS 39
@@ -4888,6 +4893,200 @@ TEST(tsync_vs_dead_thread_leader)
4888 | 4893 | EXPECT_EQ(0, status);
4889 | 4894 | }
4890 | 4895 |
| 4896 | +noinline int probed(void) |
| 4897 | +{ |
| 4898 | + return 1; |
| 4899 | +} |
| 4900 | + |
| 4901 | +static int parse_uint_from_file(const char *file, const char *fmt) |
| 4902 | +{ |
| 4903 | + int err = -1, ret; |
| 4904 | + FILE *f; |
| 4905 | + |
| 4906 | + f = fopen(file, "re"); |
| 4907 | + if (f) { |
| 4908 | + err = fscanf(f, fmt, &ret); |
| 4909 | + fclose(f); |
| 4910 | + } |
| 4911 | + return err == 1 ? ret : err; |
| 4912 | +} |
| 4913 | + |
| 4914 | +static int determine_uprobe_perf_type(void) |
| 4915 | +{ |
| 4916 | + const char *file = "/sys/bus/event_source/devices/uprobe/type"; |
| 4917 | + |
| 4918 | + return parse_uint_from_file(file, "%d\n"); |
| 4919 | +} |
| 4920 | + |
| 4921 | +static int determine_uprobe_retprobe_bit(void) |
| 4922 | +{ |
| 4923 | + const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; |
| 4924 | + |
| 4925 | + return parse_uint_from_file(file, "config:%d\n"); |
| 4926 | +} |
| 4927 | + |
| 4928 | +static ssize_t get_uprobe_offset(const void *addr) |
| 4929 | +{ |
| 4930 | + size_t start, base, end; |
| 4931 | + bool found = false; |
| 4932 | + char buf[256]; |
| 4933 | + FILE *f; |
| 4934 | + |
| 4935 | + f = fopen("/proc/self/maps", "r"); |
| 4936 | + if (!f) |
| 4937 | + return -1; |
| 4938 | + |
| 4939 | + while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) { |
| 4940 | + if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) { |
| 4941 | + found = true; |
| 4942 | + break; |
| 4943 | + } |
| 4944 | + } |
| 4945 | + fclose(f); |
| 4946 | + return found ? (uintptr_t)addr - start + base : -1; |
| 4947 | +} |
| 4948 | + |
| 4949 | +FIXTURE(URETPROBE) { |
| 4950 | + int fd; |
| 4951 | +}; |
| 4952 | + |
| 4953 | +FIXTURE_VARIANT(URETPROBE) { |
| 4954 | + /* |
| 4955 | + * All of the URETPROBE behaviors can be tested with the |
| 4956 | + * uretprobe either attached or not attached. |
| 4957 | + */ |
| 4958 | + bool attach; |
| 4959 | +}; |
| 4960 | + |
| 4961 | +FIXTURE_VARIANT_ADD(URETPROBE, attached) { |
| 4962 | + .attach = true, |
| 4963 | +}; |
| 4964 | + |
| 4965 | +FIXTURE_VARIANT_ADD(URETPROBE, not_attached) { |
| 4966 | + .attach = false, |
| 4967 | +}; |
| 4968 | + |
| 4969 | +FIXTURE_SETUP(URETPROBE) |
| 4970 | +{ |
| 4971 | + const size_t attr_sz = sizeof(struct perf_event_attr); |
| 4972 | + struct perf_event_attr attr; |
| 4973 | + ssize_t offset; |
| 4974 | + int type, bit; |
| 4975 | + |
| 4976 | +#ifndef __NR_uretprobe |
| 4977 | + SKIP(return, "__NR_uretprobe syscall not defined"); |
| 4978 | +#endif |
| 4979 | + |
| 4980 | + if (!variant->attach) |
| 4981 | + return; |
| 4982 | + |
| 4983 | + memset(&attr, 0, attr_sz); |
| 4984 | + |
| 4985 | + type = determine_uprobe_perf_type(); |
| 4986 | + ASSERT_GE(type, 0); |
| 4987 | + bit = determine_uprobe_retprobe_bit(); |
| 4988 | + ASSERT_GE(bit, 0); |
| 4989 | + offset = get_uprobe_offset(probed); |
| 4990 | + ASSERT_GE(offset, 0); |
| 4991 | + |
| 4992 | + attr.config |= 1 << bit; |
| 4993 | + attr.size = attr_sz; |
| 4994 | + attr.type = type; |
| 4995 | + attr.config1 = ptr_to_u64("/proc/self/exe"); |
| 4996 | + attr.config2 = offset; |
| 4997 | + |
| 4998 | + self->fd = syscall(__NR_perf_event_open, &attr, |
| 4999 | + getpid() /* pid */, -1 /* cpu */, -1 /* group_fd */, |
| 5000 | + PERF_FLAG_FD_CLOEXEC); |
| 5001 | +} |
| 5002 | + |
| 5003 | +FIXTURE_TEARDOWN(URETPROBE) |
| 5004 | +{ |
| 5005 | + /* We could call close(self->fd), but that would require an extra |
| 5006 | + * filter entry, and the test calls _exit() right away anyway. |
| 5007 | + */ |
| 5008 | +} |
| 5009 | + |
| 5010 | +static int run_probed_with_filter(struct sock_fprog *prog) |
| 5011 | +{ |
| 5012 | + if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) || |
| 5013 | + seccomp(SECCOMP_SET_MODE_FILTER, 0, prog)) { |
| 5014 | + return -1; |
| 5015 | + } |
| 5016 | + |
| 5017 | + probed(); |
| 5018 | + return 0; |
| 5019 | +} |
| 5020 | + |
| 5021 | +TEST_F(URETPROBE, uretprobe_default_allow) |
| 5022 | +{ |
| 5023 | + struct sock_filter filter[] = { |
| 5024 | + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), |
| 5025 | + }; |
| 5026 | + struct sock_fprog prog = { |
| 5027 | + .len = (unsigned short)ARRAY_SIZE(filter), |
| 5028 | + .filter = filter, |
| 5029 | + }; |
| 5030 | + |
| 5031 | + ASSERT_EQ(0, run_probed_with_filter(&prog)); |
| 5032 | +} |
| 5033 | + |
| 5034 | +TEST_F(URETPROBE, uretprobe_default_block) |
| 5035 | +{ |
| 5036 | + struct sock_filter filter[] = { |
| 5037 | + BPF_STMT(BPF_LD|BPF_W|BPF_ABS, |
| 5038 | + offsetof(struct seccomp_data, nr)), |
| 5039 | + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit_group, 1, 0), |
| 5040 | + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), |
| 5041 | + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), |
| 5042 | + }; |
| 5043 | + struct sock_fprog prog = { |
| 5044 | + .len = (unsigned short)ARRAY_SIZE(filter), |
| 5045 | + .filter = filter, |
| 5046 | + }; |
| 5047 | + |
| 5048 | + ASSERT_EQ(0, run_probed_with_filter(&prog)); |
| 5049 | +} |
| 5050 | + |
| 5051 | +TEST_F(URETPROBE, uretprobe_block_uretprobe_syscall) |
| 5052 | +{ |
| 5053 | + struct sock_filter filter[] = { |
| 5054 | + BPF_STMT(BPF_LD|BPF_W|BPF_ABS, |
| 5055 | + offsetof(struct seccomp_data, nr)), |
| 5056 | +#ifdef __NR_uretprobe |
| 5057 | + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uretprobe, 0, 1), |
| 5058 | +#endif |
| 5059 | + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), |
| 5060 | + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), |
| 5061 | + }; |
| 5062 | + struct sock_fprog prog = { |
| 5063 | + .len = (unsigned short)ARRAY_SIZE(filter), |
| 5064 | + .filter = filter, |
| 5065 | + }; |
| 5066 | + |
| 5067 | + ASSERT_EQ(0, run_probed_with_filter(&prog)); |
| 5068 | +} |
| 5069 | + |
| 5070 | +TEST_F(URETPROBE, uretprobe_default_block_with_uretprobe_syscall) |
| 5071 | +{ |
| 5072 | + struct sock_filter filter[] = { |
| 5073 | + BPF_STMT(BPF_LD|BPF_W|BPF_ABS, |
| 5074 | + offsetof(struct seccomp_data, nr)), |
| 5075 | +#ifdef __NR_uretprobe |
| 5076 | + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uretprobe, 2, 0), |
| 5077 | +#endif |
| 5078 | + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit_group, 1, 0), |
| 5079 | + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), |
| 5080 | + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), |
| 5081 | + }; |
| 5082 | + struct sock_fprog prog = { |
| 5083 | + .len = (unsigned short)ARRAY_SIZE(filter), |
| 5084 | + .filter = filter, |
| 5085 | + }; |
| 5086 | + |
| 5087 | + ASSERT_EQ(0, run_probed_with_filter(&prog)); |
| 5088 | +} |
| 5089 | + |
4891 | 5090 | /*
4892 | 5091 | * TODO:
4893 | 5092 | * - expand NNP testing
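
For context on what these tests exercise: on architectures that define __NR_uretprobe, the uretprobe return trampoline re-enters the kernel through that dedicated syscall, so a sandbox that installs a default-kill seccomp allow-list has to permit it or any uretprobe firing inside the sandboxed process will kill the process. The sketch below is illustrative only and not part of this commit; the allow-list (write, exit_group) is hypothetical, and a production filter should also validate seccomp_data->arch before matching raw syscall numbers.

```c
/*
 * Illustrative sandbox sketch (not from this commit). The allow-list is
 * hypothetical; real filters should also check seccomp_data->arch.
 */
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_filter(void)
{
	struct sock_filter filter[] = {
		/* Load the syscall number. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
#ifdef __NR_uretprobe
		/* Let the kernel's uretprobe trampoline syscall through. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uretprobe, 3, 0),
#endif
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_write, 2, 0),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group, 1, 0),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}

int main(void)
{
	if (install_filter()) {
		perror("seccomp");
		return 1;
	}
	/* Only write(), exit_group() and, if defined, uretprobe pass now. */
	write(1, "sandboxed\n", 10);
	return 0;
}
```

Guarding the extra BPF_JUMP with #ifdef __NR_uretprobe, as the tests above do, keeps the same filter buildable on architectures that do not provide the syscall.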