 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 #include <stdbool.h>
+#include <stdatomic.h>
 #include "bpf_arena_common.h"
 
 struct {
@@ -77,50 +78,79 @@ int sub(const void *ctx)
 	return 0;
 }
 
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global and32_value = 0x110;
+#else
 __u64 __arena_global and64_value = (0x110ull << 32);
 __u32 __arena_global and32_value = 0x110;
+#endif
 
 SEC("raw_tp/sys_enter")
 int and(const void *ctx)
 {
 	if (pid != (bpf_get_current_pid_tgid() >> 32))
 		return 0;
 #ifdef ENABLE_ATOMICS_TESTS
-
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+	__c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
+	__c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
+#else
 	__sync_fetch_and_and(&and64_value, 0x011ull << 32);
 	__sync_fetch_and_and(&and32_value, 0x011);
+#endif
 #endif
 
 	return 0;
 }
 
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u32 __arena_global or32_value = 0x110;
+_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
+#else
 __u32 __arena_global or32_value = 0x110;
 __u64 __arena_global or64_value = (0x110ull << 32);
+#endif
 
 SEC("raw_tp/sys_enter")
 int or(const void *ctx)
 {
 	if (pid != (bpf_get_current_pid_tgid() >> 32))
 		return 0;
 #ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+	__c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
+	__c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
+#else
 	__sync_fetch_and_or(&or64_value, 0x011ull << 32);
 	__sync_fetch_and_or(&or32_value, 0x011);
+#endif
 #endif
 
 	return 0;
 }
 
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global xor32_value = 0x110;
+#else
 __u64 __arena_global xor64_value = (0x110ull << 32);
 __u32 __arena_global xor32_value = 0x110;
+#endif
 
 SEC("raw_tp/sys_enter")
 int xor(const void *ctx)
 {
 	if (pid != (bpf_get_current_pid_tgid() >> 32))
 		return 0;
 #ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+	__c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
+	__c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
+#else
 	__sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
 	__sync_fetch_and_xor(&xor32_value, 0x011);
+#endif
 #endif
 
 	return 0;
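
Whichever branch the preprocessor selects, the end state is the same once each program has fired exactly once with ENABLE_ATOMICS_TESTS defined: 0x110 & 0x011 = 0x010, 0x110 | 0x011 = 0x111, and 0x110 ^ 0x011 = 0x101, with the 64-bit variants applying the same patterns in the upper 32 bits. Below is a minimal sketch of a userspace check, assuming a libbpf skeleton named arena_atomics that exposes the arena globals via skel->arena and the ASSERT_EQ() helper from the selftests' test_progs.h; these names follow common BPF selftest conventions and are not taken from this diff.

/* Sketch only: the skeleton name, the skel->arena accessor, and ASSERT_EQ()
 * are assumed from the usual BPF selftest layout, not from this diff.
 */
#include "test_progs.h"
#include "arena_atomics.skel.h"

static void check_bitwise_results(struct arena_atomics *skel)
{
	/* and: 0x110 & 0x011 == 0x010 */
	ASSERT_EQ(skel->arena->and64_value, 0x010ull << 32, "and64_value");
	ASSERT_EQ(skel->arena->and32_value, 0x010, "and32_value");

	/* or: 0x110 | 0x011 == 0x111 */
	ASSERT_EQ(skel->arena->or64_value, 0x111ull << 32, "or64_value");
	ASSERT_EQ(skel->arena->or32_value, 0x111, "or32_value");

	/* xor: 0x110 ^ 0x011 == 0x101; unlike and/or this is not
	 * idempotent, so the raw_tp program must run exactly once.
	 */
	ASSERT_EQ(skel->arena->xor64_value, 0x101ull << 32, "xor64_value");
	ASSERT_EQ(skel->arena->xor32_value, 0x101, "xor32_value");
}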