Skip to content

Commit b0ac09f

Browse files
Yonghong Song authored and Kernel Patches Daemon committed
selftests/bpf: Fix arena_atomics failure due to llvm change
llvm change [1] made a change such that __sync_fetch_and_{and,or,xor}() will generate atomic_fetch_*() insns even if the return value is not used. This is a deliberate choice to make sure barrier semantics are preserved from source code to asm insn. But the change in [1] caused arena_atomics selftest failure. test_arena_atomics:PASS:arena atomics skeleton open 0 nsec libbpf: prog 'and': BPF program load failed: Permission denied libbpf: prog 'and': -- BEGIN PROG LOAD LOG -- arg#0 reference type('UNKNOWN ') size cannot be determined: -22 0: R1=ctx() R10=fp0 ; if (pid != (bpf_get_current_pid_tgid() >> 32)) @ arena_atomics.c:87 0: (18) r1 = 0xffffc90000064000 ; R1_w=map_value(map=arena_at.bss,ks=4,vs=4) 2: (61) r6 = *(u32 *)(r1 +0) ; R1_w=map_value(map=arena_at.bss,ks=4,vs=4) R6_w=scalar(smin=0,smax=umax=0xffffffff,v ar_off=(0x0; 0xffffffff)) 3: (85) call bpf_get_current_pid_tgid#14 ; R0_w=scalar() 4: (77) r0 >>= 32 ; R0_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff)) 5: (5d) if r0 != r6 goto pc+11 ; R0_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff)) R6_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0x) ; __sync_fetch_and_and(&and64_value, 0x011ull << 32); @ arena_atomics.c:91 6: (18) r1 = 0x100000000060 ; R1_w=scalar() 8: (bf) r1 = addr_space_cast(r1, 0, 1) ; R1_w=arena 9: (18) r2 = 0x1100000000 ; R2_w=0x1100000000 11: (db) r2 = atomic64_fetch_and((u64 *)(r1 +0), r2) BPF_ATOMIC stores into R1 arena is not allowed processed 9 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0 -- END PROG LOAD LOG -- libbpf: prog 'and': failed to load: -13 libbpf: failed to load object 'arena_atomics' libbpf: failed to load BPF skeleton 'arena_atomics': -13 test_arena_atomics:FAIL:arena atomics skeleton load unexpected error: -13 (errno 13) #3 arena_atomics:FAIL The reason of the failure is due to [2] where atomic{64,}_fetch_{and,or,xor}() are not allowed by arena addresses. 
Version 2 of the patch fixed the issue by using inline asm ([3]). But further discussion suggested finding a way from source to generate a locked insn, which is more user friendly. So in the not-yet-merged llvm patch ([4]), if relaxed memory ordering is used and the return value is not used, a locked insn can be generated. So with llvm patch [4] to compile the bpf selftest, the following code __c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed); is able to generate a locked insn, hence fixing the selftest failure. [1] llvm/llvm-project#106494 [2] d503a04 ("bpf: Add support for certain atomics in bpf_arena to x86 JIT") [3] https://lore.kernel.org/bpf/[email protected]/ [4] llvm/llvm-project#107343 Signed-off-by: Yonghong Song <[email protected]>
1 parent 93bade1 commit b0ac09f

File tree

1 file changed

+31
-1
lines changed

1 file changed

+31
-1
lines changed

tools/testing/selftests/bpf/progs/arena_atomics.c

Lines changed: 31 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
#include <bpf/bpf_helpers.h>
55
#include <bpf/bpf_tracing.h>
66
#include <stdbool.h>
7+
#include <stdatomic.h>
78
#include "bpf_arena_common.h"
89

910
struct {
@@ -77,50 +78,79 @@ int sub(const void *ctx)
7778
return 0;
7879
}
7980

81+
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
82+
_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
83+
_Atomic __u32 __arena_global and32_value = 0x110;
84+
#else
8085
__u64 __arena_global and64_value = (0x110ull << 32);
8186
__u32 __arena_global and32_value = 0x110;
87+
#endif
8288

8389
SEC("raw_tp/sys_enter")
8490
int and(const void *ctx)
8591
{
8692
if (pid != (bpf_get_current_pid_tgid() >> 32))
8793
return 0;
8894
#ifdef ENABLE_ATOMICS_TESTS
89-
95+
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
96+
__c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
97+
__c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
98+
#else
9099
__sync_fetch_and_and(&and64_value, 0x011ull << 32);
91100
__sync_fetch_and_and(&and32_value, 0x011);
101+
#endif
92102
#endif
93103

94104
return 0;
95105
}
96106

107+
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
108+
_Atomic __u32 __arena_global or32_value = 0x110;
109+
_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
110+
#else
97111
__u32 __arena_global or32_value = 0x110;
98112
__u64 __arena_global or64_value = (0x110ull << 32);
113+
#endif
99114

100115
SEC("raw_tp/sys_enter")
101116
int or(const void *ctx)
102117
{
103118
if (pid != (bpf_get_current_pid_tgid() >> 32))
104119
return 0;
105120
#ifdef ENABLE_ATOMICS_TESTS
121+
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
122+
__c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
123+
__c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
124+
#else
106125
__sync_fetch_and_or(&or64_value, 0x011ull << 32);
107126
__sync_fetch_and_or(&or32_value, 0x011);
127+
#endif
108128
#endif
109129

110130
return 0;
111131
}
112132

133+
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
134+
_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
135+
_Atomic __u32 __arena_global xor32_value = 0x110;
136+
#else
113137
__u64 __arena_global xor64_value = (0x110ull << 32);
114138
__u32 __arena_global xor32_value = 0x110;
139+
#endif
115140

116141
SEC("raw_tp/sys_enter")
117142
int xor(const void *ctx)
118143
{
119144
if (pid != (bpf_get_current_pid_tgid() >> 32))
120145
return 0;
121146
#ifdef ENABLE_ATOMICS_TESTS
147+
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
148+
__c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
149+
__c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
150+
#else
122151
__sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
123152
__sync_fetch_and_xor(&xor32_value, 0x011);
153+
#endif
124154
#endif
125155

126156
return 0;

0 commit comments

Comments
 (0)