Skip to content

Commit 89f0f27

Browse files
committed
[BasicAA] Add atomic mem intrinsic tests.
1 parent 2959e08 commit 89f0f27

File tree

1 file changed

+125
-0
lines changed

1 file changed

+125
-0
lines changed
Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
; RUN: opt -basic-aa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 %s | FileCheck %s

; Element-wise unordered-atomic memset. The intrinsic's mangled-name suffix
; must match the type of the length operand (i64 here), so the correct name is
; @llvm.memset.element.unordered.atomic.p0i8.i64 — which is also the name the
; CHECK lines below expect aa-eval to print.
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8*, i8, i64, i32)

; Constant-size atomic memset: the 4-byte write starting at %a must-aliases %a
; itself; the geps at offsets 1 and 5 only partially overlap (or fall outside)
; the written range, so they are plain Mod with no Must flag.
define void @test_memset_element_unordered_atomic_const_size(i8* noalias %a) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_const_size
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.5 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
;
entry:
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
  %a.gep.1 = getelementptr i8, i8* %a, i32 1
  store i8 0, i8* %a.gep.1
  %a.gep.5 = getelementptr i8, i8* %a, i32 5
  store i8 1, i8* %a.gep.5
  ret void
}

; Variable-size atomic memset: with an unknown length %n, the write starting
; at %a still must-aliases %a, but the offset geps can only be reported as Mod.
define void @test_memset_element_unordered_atomic_variable_size(i8* noalias %a, i64 %n) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_variable_size
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.5 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
;
entry:
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
  %a.gep.1 = getelementptr i8, i8* %a, i32 1
  store i8 0, i8* %a.gep.1
  %a.gep.5 = getelementptr i8, i8* %a, i32 5
  store i8 1, i8* %a.gep.5
  ret void
}
35+
; Element-wise unordered-atomic memcpy; name suffix matches the i64 length
; operand, and the pointer attributes encode dest-writeonly / src-readonly.
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32)

; Constant-size atomic memcpy from %a to %b (noalias): pointers based on the
; source %a are Ref-only, pointers based on the destination %b are Mod-only.
; NOTE(review): unlike the memset test, no MustAlias is expected for the base
; pointers here — presumably because two locations are involved; confirm
; against aa-eval output.
define void @test_memcpy_element_unordered_atomic_const_size(i8* noalias %a, i8* noalias %b) {
; CHECK-LABEL: Function: test_memcpy_element_unordered_atomic_const_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
;
entry:
  ; Touch the source buffer before the copy so aa-eval has %a-based pointers
  ; to pair with the intrinsic call.
  %a.gep.1 = getelementptr i8, i8* %a, i32 1
  store i8 0, i8* %a.gep.1
  %a.gep.5 = getelementptr i8, i8* %a, i32 5
  store i8 1, i8* %a.gep.5
  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
  ; Touch the destination buffer after the copy for the %b-based pairings.
  %b.gep.1 = getelementptr i8, i8* %b, i32 1
  store i8 0, i8* %b.gep.1
  %b.gep.5 = getelementptr i8, i8* %b, i32 5
  store i8 1, i8* %b.gep.5
  ret void
}
; Same as the const-size memcpy test but with an unknown length %n: the
; Mod/Ref split between source (%a, Ref) and destination (%b, Mod) pointers
; must be preserved even when the copied size is not a compile-time constant.
define void @test_memcpy_element_unordered_atomic_variable_size(i8* noalias %a, i8* noalias %b, i64 %n) {
; CHECK-LABEL: Function: test_memcpy_element_unordered_atomic_variable_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
;
entry:
  ; Source-side accesses before the copy.
  %a.gep.1 = getelementptr i8, i8* %a, i32 1
  store i8 0, i8* %a.gep.1
  %a.gep.5 = getelementptr i8, i8* %a, i32 5
  store i8 1, i8* %a.gep.5
  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
  ; Destination-side accesses after the copy.
  %b.gep.1 = getelementptr i8, i8* %b, i32 1
  store i8 0, i8* %b.gep.1
  %b.gep.5 = getelementptr i8, i8* %b, i32 5
  store i8 1, i8* %b.gep.5
  ret void
}
; Element-wise unordered-atomic memmove; signature mirrors the atomic memcpy
; declaration above (dest writeonly, src readonly, i64 length).
declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32)

; Constant-size atomic memmove: expected Mod/Ref results are identical to the
; memcpy case — source-based pointers are Just Ref, destination-based pointers
; are Just Mod.
define void @test_memmove_element_unordered_atomic_const_size(i8* noalias %a, i8* noalias %b) {
; CHECK-LABEL: Function: test_memmove_element_unordered_atomic_const_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
;
entry:
  ; Source-side accesses before the move.
  %a.gep.1 = getelementptr i8, i8* %a, i32 1
  store i8 0, i8* %a.gep.1
  %a.gep.5 = getelementptr i8, i8* %a, i32 5
  store i8 1, i8* %a.gep.5
  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
  ; Destination-side accesses after the move.
  %b.gep.1 = getelementptr i8, i8* %b, i32 1
  store i8 0, i8* %b.gep.1
  %b.gep.5 = getelementptr i8, i8* %b, i32 5
  store i8 1, i8* %b.gep.5
  ret void
}
; Variable-size atomic memmove: the Just Ref (source %a) / Just Mod
; (destination %b) classification must hold with an unknown length %n as well.
define void @test_memmove_element_unordered_atomic_variable_size(i8* noalias %a, i8* noalias %b, i64 %n) {
; CHECK-LABEL: Function: test_memmove_element_unordered_atomic_variable_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
;
entry:
  ; Source-side accesses before the move.
  %a.gep.1 = getelementptr i8, i8* %a, i32 1
  store i8 0, i8* %a.gep.1
  %a.gep.5 = getelementptr i8, i8* %a, i32 5
  store i8 1, i8* %a.gep.5
  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
  ; Destination-side accesses after the move.
  %b.gep.1 = getelementptr i8, i8* %b, i32 1
  store i8 0, i8* %b.gep.1
  %b.gep.5 = getelementptr i8, i8* %b, i32 5
  store i8 1, i8* %b.gep.5
  ret void
}

0 commit comments

Comments
 (0)