
Commit 17800f9

[tests] Add tests for forthcoming funcattrs nosync inference improvement
These are essentially all of the Attributor tests for the same attribute, with some minor cleanup for readability, and the check lines autogenerated.
1 parent 76d9bc7 commit 17800f9
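The commit message notes the check lines were autogenerated; for an opt-driven test like this one, that is typically done with LLVM's update_test_checks.py utility. A rough sketch of the invocation (the build directory path and the --opt-binary value are assumptions about a local checkout, not part of this commit):

llvm/utils/update_test_checks.py --opt-binary=build/bin/opt llvm/test/Transforms/FunctionAttrs/nosync.ll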

File tree: 1 file changed (+218, -0)

llvm/test/Transforms/FunctionAttrs/nosync.ll

Lines changed: 218 additions & 0 deletions
@@ -2,6 +2,8 @@
; RUN: opt < %s -function-attrs -S | FileCheck %s
; RUN: opt < %s -passes=function-attrs -S | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; Base case, empty function
define void @test1() {
; CHECK: Function Attrs: norecurse nosync nounwind readnone willreturn

@@ -89,3 +91,219 @@ define void @test8(i8* %p) {
  ret void
}

; singlethread fences are okay
define void @test9(i8* %p) {
; CHECK: Function Attrs: nofree norecurse nounwind willreturn
; CHECK-LABEL: @test9(
; CHECK-NEXT: fence syncscope("singlethread") seq_cst
; CHECK-NEXT: ret void
;
  fence syncscope("singlethread") seq_cst
  ret void
}

; atomic load with monotonic ordering
define i32 @load_monotonic(i32* nocapture readonly %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn
; CHECK-LABEL: @load_monotonic(
; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] monotonic, align 4
; CHECK-NEXT: ret i32 [[TMP2]]
;
  %2 = load atomic i32, i32* %0 monotonic, align 4
  ret i32 %2
}

; atomic store with monotonic ordering.
define void @store_monotonic(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn
; CHECK-LABEL: @store_monotonic(
; CHECK-NEXT: store atomic i32 10, i32* [[TMP0:%.*]] monotonic, align 4
; CHECK-NEXT: ret void
;
  store atomic i32 10, i32* %0 monotonic, align 4
  ret void
}

; negative, should not deduce nosync
; atomic load with acquire ordering.
define i32 @load_acquire(i32* nocapture readonly %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn
; CHECK-LABEL: @load_acquire(
; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] acquire, align 4
; CHECK-NEXT: ret i32 [[TMP2]]
;
  %2 = load atomic i32, i32* %0 acquire, align 4
  ret i32 %2
}

define i32 @load_unordered(i32* nocapture readonly %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: norecurse nounwind readonly uwtable willreturn
; CHECK-LABEL: @load_unordered(
; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] unordered, align 4
; CHECK-NEXT: ret i32 [[TMP2]]
;
  %2 = load atomic i32, i32* %0 unordered, align 4
  ret i32 %2
}

; atomic store with unordered ordering.
define void @store_unordered(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; CHECK-LABEL: @store_unordered(
; CHECK-NEXT: store atomic i32 10, i32* [[TMP0:%.*]] unordered, align 4
; CHECK-NEXT: ret void
;
  store atomic i32 10, i32* %0 unordered, align 4
  ret void
}


; negative, should not deduce nosync
; atomic store with release ordering
define void @load_release(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn
; CHECK-LABEL: @load_release(
; CHECK-NEXT: store atomic volatile i32 10, i32* [[TMP0:%.*]] release, align 4
; CHECK-NEXT: ret void
;
  store atomic volatile i32 10, i32* %0 release, align 4
  ret void
}

; negative, volatile atomic store with release ordering
define void @load_volatile_release(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn
; CHECK-LABEL: @load_volatile_release(
; CHECK-NEXT: store atomic volatile i32 10, i32* [[TMP0:%.*]] release, align 4
; CHECK-NEXT: ret void
;
  store atomic volatile i32 10, i32* %0 release, align 4
  ret void
}

; volatile store.
define void @volatile_store(i32* %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn
; CHECK-LABEL: @volatile_store(
; CHECK-NEXT: store volatile i32 14, i32* [[TMP0:%.*]], align 4
; CHECK-NEXT: ret void
;
  store volatile i32 14, i32* %0, align 4
  ret void
}

; negative, should not deduce nosync
; volatile load.
define i32 @volatile_load(i32* %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: nofree norecurse nounwind uwtable willreturn
; CHECK-LABEL: @volatile_load(
; CHECK-NEXT: [[TMP2:%.*]] = load volatile i32, i32* [[TMP0:%.*]], align 4
; CHECK-NEXT: ret i32 [[TMP2]]
;
  %2 = load volatile i32, i32* %0, align 4
  ret i32 %2
}

; CHECK: Function Attrs: noinline nosync nounwind uwtable
; CHECK-NEXT: declare void @nosync_function()
declare void @nosync_function() noinline nounwind uwtable nosync

define void @call_nosync_function() nounwind uwtable noinline {
; CHECK: Function Attrs: noinline nounwind uwtable
; CHECK-LABEL: @call_nosync_function(
; CHECK-NEXT: tail call void @nosync_function() #[[ATTR7:[0-9]+]]
; CHECK-NEXT: ret void
;
  tail call void @nosync_function() noinline nounwind uwtable
  ret void
}

; CHECK: Function Attrs: noinline nounwind uwtable
; CHECK-NEXT: declare void @might_sync()
declare void @might_sync() noinline nounwind uwtable

define void @call_might_sync() nounwind uwtable noinline {
; CHECK: Function Attrs: noinline nounwind uwtable
; CHECK-LABEL: @call_might_sync(
; CHECK-NEXT: tail call void @might_sync() #[[ATTR7]]
; CHECK-NEXT: ret void
;
  tail call void @might_sync() noinline nounwind uwtable
  ret void
}

declare void @llvm.memcpy(i8* %dest, i8* %src, i32 %len, i1 %isvolatile)
declare void @llvm.memset(i8* %dest, i8 %val, i32 %len, i1 %isvolatile)

; negative, checking volatile intrinsics.
define i32 @memcpy_volatile(i8* %ptr1, i8* %ptr2) {
; CHECK: Function Attrs: nofree nounwind willreturn
; CHECK-LABEL: @memcpy_volatile(
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[PTR1:%.*]], i8* [[PTR2:%.*]], i32 8, i1 true)
; CHECK-NEXT: ret i32 4
;
  call void @llvm.memcpy(i8* %ptr1, i8* %ptr2, i32 8, i1 1)
  ret i32 4
}

; positive, non-volatile intrinsic.
define i32 @memset_non_volatile(i8* %ptr1, i8 %val) {
; CHECK: Function Attrs: nofree nounwind willreturn writeonly
; CHECK-LABEL: @memset_non_volatile(
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[PTR1:%.*]], i8 [[VAL:%.*]], i32 8, i1 false)
; CHECK-NEXT: ret i32 4
;
  call void @llvm.memset(i8* %ptr1, i8 %val, i32 8, i1 0)
  ret i32 4
}

; negative, inline assembly.
define i32 @inline_asm_test(i32 %x) {
; CHECK-LABEL: @inline_asm_test(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 asm "bswap $0", "=r,r"(i32 [[X:%.*]])
; CHECK-NEXT: ret i32 4
;
  call i32 asm "bswap $0", "=r,r"(i32 %x)
  ret i32 4
}

declare void @readnone_test() convergent readnone

; negative. Convergent
define void @convergent_readnone(){
; CHECK: Function Attrs: nosync readnone
; CHECK-LABEL: @convergent_readnone(
; CHECK-NEXT: call void @readnone_test()
; CHECK-NEXT: ret void
;
  call void @readnone_test()
  ret void
}

; CHECK: Function Attrs: nounwind
; CHECK-NEXT: declare void @llvm.x86.sse2.clflush(i8*)
declare void @llvm.x86.sse2.clflush(i8*)
@a = common global i32 0, align 4

; negative. Synchronizing intrinsic
define void @i_totally_sync() {
; CHECK: Function Attrs: nounwind
; CHECK-LABEL: @i_totally_sync(
; CHECK-NEXT: tail call void @llvm.x86.sse2.clflush(i8* bitcast (i32* @a to i8*))
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.sse2.clflush(i8* bitcast (i32* @a to i8*))
  ret void
}

declare float @llvm.cos(float %val) readnone

define float @cos_test(float %x) {
; CHECK: Function Attrs: nosync nounwind readnone willreturn
; CHECK-LABEL: @cos_test(
; CHECK-NEXT: [[C:%.*]] = call float @llvm.cos.f32(float [[X:%.*]])
; CHECK-NEXT: ret float [[C]]
;
  %c = call float @llvm.cos(float %x)
  ret float %c
}
