
Commit 8ea39c0

[clang] Convert a few OpenMP tests to use opaque pointers
Reviewed By: nikic
Differential Revision: https://reviews.llvm.org/D150530

1 parent: ddb2188

7 files changed (+1275, -1384 lines)
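
The conversion follows the standard opaque-pointer migration pattern visible in the hunks below: the RUN lines drop -no-opaque-pointers, typed pointer spellings such as i8* and i32* in the CHECK patterns become ptr, and the bitcast instructions and constant-expression bitcasts that existed only to reconcile pointee types disappear from the expected IR. As a minimal sketch of the post-conversion style (a hypothetical reduced test for illustration; the function name read_int and the trimmed RUN line are not part of this commit):

// RUN: %clang_cc1 -fopenmp -triple x86_64-apple-darwin10 -x c -emit-llvm %s -o - | FileCheck %s
int ix, iv;
// CHECK-LABEL: @read_int(
void read_int(void) {
// With opaque pointers the pointee type no longer appears in the load;
// the pre-conversion pattern was: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
  iv = ix;
}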

clang/test/OpenMP/atomic_capture_codegen.cpp

Lines changed: 347 additions & 384 deletions
Large diffs are not rendered by default.

clang/test/OpenMP/atomic_read_codegen.c

Lines changed: 71 additions & 76 deletions
@@ -1,10 +1,10 @@
-// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
 
-// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
 // SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
 // expected-no-diagnostics
 // REQUIRES: x86-registered-target
@@ -84,64 +84,63 @@ register int rix __asm__("esp");
 
 // CHECK-LABEL: @main(
 int main(void) {
-// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+// CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
 // CHECK: store i8
 #pragma omp atomic read
   bv = bx;
-// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+// CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
 // CHECK: store i8
 #pragma omp atomic read
   cv = cx;
-// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+// CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
 // CHECK: store i8
 #pragma omp atomic read
   ucv = ucx;
-// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+// CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
 // CHECK: store i16
 #pragma omp atomic read
   sv = sx;
-// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+// CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
 // CHECK: store i16
 #pragma omp atomic read
   usv = usx;
-// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+// CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
 // CHECK: store i32
 #pragma omp atomic read
   iv = ix;
-// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+// CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
 // CHECK: store i32
 #pragma omp atomic read
   uiv = uix;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i64
 #pragma omp atomic read
   lv = lx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i64
 #pragma omp atomic read
   ulv = ulx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i64
 #pragma omp atomic read
   llv = llx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i64
 #pragma omp atomic read
   ullv = ullx;
-// CHECK: load atomic i32, i32* bitcast (float* {{.*}} monotonic, align 4
+// CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
 // CHECK: bitcast i32 {{.*}} to float
 // CHECK: store float
 #pragma omp atomic read
   fv = fx;
-// CHECK: load atomic i64, i64* bitcast (double* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: bitcast i64 {{.*}} to double
 // CHECK: store double
 #pragma omp atomic read
   dv = dx;
-// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
-// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
-// CHECK: store i128 [[LD]], i128* [[BITCAST]]
-// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i128, ptr {{.*}} monotonic, align 16
+// CHECK: store i128 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load x86_fp80, ptr [[LDTEMP]]
 // CHECK: store x86_fp80 [[LD]]
 #pragma omp atomic read
   ldv = ldx;
@@ -161,173 +160,169 @@ int main(void) {
 // CHECK: store double
 #pragma omp atomic seq_cst read
   cdv = cdx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i8
 #pragma omp atomic read
   bv = ulx;
-// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+// CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
 // CHECK: store i8
 #pragma omp atomic read
   cv = bx;
-// CHECK: load atomic i8, i8* {{.*}} seq_cst, align 1
+// CHECK: load atomic i8, ptr {{.*}} seq_cst, align 1
 // CHECK: call{{.*}} @__kmpc_flush(
 // CHECK: store i8
 #pragma omp atomic read seq_cst
   ucv = cx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i16
 #pragma omp atomic read
   sv = ulx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i16
 #pragma omp atomic read
   usv = lx;
-// CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+// CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
 // CHECK: call{{.*}} @__kmpc_flush(
 // CHECK: store i32
 #pragma omp atomic seq_cst, read
   iv = uix;
-// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+// CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
 // CHECK: store i32
 #pragma omp atomic read
   uiv = ix;
 // CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
 // CHECK: store i64
 #pragma omp atomic read
   lv = cix;
-// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+// CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
 // CHECK: store i64
 #pragma omp atomic read
   ulv = fx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store i64
 #pragma omp atomic read
   llv = dx;
-// CHECK: load atomic i128, i128* {{.*}} monotonic, align 16
+// CHECK: load atomic i128, ptr {{.*}} monotonic, align 16
 // CHECK: store i64
 #pragma omp atomic read
   ullv = ldx;
 // CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
 // CHECK: store float
 #pragma omp atomic read
   fv = cix;
-// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+// CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
 // CHECK: store double
 #pragma omp atomic read
   dv = sx;
-// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+// CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bx;
-// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+// CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
 // CHECK: store i32
 // CHECK: store i32
 #pragma omp atomic read
   civ = bx;
-// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+// CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
 // CHECK: store float
 // CHECK: store float
 #pragma omp atomic read
   cfv = usx;
-// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+// CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
 // CHECK: store double
 // CHECK: store double
 #pragma omp atomic read
   cdv = llx;
-// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic, align 16
-// CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
-// CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
-// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+// CHECK: [[I128VAL:%.+]] = load atomic i128, ptr @{{.+}} monotonic, align 16
+// CHECK: store i128 [[I128VAL]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load <4 x i32>, ptr [[LDTEMP]]
 // CHECK: extractelement <4 x i32> [[LD]]
 // CHECK: store i8
 #pragma omp atomic read
   bv = int4x[0];
-// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic, align 4
-// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i32, ptr getelementptr (i8, ptr @{{.+}}, i64 4) monotonic, align 4
+// CHECK: store i32 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
 // CHECK: ashr i32 [[SHL]], 1
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx.a;
-// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
-// CHECK: call void @__atomic_load(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
-// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: call void @__atomic_load(i64 noundef 4, ptr noundef getelementptr (i8, ptr @bfx_packed, i64 4), ptr noundef [[LDTEMP:%.+]], i32 noundef 0)
+// CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
 // CHECK: ashr i32 [[SHL]], 1
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx_packed.a;
-// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic, align 4
-// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i32, ptr @bfx2 monotonic, align 4
+// CHECK: store i32 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
 // CHECK: ashr i32 [[LD]], 31
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx2.a;
-// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic, align 1
-// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx2_packed, i64 3) monotonic, align 1
+// CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
 // CHECK: ashr i8 [[LD]], 7
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx2_packed.a;
-// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic, align 4
-// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i32, ptr @bfx3 monotonic, align 4
+// CHECK: store i32 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
 // CHECK: ashr i32 [[SHL]], 18
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx3.a;
-// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
-// CHECK: call void @__atomic_load(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
-// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
+// CHECK: call void @__atomic_load(i64 noundef 3, ptr noundef getelementptr (i8, ptr @bfx3_packed, i64 1), ptr noundef [[LDTEMP:%.+]], i32 noundef 0)
+// CHECK: [[LD:%.+]] = load i24, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
 // CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
 // CHECK: sext i24 [[ASHR]] to i32
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx3_packed.a;
-// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
-// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i64, ptr @bfx4 monotonic, align 8
+// CHECK: store i64 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i64, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
 // CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
 // CHECK: trunc i64 [[ASHR]] to i32
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx4.a;
-// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic, align 1
-// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) monotonic, align 1
+// CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
 // CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
 // CHECK: sext i8 [[ASHR]] to i32
 // CHECK: store x86_fp80
 #pragma omp atomic relaxed read
   ldv = bfx4_packed.a;
-// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
-// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i64, ptr @bfx4 monotonic, align 8
+// CHECK: store i64 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i64, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
 // CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
 // CHECK: store x86_fp80
 #pragma omp atomic read relaxed
   ldv = bfx4.b;
-// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) acquire, align 1
-// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
-// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) acquire, align 1
+// CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
 // CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
 // CHECK: sext i8 [[ASHR]] to i64
 // CHECK: call{{.*}} @__kmpc_flush(
 // CHECK: store x86_fp80
 #pragma omp atomic read acquire
   ldv = bfx4_packed.b;
-// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic, align 8
-// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
-// CHECK: store i64 [[LD]], i64* [[BITCAST]]
-// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
+// CHECK: [[LD:%.+]] = load atomic i64, ptr @{{.+}} monotonic, align 8
+// CHECK: store i64 [[LD]], ptr [[LDTEMP:%.+]]
+// CHECK: [[LD:%.+]] = load <2 x float>, ptr [[LDTEMP]]
 // CHECK: extractelement <2 x float> [[LD]]
 // CHECK: store i64
 #pragma omp atomic read
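
To re-run just this converted test locally, llvm-lit can be pointed at the source file (a usage sketch; the relative paths assume a build directory next to the llvm-project checkout, with clang and FileCheck already built, which this commit does not specify):

# run from the LLVM build directory
./bin/llvm-lit -v ../llvm-project/clang/test/OpenMP/atomic_read_codegen.c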
