- // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
- // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
- // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
+ // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
+ // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
+ // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

- // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
- // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
- // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+ // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+ // RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
+ // RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
@@ -84,64 +84,63 @@ register int rix __asm__("esp");
// CHECK-LABEL: @main(
int main(void) {
- // CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+ // CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
bv = bx;
- // CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+ // CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
cv = cx;
- // CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+ // CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
ucv = ucx;
- // CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+ // CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
sv = sx;
- // CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+ // CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
usv = usx;
- // CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+ // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
iv = ix;
- // CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+ // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
uiv = uix;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
lv = lx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
ulv = ulx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
llv = llx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
ullv = ullx;
- // CHECK: load atomic i32, i32* bitcast (float* {{.*}} monotonic, align 4
+ // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
// CHECK: bitcast i32 {{.*}} to float
// CHECK: store float
#pragma omp atomic read
fv = fx;
- // CHECK: load atomic i64, i64* bitcast (double* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: bitcast i64 {{.*}} to double
// CHECK: store double
#pragma omp atomic read
dv = dx;
- // CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
- // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
- // CHECK: store i128 [[LD]], i128* [[BITCAST]]
- // CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i128, ptr {{.*}} monotonic, align 16
+ // CHECK: store i128 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load x86_fp80, ptr [[LDTEMP]]
// CHECK: store x86_fp80 [[LD]]
#pragma omp atomic read
ldv = ldx;
@@ -161,173 +160,169 @@ int main(void) {
// CHECK: store double
#pragma omp atomic seq_cst read
cdv = cdx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i8
#pragma omp atomic read
bv = ulx;
- // CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+ // CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
cv = bx;
- // CHECK: load atomic i8, i8* {{.*}} seq_cst, align 1
+ // CHECK: load atomic i8, ptr {{.*}} seq_cst, align 1
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i8
#pragma omp atomic read seq_cst
ucv = cx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
sv = ulx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
usv = lx;
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i32
#pragma omp atomic seq_cst, read
iv = uix;
- // CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+ // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
uiv = ix;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store i64
#pragma omp atomic read
lv = cix;
- // CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+ // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
// CHECK: store i64
#pragma omp atomic read
ulv = fx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
llv = dx;
- // CHECK: load atomic i128, i128* {{.*}} monotonic, align 16
+ // CHECK: load atomic i128, ptr {{.*}} monotonic, align 16
// CHECK: store i64
#pragma omp atomic read
ullv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store float
#pragma omp atomic read
fv = cix;
- // CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+ // CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
// CHECK: store double
#pragma omp atomic read
dv = sx;
- // CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+ // CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bx;
- // CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
+ // CHECK: load atomic i8, ptr {{.*}} monotonic, align 1
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
civ = bx;
- // CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
+ // CHECK: load atomic i16, ptr {{.*}} monotonic, align 2
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
cfv = usx;
- // CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
+ // CHECK: load atomic i64, ptr {{.*}} monotonic, align 8
// CHECK: store double
// CHECK: store double
#pragma omp atomic read
cdv = llx;
- // CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic, align 16
- // CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
- // CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
- // CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+ // CHECK: [[I128VAL:%.+]] = load atomic i128, ptr @{{.+}} monotonic, align 16
+ // CHECK: store i128 [[I128VAL]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load <4 x i32>, ptr [[LDTEMP]]
// CHECK: extractelement <4 x i32> [[LD]]
// CHECK: store i8
#pragma omp atomic read
bv = int4x[0];
- // CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic, align 4
- // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i32, ptr getelementptr (i8, ptr @{{.+}}, i64 4) monotonic, align 4
+ // CHECK: store i32 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx.a;
- // CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
- // CHECK: call void @__atomic_load(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
- // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+ // CHECK: call void @__atomic_load(i64 noundef 4, ptr noundef getelementptr (i8, ptr @bfx_packed, i64 4), ptr noundef [[LDTEMP:%.+]], i32 noundef 0)
+ // CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx_packed.a;
- // CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic, align 4
- // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i32, ptr @bfx2 monotonic, align 4
+ // CHECK: store i32 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
// CHECK: ashr i32 [[LD]], 31
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2.a;
- // CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic, align 1
- // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx2_packed, i64 3) monotonic, align 1
+ // CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: ashr i8 [[LD]], 7
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2_packed.a;
- // CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic, align 4
- // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i32, ptr @bfx3 monotonic, align 4
+ // CHECK: store i32 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i32, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
// CHECK: ashr i32 [[SHL]], 18
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3.a;
- // CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
- // CHECK: call void @__atomic_load(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
- // CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
+ // CHECK: call void @__atomic_load(i64 noundef 3, ptr noundef getelementptr (i8, ptr @bfx3_packed, i64 1), ptr noundef [[LDTEMP:%.+]], i32 noundef 0)
+ // CHECK: [[LD:%.+]] = load i24, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
// CHECK: sext i24 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3_packed.a;
- // CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
- // CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i64, ptr @bfx4 monotonic, align 8
+ // CHECK: store i64 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i64, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
// CHECK: trunc i64 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4.a;
- // CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic, align 1
- // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) monotonic, align 1
+ // CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
// CHECK: sext i8 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic relaxed read
ldv = bfx4_packed.a;
- // CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
- // CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i64, ptr @bfx4 monotonic, align 8
+ // CHECK: store i64 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i64, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
// CHECK: store x86_fp80
#pragma omp atomic read relaxed
ldv = bfx4.b;
- // CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) acquire, align 1
- // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
- // CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) acquire, align 1
+ // CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
// CHECK: sext i8 [[ASHR]] to i64
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store x86_fp80
#pragma omp atomic read acquire
ldv = bfx4_packed.b;
- // CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic, align 8
- // CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
- // CHECK: store i64 [[LD]], i64* [[BITCAST]]
- // CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
+ // CHECK: [[LD:%.+]] = load atomic i64, ptr @{{.+}} monotonic, align 8
+ // CHECK: store i64 [[LD]], ptr [[LDTEMP:%.+]]
+ // CHECK: [[LD:%.+]] = load <2 x float>, ptr [[LDTEMP]]
// CHECK: extractelement <2 x float> [[LD]]
// CHECK: store i64
#pragma omp atomic read