@@ -35,6 +35,11 @@ void test_builtin_elementwise_abs(float f1, float f2, double d1, double d2,
// CHECK-NEXT: call i64 @llvm.abs.i64(i64 [[I1]], i1 false)
i2 = __builtin_elementwise_abs(i1);

+ // CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
+ // CHECK: [[S1:%.+]] = trunc i64 [[I1]] to i16
+ // CHECK-NEXT: call i16 @llvm.abs.i16(i16 [[S1]], i1 false)
+ i1 = __builtin_elementwise_abs((short)i1);
+
// CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
// CHECK-NEXT: call <8 x i16> @llvm.abs.v8i16(<8 x i16> [[VI1]], i1 false)
vi2 = __builtin_elementwise_abs(vi1);
@@ -77,7 +82,7 @@ void test_builtin_elementwise_add_sat(float f1, float f2, double d1, double d2,
// CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
// CHECK-NEXT: call i64 @llvm.sadd.sat.i64(i64 [[I1]], i64 10)
- i1 = __builtin_elementwise_add_sat(i1, (long long int)10);
+ i1 = __builtin_elementwise_add_sat(i1, 10ll);

// CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
// CHECK-NEXT: [[VI2:%.+]] = load <8 x i16>, ptr %vi2.addr, align 16
@@ -131,6 +136,13 @@ void test_builtin_elementwise_add_sat(float f1, float f2, double d1, double d2,
// CHECK-NEXT: call i16 @llvm.sadd.sat.i16(i16 [[S1]], i16 [[S2]])
s1 = __builtin_elementwise_add_sat(s1, s2);

+ // CHECK: [[S1:%.+]] = load i16, ptr %s1.addr, align 2
+ // CHECK: [[I1:%.+]] = sext i16 [[S1]] to i32
+ // CHECK-NEXT: [[S2:%.+]] = load i16, ptr %s2.addr, align 2
+ // CHECK: [[I2:%.+]] = sext i16 [[S2]] to i32
+ // CHECK-NEXT: call i32 @llvm.sadd.sat.i32(i32 [[I1]], i32 [[I2]])
+ s1 = __builtin_elementwise_add_sat((int)s1, (int)s2);
+
// CHECK: [[US1:%.+]] = load i16, ptr %us1.addr, align 2
// CHECK-NEXT: [[US2:%.+]] = load i16, ptr %us2.addr, align 2
// CHECK-NEXT: call i16 @llvm.uadd.sat.i16(i16 [[US1]], i16 [[US2]])
@@ -153,7 +165,7 @@ void test_builtin_elementwise_sub_sat(float f1, float f2, double d1, double d2,
// CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
// CHECK-NEXT: call i64 @llvm.ssub.sat.i64(i64 [[I1]], i64 10)
- i1 = __builtin_elementwise_sub_sat(i1, (long long int)10);
+ i1 = __builtin_elementwise_sub_sat(i1, 10ll);

// CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
// CHECK-NEXT: [[VI2:%.+]] = load <8 x i16>, ptr %vi2.addr, align 16
@@ -322,7 +334,7 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
// CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
// CHECK-NEXT: call i64 @llvm.smax.i64(i64 [[I1]], i64 10)
- i1 = __builtin_elementwise_max(i1, (long long int)10);
+ i1 = __builtin_elementwise_max(i1, 10ll);

// CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
// CHECK-NEXT: [[VI2:%.+]] = load <8 x i16>, ptr %vi2.addr, align 16
@@ -406,7 +418,14 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
// CHECK: [[I2:%.+]] = load i64, ptr %i2.addr, align 8
// CHECK-NEXT: call i64 @llvm.smin.i64(i64 -11, i64 [[I2]])
- i1 = __builtin_elementwise_min((long long int)-11, i2);
+ i1 = __builtin_elementwise_min(-11ll, i2);
+
+ // CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
+ // CHECK: [[S1:%.+]] = trunc i64 [[I1]] to i16
+ // CHECK-NEXT: [[I2:%.+]] = load i64, ptr %i2.addr, align 8
+ // CHECK: [[S2:%.+]] = trunc i64 [[I2]] to i16
+ // CHECK-NEXT: call i16 @llvm.smin.i16(i16 [[S1]], i16 [[S2]])
+ i1 = __builtin_elementwise_min((short)i1, (short)i2);

// CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
// CHECK-NEXT: [[VI2:%.+]] = load <8 x i16>, ptr %vi2.addr, align 16