@@ -300,7 +300,7 @@ define <2 x double> @shuffle_v2f64_u2(<2 x double> %a, <2 x double> %b) {
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX-NEXT: retq
- %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 undef, i32 2>
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 poison, i32 2>
ret <2 x double> %shuffle
}
define <2 x double> @shuffle_v2f64_3u(<2 x double> %a, <2 x double> %b) {
@@ -314,7 +314,7 @@ define <2 x double> @shuffle_v2f64_3u(<2 x double> %a, <2 x double> %b) {
; AVX: # %bb.0:
; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm1[1,0]
; AVX-NEXT: retq
- %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 undef>
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 poison>
ret <2 x double> %shuffle
}

@@ -847,7 +847,7 @@ define <2 x double> @shuffle_v2f64_bitcast_1z(<2 x double> %a) {
; AVX-NEXT: retq
%shuffle64 = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 1>
%bitcast32 = bitcast <2 x double> %shuffle64 to <4 x float>
- %shuffle32 = shufflevector <4 x float> %bitcast32, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+ %shuffle32 = shufflevector <4 x float> %bitcast32, <4 x float> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
%bitcast64 = bitcast <4 x float> %shuffle32 to <2 x double>
ret <2 x double> %bitcast64
}
@@ -880,7 +880,7 @@ define <2 x i64> @shuffle_v2i64_bitcast_z123(<2 x i64> %x) {
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: retq
%bitcast32 = bitcast <2 x i64> %x to <4 x float>
- %shuffle32 = shufflevector <4 x float> %bitcast32, <4 x float> <float 1.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+ %shuffle32 = shufflevector <4 x float> %bitcast32, <4 x float> <float 1.000000e+00, float poison, float poison, float poison>, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
%bitcast64 = bitcast <4 x float> %shuffle32 to <2 x i64>
%and = and <2 x i64> %bitcast64, <i64 -4294967296, i64 -1>
ret <2 x i64> %and
@@ -896,7 +896,7 @@ define <2 x i64> @insert_reg_and_zero_v2i64(i64 %a) {
; AVX: # %bb.0:
; AVX-NEXT: vmovq %rdi, %xmm0
; AVX-NEXT: retq
- %v = insertelement <2 x i64> undef, i64 %a, i32 0
+ %v = insertelement <2 x i64> poison, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x i64> %shuffle
}
@@ -912,7 +912,7 @@ define <2 x i64> @insert_mem_and_zero_v2i64(ptr %ptr) {
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
%a = load i64, ptr %ptr
- %v = insertelement <2 x i64> undef, i64 %a, i32 0
+ %v = insertelement <2 x i64> poison, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x i64> %shuffle
}
@@ -927,7 +927,7 @@ define <2 x double> @insert_reg_and_zero_v2f64(double %a) {
; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
- %v = insertelement <2 x double> undef, double %a, i32 0
+ %v = insertelement <2 x double> poison, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuffle
}
@@ -943,7 +943,7 @@ define <2 x double> @insert_mem_and_zero_v2f64(ptr %ptr) {
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
%a = load double, ptr %ptr
- %v = insertelement <2 x double> undef, double %a, i32 0
+ %v = insertelement <2 x double> poison, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuffle
}
@@ -976,7 +976,7 @@ define <2 x i64> @insert_reg_lo_v2i64(i64 %a, <2 x i64> %b) {
; AVX: # %bb.0:
; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
- %v = insertelement <2 x i64> undef, i64 %a, i32 0
+ %v = insertelement <2 x i64> poison, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
ret <2 x i64> %shuffle
}
@@ -1007,7 +1007,7 @@ define <2 x i64> @insert_mem_lo_v2i64(ptr %ptr, <2 x i64> %b) {
; AVX-NEXT: vpinsrq $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = load i64, ptr %ptr
- %v = insertelement <2 x i64> undef, i64 %a, i32 0
+ %v = insertelement <2 x i64> poison, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
ret <2 x i64> %shuffle
}
@@ -1040,7 +1040,7 @@ define <2 x i64> @insert_reg_hi_v2i64(i64 %a, <2 x i64> %b) {
; AVX: # %bb.0:
; AVX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
- %v = insertelement <2 x i64> undef, i64 %a, i32 0
+ %v = insertelement <2 x i64> poison, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
ret <2 x i64> %shuffle
}
@@ -1074,7 +1074,7 @@ define <2 x i64> @insert_mem_hi_v2i64(ptr %ptr, <2 x i64> %b) {
; AVX-NEXT: vpinsrq $1, (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = load i64, ptr %ptr
- %v = insertelement <2 x i64> undef, i64 %a, i32 0
+ %v = insertelement <2 x i64> poison, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
ret <2 x i64> %shuffle
}
@@ -1104,7 +1104,7 @@ define <2 x double> @insert_reg_lo_v2f64(double %a, <2 x double> %b) {
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: retq
- %v = insertelement <2 x double> undef, double %a, i32 0
+ %v = insertelement <2 x double> poison, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuffle
}
@@ -1120,7 +1120,7 @@ define <2 x double> @insert_mem_lo_v2f64(ptr %ptr, <2 x double> %b) {
; AVX-NEXT: vmovlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; AVX-NEXT: retq
%a = load double, ptr %ptr
- %v = insertelement <2 x double> undef, double %a, i32 0
+ %v = insertelement <2 x double> poison, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuffle
}
@@ -1136,7 +1136,7 @@ define <2 x double> @insert_reg_hi_v2f64(double %a, <2 x double> %b) {
; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
- %v = insertelement <2 x double> undef, double %a, i32 0
+ %v = insertelement <2 x double> poison, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 2, i32 0>
ret <2 x double> %shuffle
}
@@ -1152,7 +1152,7 @@ define <2 x double> @insert_mem_hi_v2f64(ptr %ptr, <2 x double> %b) {
; AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; AVX-NEXT: retq
%a = load double, ptr %ptr
- %v = insertelement <2 x double> undef, double %a, i32 0
+ %v = insertelement <2 x double> poison, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 2, i32 0>
ret <2 x double> %shuffle
}
@@ -1182,8 +1182,8 @@ define <2 x double> @insert_dup_reg_v2f64(double %a) {
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
- %v = insertelement <2 x double> undef, double %a, i32 0
- %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %v = insertelement <2 x double> poison, double %a, i32 0
+ %shuffle = shufflevector <2 x double> %v, <2 x double> poison, <2 x i32> <i32 0, i32 0>
ret <2 x double> %shuffle
}

@@ -1214,8 +1214,8 @@ define <2 x double> @insert_dup_mem_v2f64(ptr %ptr) {
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%a = load double, ptr %ptr
- %v = insertelement <2 x double> undef, double %a, i32 0
- %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %v = insertelement <2 x double> poison, double %a, i32 0
+ %shuffle = shufflevector <2 x double> %v, <2 x double> poison, <2 x i32> <i32 0, i32 0>
ret <2 x double> %shuffle
}

@@ -1246,7 +1246,7 @@ define <2 x double> @insert_dup_mem128_v2f64(ptr %ptr) nounwind {
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%v = load <2 x double>, ptr %ptr
- %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %shuffle = shufflevector <2 x double> %v, <2 x double> poison, <2 x i32> <i32 0, i32 0>
ret <2 x double> %shuffle
}

@@ -1263,8 +1263,8 @@ define <2 x i64> @insert_dup_mem_v2i64(ptr %ptr) {
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%tmp = load i64, ptr %ptr, align 1
- %tmp1 = insertelement <2 x i64> undef, i64 %tmp, i32 0
- %tmp2 = shufflevector <2 x i64> %tmp1, <2 x i64> undef, <2 x i32> zeroinitializer
+ %tmp1 = insertelement <2 x i64> poison, i64 %tmp, i32 0
+ %tmp2 = shufflevector <2 x i64> %tmp1, <2 x i64> poison, <2 x i32> zeroinitializer
ret <2 x i64> %tmp2
}

@@ -1281,7 +1281,7 @@ define <2 x double> @shuffle_mem_v2f64_10(ptr %ptr) {
; AVX-NEXT: retq

%a = load <2 x double>, ptr %ptr
- %shuffle = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ %shuffle = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> <i32 1, i32 0>
ret <2 x double> %shuffle
}