@@ -216,7 +216,7 @@ define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP3]] to <4 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <4 x i64> [[TMP4]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i64>
- ; CHECK-NEXT: [[TMP7:%.*]] = lshr <4 x i64> [[TMP6]], <i64 48, i64 48, i64 48, i64 48>
+ ; CHECK-NEXT: [[TMP7:%.*]] = lshr <4 x i64> [[TMP6]], splat (i64 48)
; CHECK-NEXT: [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i64> [[RES]]
@@ -885,9 +885,9 @@ define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8>
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
- ; CHECK-NEXT: [[TMP10:%.*]] = ashr <32 x i8> [[A2:%.*]], <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+ ; CHECK-NEXT: [[TMP10:%.*]] = ashr <32 x i8> [[A2:%.*]], splat (i8 7)
; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i8> [[TMP10]] to <32 x i1>
- ; CHECK-NEXT: [[TMP6:%.*]] = ashr <32 x i8> [[TMP4]], <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+ ; CHECK-NEXT: [[TMP6:%.*]] = ashr <32 x i8> [[TMP4]], splat (i8 7)
; CHECK-NEXT: [[TMP7:%.*]] = trunc <32 x i8> [[TMP6]] to <32 x i1>
; CHECK-NEXT: [[TMP8:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP2]], <32 x i8> [[TMP9]]
; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i8> [[A1:%.*]], [[A0:%.*]]
@@ -1438,7 +1438,7 @@ define <2 x i64> @test_x86_avx2_psrlv_q_const() #0 {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> zeroinitializer, <2 x i64> <i64 1, i64 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
- ; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> <i64 4, i64 4>, <2 x i64> <i64 1, i64 -1>)
+ ; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> splat (i64 4), <2 x i64> <i64 1, i64 -1>)
; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x i64> [[RES]]
;
@@ -1471,7 +1471,7 @@ define <4 x i64> @test_x86_avx2_psrlv_q_256_const() #0 {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
- ; CHECK-NEXT: [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 4>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
+ ; CHECK-NEXT: [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> splat (i64 4), <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
; CHECK-NEXT: store <4 x i64> [[TMP2]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i64> [[RES]]
;
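Note: splat (i64 4) and the lane-by-lane spelling it replaces above denote the same vector constant; splat is simply the newer shorthand LLVM's IR printer emits for all-lanes-equal constants. A minimal sketch (hypothetical @demo function, assuming an LLVM recent enough to parse the splat shorthand for fixed-length vectors):

define <4 x i64> @demo(<4 x i64> %v) {
  ; splat (i64 48) is the same constant as <i64 48, i64 48, i64 48, i64 48>
  %r = lshr <4 x i64> %v, splat (i64 48)
  ret <4 x i64> %r
}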