
Commit c9e0340

RKSimon authored and frederik-h committed
[X86] combineConcatVectorOps - convert X86ISD::BLENDI concatenation to use combineConcatVectorOps recursion (llvm#131121)
Only concatenate X86ISD::BLENDI nodes if at least one operand is beneficial to concatenate.

Add AVX1/AVX2 handling to 256-bit BLENDI nodes (accounting for the AVX2 v16i16 repeated-mask requirement).

Extend the existing AVX512BW handling (which still always concatenates until I get rid of the remaining regressions) to handle AVX512F for 32/64-bit scalar types.
1 parent 5fd3da4 commit c9e0340

10 files changed: 1399 additions & 1390 deletions
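
To illustrate the new 256-bit path: when two 128-bit X86ISD::BLENDI nodes are concatenated, their blend immediates are packed side by side, with the upper half's mask shifted into the high bits of the resulting 8-bit immediate. Below is a minimal standalone C++ sketch of that mask arithmetic; concatBlendMask is a hypothetical helper for illustration, not an LLVM API.

#include <cassert>
#include <cstdint>

// Hypothetical helper (illustration only, not LLVM API): mirrors the
// zext + insertBits logic in the X86ISelLowering.cpp change below, packing
// the 4-bit immediates of two v4i32 BLENDIs into the 8-bit immediate of
// the concatenated v8i32 blend.
static uint8_t concatBlendMask(uint8_t LoMask, uint8_t HiMask,
                               unsigned EltsPerHalf) {
  assert(2 * EltsPerHalf <= 8 && "result must fit an 8-bit immediate");
  uint8_t LoBits = LoMask & ((1u << EltsPerHalf) - 1);
  return LoBits | (uint8_t)(HiMask << EltsPerHalf);
}

int main() {
  // blend(a,b,0b0101) ++ blend(c,d,0b0011) -> blend(A,B,0b00110101)
  assert(concatBlendMask(0b0101, 0b0011, 4) == 0b00110101);
  return 0;
}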

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 36 additions & 10 deletions
@@ -58512,16 +58512,42 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
       }
       break;
     case X86ISD::BLENDI:
-      if (NumOps == 2 && VT.is512BitVector() && Subtarget.useBWIRegs()) {
-        unsigned NumElts = VT.getVectorNumElements();
-        APInt Mask = getBLENDIBlendMask(Ops[0]).zext(NumElts);
-        Mask.insertBits(getBLENDIBlendMask(Ops[1]), NumElts / 2);
-        MVT MaskSVT = MVT::getIntegerVT(NumElts);
-        MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
-        SDValue Sel =
-            DAG.getBitcast(MaskVT, DAG.getConstant(Mask, DL, MaskSVT));
-        return DAG.getSelect(DL, VT, Sel, ConcatSubOperand(VT, Ops, 1),
-                             ConcatSubOperand(VT, Ops, 0));
+      if (VT.is256BitVector() && NumOps == 2 &&
+          (EltSizeInBits >= 32 ||
+           (Subtarget.hasInt256() &&
+            Ops[0].getOperand(2) == Ops[1].getOperand(2)))) {
+        SDValue Concat0 = CombineSubOperand(VT, Ops, 0);
+        SDValue Concat1 = CombineSubOperand(VT, Ops, 1);
+        if (Concat0 || Concat1) {
+          unsigned NumElts = VT.getVectorNumElements();
+          APInt Mask = getBLENDIBlendMask(Ops[0]).zext(NumElts);
+          Mask.insertBits(getBLENDIBlendMask(Ops[1]), NumElts / 2);
+          Mask = Mask.zextOrTrunc(8);
+          return DAG.getNode(Op0.getOpcode(), DL, VT,
+                             Concat0 ? Concat0 : ConcatSubOperand(VT, Ops, 0),
+                             Concat1 ? Concat1 : ConcatSubOperand(VT, Ops, 1),
+                             DAG.getTargetConstant(Mask, DL, MVT::i8));
+        }
+      }
+      // TODO: BWI targets should only use CombineSubOperand.
+      if (((VT.is256BitVector() && Subtarget.hasVLX()) ||
+           (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
+          (EltSizeInBits >= 32 || Subtarget.useBWIRegs())) {
+        SDValue Concat0 = CombineSubOperand(VT, Ops, 0);
+        SDValue Concat1 = CombineSubOperand(VT, Ops, 1);
+        if (Concat0 || Concat1 || Subtarget.useBWIRegs()) {
+          unsigned NumElts = VT.getVectorNumElements();
+          APInt Mask = getBLENDIBlendMask(Ops[0]).zext(NumElts);
+          for (unsigned I = 1; I != NumOps; ++I)
+            Mask.insertBits(getBLENDIBlendMask(Ops[I]), I * (NumElts / NumOps));
+          MVT MaskSVT = MVT::getIntegerVT(NumElts);
+          MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
+          SDValue Sel =
+              DAG.getBitcast(MaskVT, DAG.getConstant(Mask, DL, MaskSVT));
+          Concat0 = Concat0 ? Concat0 : ConcatSubOperand(VT, Ops, 0);
+          Concat1 = Concat1 ? Concat1 : ConcatSubOperand(VT, Ops, 1);
+          return DAG.getSelect(DL, VT, Sel, Concat1, Concat0);
+        }
       }
       break;
     case ISD::VSELECT:
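
For the AVX512 select path above, each operand's blend mask is inserted at its subvector's element offset to form a single NumElts-bit constant, which is then bitcast to a vector-of-i1 select mask. A rough standalone sketch of that insertBits loop follows; buildSelectMask is a hypothetical helper (the real code operates on APInt and SDValue), shown here with plain 64-bit integers.

#include <cassert>
#include <cstdint>

// Hypothetical helper (assumption, not LLVM API): emulates the insertBits
// loop above for NumElts <= 64. Bit K of the result selects element K from
// the blend's second operand, matching getSelect(..., Concat1, Concat0).
static uint64_t buildSelectMask(const uint8_t SubMasks[], unsigned NumOps,
                                unsigned NumElts) {
  unsigned SubElts = NumElts / NumOps;
  assert(NumElts <= 64 && NumElts % NumOps == 0 && SubElts <= 8);
  uint64_t Mask = 0;
  for (unsigned I = 0; I != NumOps; ++I)
    Mask |= (uint64_t)(SubMasks[I] & ((1u << SubElts) - 1)) << (I * SubElts);
  return Mask;
}

int main() {
  // Two v8i16 BLENDIs with immediates 0xF0 and 0x0F concatenated into a
  // v16i16 select: the combined mask is 0x0FF0.
  const uint8_t Sub[] = {0xF0, 0x0F};
  assert(buildSelectMask(Sub, 2, 16) == 0x0FF0);
  return 0;
}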

llvm/test/CodeGen/X86/masked_store.ll

Lines changed: 24 additions & 21 deletions
@@ -5882,9 +5882,9 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
 ;
 ; AVX1-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
 ; AVX1: ## %bb.0:
-; AVX1-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX1-NEXT:    vmovaps 32(%rsi), %ymm1
-; AVX1-NEXT:    vmovaps 64(%rsi), %ymm2
+; AVX1-NEXT:    vmovaps (%rsi), %ymm1
+; AVX1-NEXT:    vmovaps 32(%rsi), %ymm2
+; AVX1-NEXT:    vmovdqa 64(%rsi), %ymm0
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpgtd 48(%rdi), %xmm3, %xmm4
 ; AVX1-NEXT:    vpcmpgtd 32(%rdi), %xmm3, %xmm5
@@ -5894,25 +5894,28 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
 ; AVX1-NEXT:    vpcmpgtd 64(%rdi), %xmm3, %xmm6
 ; AVX1-NEXT:    vpcmpgtd 16(%rdi), %xmm3, %xmm7
 ; AVX1-NEXT:    vpcmpgtd (%rdi), %xmm3, %xmm8
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm3[2,3],xmm8[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpslld $31, %xmm8, %xmm8
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm3[2,3],xmm7[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpslld $31, %xmm7, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm8, %ymm7
-; AVX1-NEXT:    vmaskmovps %ymm0, %ymm7, (%rdx)
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpslld $31, %xmm5, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX1-NEXT:    vmaskmovps %ymm2, %ymm0, 64(%rdx)
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT:    vpslld $31, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
+; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vmaskmovps %ymm2, %ymm3, 32(%rdx)
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm8, %ymm2
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX1-NEXT:    vpslld $31, %xmm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmaskmovps %ymm1, %ymm0, 32(%rdx)
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; AVX1-NEXT:    vmaskmovps %ymm1, %ymm2, (%rdx)
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX1-NEXT:    vpslld $31, %xmm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vmaskmovps %ymm0, %ymm1, 64(%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

llvm/test/CodeGen/X86/vector-fshl-rot-256.ll

Lines changed: 19 additions & 27 deletions
@@ -139,26 +139,22 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
 ; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vpmuludq %xmm2, %xmm6, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
-; AVX1-NEXT:    vpor %xmm6, %xmm2, %xmm2
 ; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm6, %xmm2
 ; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovshdup {{.*#+}} ymm1 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX1-NEXT:    vmovsldup {{.*#+}} ymm2 = ymm3[0,0,2,2,4,4,6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_funnnel_v8i32:
@@ -1085,24 +1081,20 @@ define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
 define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
 ; AVX1-LABEL: constant_funnnel_v8i32:
 ; AVX1: # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX1-NEXT:    vmovsldup {{.*#+}} ymm1 = ymm1[0,0,2,2,4,4,6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: constant_funnnel_v8i32:

llvm/test/CodeGen/X86/vector-fshr-rot-256.ll

Lines changed: 19 additions & 27 deletions
@@ -146,12 +146,6 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm7[1,1,3,3]
 ; AVX1-NEXT:    vpmuludq %xmm6, %xmm8, %xmm6
-; AVX1-NEXT:    vpmuludq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
-; AVX1-NEXT:    vpor %xmm7, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
@@ -160,13 +154,15 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm7, %xmm2
 ; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovshdup {{.*#+}} ymm1 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX1-NEXT:    vmovsldup {{.*#+}} ymm2 = ymm3[0,0,2,2,4,4,6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_funnnel_v8i32:
@@ -1136,24 +1132,20 @@ define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
 define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
 ; AVX1-LABEL: constant_funnnel_v8i32:
 ; AVX1: # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX1-NEXT:    vmovsldup {{.*#+}} ymm1 = ymm1[0,0,2,2,4,4,6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: constant_funnnel_v8i32:
