Commit 85460a2

[X86][SSE] Move unpack(hop,hop) fold from foldShuffleOfHorizOp to combineTargetShuffle
By moving this after more of the shuffle canonicalization we reduce the demanded vector elts, avoiding a few unnecessary copies/moves etc.
1 parent 6f5670a commit 85460a2
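
For reference, the fold being moved rewrites a 32-bit unpack of two different horizontal ops into a single horizontal op plus a lane permute. A minimal standalone check of that identity with SSE intrinsics (not part of the commit; assumes an SSSE3-capable toolchain, e.g. building with -mssse3):

#include <immintrin.h>
#include <cassert>

// Element-wise equality of two 4 x i32 vectors.
static bool equal(__m128i a, __m128i b) {
  return _mm_movemask_epi8(_mm_cmpeq_epi32(a, b)) == 0xFFFF;
}

int main() {
  __m128i x = _mm_setr_epi32(1, 2, 3, 4);
  __m128i y = _mm_setr_epi32(5, 6, 7, 8);
  __m128i z = _mm_setr_epi32(9, 10, 11, 12);
  __m128i w = _mm_setr_epi32(13, 14, 15, 16);

  __m128i xy = _mm_hadd_epi32(x, y); // hop(x,y) = [x0+x1, x2+x3, y0+y1, y2+y3]
  __m128i zw = _mm_hadd_epi32(z, w); // hop(z,w) = [z0+z1, z2+z3, w0+w1, w2+w3]

  // unpcklo(hop(x,y),hop(z,w)) == permute<0,2,1,3>(hop(x,z))
  __m128i lo = _mm_unpacklo_epi32(xy, zw);
  __m128i lo2 = _mm_shuffle_epi32(_mm_hadd_epi32(x, z), _MM_SHUFFLE(3, 1, 2, 0));
  assert(equal(lo, lo2));

  // unpckhi(hop(x,y),hop(z,w)) == permute<0,2,1,3>(hop(y,w))
  __m128i hi = _mm_unpackhi_epi32(xy, zw);
  __m128i hi2 = _mm_shuffle_epi32(_mm_hadd_epi32(y, w), _MM_SHUFFLE(3, 1, 2, 0));
  assert(equal(hi, hi2));
  return 0;
}

The {0, 2, 1, 3} mask is the same one the patch builds with getV4X86ShuffleImm8ForMask (as a shuffle immediate it encodes to 0xd8), and, as the in-code comment notes, the fold is skipped when both unpack operands are the same horizontal op.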

File tree

2 files changed (+112 -114 lines)


llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 25 additions & 25 deletions
@@ -37566,6 +37566,29 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
     }
     return SDValue();
   }
+  case X86ISD::UNPCKL:
+  case X86ISD::UNPCKH: {
+    // unpcklo(hop(x,y),hop(z,w)) -> permute(hop(x,z)).
+    // unpckhi(hop(x,y),hop(z,w)) -> permute(hop(y,w)).
+    // Don't fold if hop(x,y) == hop(z,w).
+    // TODO: Merge this into canonicalizeShuffleMaskWithHorizOp?
+    SDValue N0 = N.getOperand(0);
+    SDValue N1 = N.getOperand(1);
+    if (VT.getScalarSizeInBits() == 32 && N0 != N1 &&
+        N0.getOpcode() == N1.getOpcode() && isHorizOp(N0.getOpcode())) {
+      unsigned LoHi = Opcode == X86ISD::UNPCKL ? 0 : 1;
+      SDValue Res = DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(LoHi),
+                                N1.getOperand(LoHi));
+      // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
+      // combining and domain handling will simplify this later on.
+      EVT ShuffleVT = VT.changeVectorElementType(MVT::f32);
+      Res = DAG.getBitcast(ShuffleVT, Res);
+      Res = DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
+                        getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG));
+      return DAG.getBitcast(VT, Res);
+    }
+    return SDValue();
+  }
   case X86ISD::VPERMI: {
     // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
     // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
@@ -38071,38 +38094,15 @@ static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
 // TODO: Merge this into canonicalizeShuffleMaskWithHorizOp.
 static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
   unsigned Opcode = N->getOpcode();
-  if (Opcode != X86ISD::UNPCKL && Opcode != X86ISD::UNPCKH)
-    if (Opcode != X86ISD::SHUFP)
-      return SDValue();
+  if (Opcode != X86ISD::SHUFP)
+    return SDValue();

   EVT VT = N->getValueType(0);
   SDValue HOp = N->getOperand(0);
   if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
       HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
     return SDValue();

-  // unpcklo(hop(x,y),hop(z,w)) -> permute(hop(x,z)).
-  // unpckhi(hop(x,y),hop(z,w)) -> permute(hop(y,w)).
-  // Don't fold if hop(x,y) == hop(z,w).
-  if (Opcode == X86ISD::UNPCKL || Opcode == X86ISD::UNPCKH) {
-    SDValue HOp2 = N->getOperand(1);
-    if (HOp.getOpcode() != HOp2.getOpcode() || VT.getScalarSizeInBits() != 32)
-      return SDValue();
-    if (HOp == HOp2)
-      return SDValue();
-    SDLoc DL(HOp);
-    unsigned LoHi = Opcode == X86ISD::UNPCKL ? 0 : 1;
-    SDValue Res = DAG.getNode(HOp.getOpcode(), DL, VT, HOp.getOperand(LoHi),
-                              HOp2.getOperand(LoHi));
-    // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
-    // combining and domain handling will simplify this later on.
-    EVT ShuffleVT = VT.changeVectorElementType(MVT::f32);
-    Res = DAG.getBitcast(ShuffleVT, Res);
-    Res = DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
-                      getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG));
-    return DAG.getBitcast(VT, Res);
-  }
-
   // shufps(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
   // Don't fold if hop(x,y) == hop(z,w).
   if (Opcode == X86ISD::SHUFP) {

llvm/test/CodeGen/X86/horizontal-sum.ll

Lines changed: 87 additions & 89 deletions
@@ -94,8 +94,8 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; SSSE3-SLOW-LABEL: pair_sum_v4i32_v4i32:
 ; SSSE3-SLOW: # %bb.0:
 ; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
 ; SSSE3-SLOW-NEXT: phaddd %xmm2, %xmm3
 ; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
@@ -115,9 +115,9 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-SLOW-LABEL: pair_sum_v4i32_v4i32:
 ; AVX1-SLOW: # %bb.0:
 ; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX1-SLOW-NEXT: vphaddd %xmm2, %xmm2, %xmm1
 ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
 ; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -184,57 +184,55 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; SSSE3-SLOW: # %bb.0:
 ; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm0
 ; SSSE3-SLOW-NEXT: movaps %xmm0, %xmm1
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSSE3-SLOW-NEXT: addps %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm1
-; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm1
-; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm3
+; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm1
 ; SSSE3-SLOW-NEXT: haddps %xmm4, %xmm5
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
-; SSSE3-SLOW-NEXT: addps %xmm1, %xmm3
-; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm2
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
+; SSSE3-SLOW-NEXT: addps %xmm2, %xmm1
+; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSSE3-SLOW-NEXT: haddps %xmm7, %xmm6
 ; SSSE3-SLOW-NEXT: haddps %xmm6, %xmm6
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,3]
-; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,3]
 ; SSSE3-SLOW-NEXT: retq
 ;
 ; SSSE3-FAST-LABEL: pair_sum_v8f32_v4f32:
 ; SSSE3-FAST: # %bb.0:
 ; SSSE3-FAST-NEXT: haddps %xmm1, %xmm0
 ; SSSE3-FAST-NEXT: haddps %xmm0, %xmm0
-; SSSE3-FAST-NEXT: movaps %xmm2, %xmm1
-; SSSE3-FAST-NEXT: haddps %xmm3, %xmm1
-; SSSE3-FAST-NEXT: haddps %xmm2, %xmm3
+; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
+; SSSE3-FAST-NEXT: haddps %xmm2, %xmm1
 ; SSSE3-FAST-NEXT: haddps %xmm4, %xmm5
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
-; SSSE3-FAST-NEXT: addps %xmm1, %xmm3
-; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSSE3-FAST-NEXT: haddps %xmm3, %xmm2
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
+; SSSE3-FAST-NEXT: addps %xmm2, %xmm1
+; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSSE3-FAST-NEXT: haddps %xmm6, %xmm6
 ; SSSE3-FAST-NEXT: haddps %xmm7, %xmm7
 ; SSSE3-FAST-NEXT: haddps %xmm7, %xmm6
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,2]
-; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,2]
 ; SSSE3-FAST-NEXT: retq
 ;
 ; AVX1-SLOW-LABEL: pair_sum_v8f32_v4f32:
 ; AVX1-SLOW: # %bb.0:
 ; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
 ; AVX1-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
-; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm0
-; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm1
-; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm2
-; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm3
-; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm1[0,2],xmm2[0,1]
-; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm3[0]
-; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,1]
-; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[1]
-; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm4, %xmm1
+; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
+; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
+; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
+; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
+; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
+; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm3, %xmm1
 ; AVX1-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -247,34 +245,34 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX1-FAST-LABEL: pair_sum_v8f32_v4f32:
 ; AVX1-FAST: # %bb.0:
 ; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm2, %xmm1
-; AVX1-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm3
-; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[1],xmm3[1],zero,zero
-; AVX1-FAST-NEXT: vhaddps %xmm4, %xmm4, %xmm3
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm8
+; AVX1-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm1
+; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm0
+; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[1],xmm0[1],zero,zero
+; AVX1-FAST-NEXT: vhaddps %xmm4, %xmm4, %xmm1
 ; AVX1-FAST-NEXT: vhaddps %xmm5, %xmm5, %xmm4
-; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,1]
-; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[1,3]
-; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[1]
-; AVX1-FAST-NEXT: vaddps %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
+; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
+; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
+; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[1]
+; AVX1-FAST-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm1 = xmm8[0],xmm0[0]
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-FAST-NEXT: vhaddps %xmm7, %xmm6, %xmm2
 ; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
+; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
 ; AVX1-FAST-NEXT: retq
 ;
 ; AVX2-SLOW-LABEL: pair_sum_v8f32_v4f32:
 ; AVX2-SLOW: # %bb.0:
 ; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
 ; AVX2-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
-; AVX2-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm0
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
 ; AVX2-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
 ; AVX2-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
@@ -364,17 +362,17 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; SSSE3-SLOW-LABEL: pair_sum_v8i32_v4i32:
 ; SSSE3-SLOW: # %bb.0:
 ; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm2
 ; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
 ; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,1,0,1]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,1,0,1]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
+; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm2
 ; SSSE3-SLOW-NEXT: movdqa %xmm2, %xmm1
 ; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[2,0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[2,0]
 ; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
 ; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSSE3-SLOW-NEXT: phaddd %xmm7, %xmm6
@@ -388,12 +386,12 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; SSSE3-FAST: # %bb.0:
 ; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm0
 ; SSSE3-FAST-NEXT: phaddd %xmm0, %xmm0
-; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT: movdqa %xmm5, %xmm1
 ; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm5
 ; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm4
 ; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm1
 ; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
+; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT: movdqa %xmm2, %xmm3
 ; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[2,0]
 ; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[2,0]
@@ -409,20 +407,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-SLOW-LABEL: pair_sum_v8i32_v4i32:
 ; AVX1-SLOW: # %bb.0:
 ; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm1
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,2,1,3]
-; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm3
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
 ; AVX1-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
-; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,1,3]
+; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
 ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
-; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm5[6,7]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,1]
-; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[1],zero
+; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm5[6,7]
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,3,1,1]
+; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[1],zero
 ; AVX1-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm3, %xmm1
 ; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -436,20 +434,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-FAST: # %bb.0:
 ; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
 ; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm8
-; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm1
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm3
+; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm1
+; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm0
 ; AVX1-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm4
 ; AVX1-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm5
-; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm5[0,0,0,0]
-; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
-; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6,7]
-; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[1],zero
-; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[3]
-; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[0,0,0,0]
+; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm4[1],zero
+; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[3]
+; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm2, %xmm0
 ; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm0[0]
 ; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -462,9 +460,9 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-SLOW-LABEL: pair_sum_v8i32_v4i32:
 ; AVX2-SLOW: # %bb.0:
 ; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
-; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
 ; AVX2-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
 ; AVX2-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
@@ -1147,13 +1145,13 @@ define <4 x i32> @reduction_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32
 ; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
 ; AVX-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
 ; AVX-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
 ; AVX-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
 ; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
 ; AVX-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
 ; AVX-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
 ; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-FAST-NEXT: retq
   %5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)
