
Commit 61aab82

[X86] getFauxShuffleMask - insert_subvector - skip undemanded subvectors (#129042)
If the shuffle combine doesn't require the subvector of an insert_subvector node, we can just combine the base vector directly.
1 parent c19a303 commit 61aab82

11 files changed (+996, -960 lines)
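
In shuffle-mask terms, the commit message boils down to this: if none of the demanded lanes of the result fall inside the window where the subvector was inserted, the insert_subvector node is indistinguishable from its base vector, so the faux-shuffle decomposition can emit an identity mask over the base operand and drop the subvector entirely. Below is a minimal standalone C++ sketch of that idea; it is not LLVM code, and the bitmask type, helper name, and sentinel are simplified stand-ins for llvm::APInt, getFauxShuffleMask, and SM_SentinelUndef:

#include <cstdint>
#include <numeric>
#include <vector>

// Stand-in for LLVM's SM_SentinelUndef shuffle-mask sentinel.
constexpr int kSentinelUndef = -1;

// Models the commit's early-out. DemandedElts is a per-lane bitmask
// (lane i is demanded iff bit i is set; assumes NumElts <= 64). If no
// demanded lane lands in [InsertIdx, InsertIdx + NumSubElts), the node
// reduces to an identity shuffle of the base vector alone.
bool skipUndemandedSubvector(uint64_t DemandedElts, unsigned NumElts,
                             unsigned NumSubElts, unsigned InsertIdx,
                             std::vector<int> &Mask) {
  uint64_t SubLanes = ((uint64_t{1} << NumSubElts) - 1) << InsertIdx;
  if (DemandedElts & SubLanes)
    return false; // Some inserted lane is demanded; keep both operands.
  Mask.assign(NumElts, kSentinelUndef);
  std::iota(Mask.begin(), Mask.end(), 0); // Output lane i <- base lane i.
  return true;
}

The payoff is visible in the regenerated test checks below: blend chains that previously re-used an already-updated ymm register now broadcast and blend against the original base vector, which in some cases saves an instruction outright (e.g. the AVX2 sequence in avx-insertelt.ll drops one vpblendd).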

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 7 additions & 0 deletions
@@ -6145,6 +6145,13 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
     EVT SubVT = Sub.getValueType();
     unsigned NumSubElts = SubVT.getVectorNumElements();
     uint64_t InsertIdx = N.getConstantOperandVal(2);
+    // Subvector isn't demanded - just return the base vector.
+    if (DemandedElts.extractBits(NumSubElts, InsertIdx) == 0) {
+      Mask.resize(NumElts, SM_SentinelUndef);
+      std::iota(Mask.begin(), Mask.end(), 0);
+      Ops.push_back(Src);
+      return true;
+    }
     // Handle CONCAT(SUB0, SUB1).
     // Limit this to vXi64 vector cases to make the most of cross lane shuffles.
     if (Depth > 0 && InsertIdx == NumSubElts && NumElts == (2 * NumSubElts) &&
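
The guard uses APInt::extractBits, which slices NumSubElts bits starting at bit InsertIdx out of the demanded-elements mask; the early-out fires only when that slice is all zero. A small illustrative example of the check in isolation (it compiles against LLVM's ADT headers; the concrete lane counts and demanded lanes are invented for illustration):

#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  // An 8-lane insert_subvector(base, sub, /*InsertIdx=*/4) where sub
  // has 2 lanes, and the caller only demands result lanes 0 and 1.
  unsigned NumElts = 8, NumSubElts = 2, InsertIdx = 4;
  llvm::APInt DemandedElts = llvm::APInt::getLowBitsSet(NumElts, 2);

  // The slice covering the inserted lanes (4 and 5) is all zero, so
  // getFauxShuffleMask may ignore Sub and recurse into the base alone.
  assert(DemandedElts.extractBits(NumSubElts, InsertIdx) == 0);
  return 0;
}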

llvm/test/CodeGen/X86/avx-insertelt.ll

Lines changed: 3 additions & 4 deletions
@@ -270,11 +270,10 @@ define <16 x i16> @insert_i16_firstelts(<16 x i16> %x, i16 %s) {
 ; AVX2-LABEL: insert_i16_firstelts:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vmovd %edi, %xmm1
-; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-NEXT: retq
 %i0 = insertelement <16 x i16> %x, i16 %s, i32 0
 %i1 = insertelement <16 x i16> %i0, i16 %s, i32 8

llvm/test/CodeGen/X86/avx512-insert-extract.ll

Lines changed: 5 additions & 7 deletions
@@ -693,20 +693,18 @@ define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, ptr %ptr) nounwind {
 ; KNL-LABEL: insert_v16i16:
 ; KNL: ## %bb.0:
 ; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
+; KNL-NEXT: vmovd %edi, %xmm2
+; KNL-NEXT: vpbroadcastw %xmm2, %ymm2
+; KNL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
 ; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; KNL-NEXT: vmovd %edi, %xmm1
-; KNL-NEXT: vpbroadcastw %xmm1, %ymm1
-; KNL-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; KNL-NEXT: retq
 ;
 ; SKX-LABEL: insert_v16i16:
 ; SKX: ## %bb.0:
 ; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vpbroadcastw %edi, %ymm2
+; SKX-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
 ; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; SKX-NEXT: vpbroadcastw %edi, %ymm1
-; SKX-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
-; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; SKX-NEXT: retq
 %val = load i16, ptr %ptr
 %r1 = insertelement <16 x i16> %x, i16 %val, i32 1

llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll

Lines changed: 751 additions & 734 deletions
Large diffs are not rendered by default.

llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll

Lines changed: 19 additions & 19 deletions
@@ -346,26 +346,26 @@ define void @store_i32_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
 ; AVX-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0,0,2,3]
 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
-; AVX-NEXT: vshufpd {{.*#+}} ymm7 = ymm7[0,0,3,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5,6],ymm5[7]
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm7 = mem[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
+; AVX-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6],ymm5[7]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm7
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0],ymm4[1,2,3],ymm7[4],ymm4[5,6,7]
 ; AVX-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,u,u,2,u,u,u,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vpermilps {{.*#+}} ymm4 = ymm6[1,u,u,u,6,u,u,u]
-; AVX-NEXT: vbroadcastss 8(%rcx), %ymm5
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5],ymm1[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4,5],ymm7[6],ymm1[7]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
-; AVX-NEXT: vbroadcastss 12(%rsi), %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm7[3]
-; AVX-NEXT: vmovaps %xmm2, 64(%r9)
-; AVX-NEXT: vmovaps %ymm0, (%r9)
-; AVX-NEXT: vmovaps %ymm1, 32(%r9)
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vpermilps {{.*#+}} ymm1 = ymm6[1,u,u,u,6,u,u,u]
+; AVX-NEXT: vbroadcastss 8(%rcx), %ymm6
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[1],ymm6[1],ymm1[4],ymm6[4],ymm1[5],ymm6[5]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4,5],ymm5[6],ymm0[7]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,3],xmm3[3,3]
+; AVX-NEXT: vbroadcastss 12(%rsi), %xmm2
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[3]
+; AVX-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX-NEXT: vmovaps %ymm4, (%r9)
+; AVX-NEXT: vmovaps %ymm0, 32(%r9)
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
 ;

llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll

Lines changed: 1 addition & 1 deletion
@@ -473,7 +473,7 @@ define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm8
 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm9
 ; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0],ymm8[2,1],ymm10[6,4],ymm8[6,5]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0],ymm1[2,1],ymm10[6,4],ymm1[6,5]
 ; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
 ; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm7[1],ymm5[1],ymm7[3],ymm5[3]
 ; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[1,1],ymm6[2,0],ymm5[5,5],ymm6[6,4]

llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll

Lines changed: 24 additions & 24 deletions
@@ -62,16 +62,16 @@ define void @store_i64_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-NEXT: vmovaps (%rcx), %xmm2
 ; AVX2-NEXT: vmovaps (%r8), %xmm3
 ; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
-; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
 ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
-; AVX2-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX2-NEXT: vmovaps %xmm2, 64(%r9)
 ; AVX2-NEXT: vmovaps %ymm0, (%r9)
-; AVX2-NEXT: vmovaps %ymm4, 32(%r9)
+; AVX2-NEXT: vmovaps %ymm1, 32(%r9)
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -82,16 +82,16 @@ define void @store_i64_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FP-NEXT: vmovaps (%rcx), %xmm2
 ; AVX2-FP-NEXT: vmovaps (%r8), %xmm3
 ; AVX2-FP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX2-FP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
-; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-FP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
-; AVX2-FP-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX2-FP-NEXT: vmovaps %xmm2, 64(%r9)
 ; AVX2-FP-NEXT: vmovaps %ymm0, (%r9)
-; AVX2-FP-NEXT: vmovaps %ymm4, 32(%r9)
+; AVX2-FP-NEXT: vmovaps %ymm1, 32(%r9)
 ; AVX2-FP-NEXT: vzeroupper
 ; AVX2-FP-NEXT: retq
 ;
@@ -102,16 +102,16 @@ define void @store_i64_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FCP-NEXT: vmovaps (%rcx), %xmm2
 ; AVX2-FCP-NEXT: vmovaps (%r8), %xmm3
 ; AVX2-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX2-FCP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-FCP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
 ; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
-; AVX2-FCP-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX2-FCP-NEXT: vmovaps %xmm2, 64(%r9)
 ; AVX2-FCP-NEXT: vmovaps %ymm0, (%r9)
-; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%r9)
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%r9)
 ; AVX2-FCP-NEXT: vzeroupper
 ; AVX2-FCP-NEXT: retq
 ;

llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll

Lines changed: 36 additions & 36 deletions
@@ -79,24 +79,24 @@ define void @store_i64_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-NEXT: vmovaps (%r8), %xmm2
 ; AVX2-NEXT: vmovaps (%r9), %xmm3
 ; AVX2-NEXT: vmovaps (%r10), %xmm4
-; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm5
 ; AVX2-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,1]
+; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm6
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3],ymm6[4,5,6,7]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
 ; AVX2-NEXT: vbroadcastsd %xmm4, %ymm6
-; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
-; AVX2-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
+; AVX2-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
 ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
 ; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
-; AVX2-NEXT: vmovaps %xmm1, 96(%rax)
-; AVX2-NEXT: vmovaps %ymm0, (%rax)
+; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX2-NEXT: vmovaps %xmm3, 96(%rax)
+; AVX2-NEXT: vmovaps %ymm1, (%rax)
 ; AVX2-NEXT: vmovaps %ymm2, 64(%rax)
-; AVX2-NEXT: vmovaps %ymm5, 32(%rax)
+; AVX2-NEXT: vmovaps %ymm0, 32(%rax)
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -109,24 +109,24 @@ define void @store_i64_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FP-NEXT: vmovaps (%r8), %xmm2
 ; AVX2-FP-NEXT: vmovaps (%r9), %xmm3
 ; AVX2-FP-NEXT: vmovaps (%r10), %xmm4
-; AVX2-FP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-FP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm5
 ; AVX2-FP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
-; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,1]
+; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm6
+; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3],ymm6[4,5,6,7]
+; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
 ; AVX2-FP-NEXT: vbroadcastsd %xmm4, %ymm6
-; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
 ; AVX2-FP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
-; AVX2-FP-NEXT: vmovaps %xmm1, 96(%rax)
-; AVX2-FP-NEXT: vmovaps %ymm0, (%rax)
+; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX2-FP-NEXT: vmovaps %xmm3, 96(%rax)
+; AVX2-FP-NEXT: vmovaps %ymm1, (%rax)
 ; AVX2-FP-NEXT: vmovaps %ymm2, 64(%rax)
-; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rax)
+; AVX2-FP-NEXT: vmovaps %ymm0, 32(%rax)
 ; AVX2-FP-NEXT: vzeroupper
 ; AVX2-FP-NEXT: retq
 ;
@@ -139,24 +139,24 @@ define void @store_i64_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FCP-NEXT: vmovaps (%r8), %xmm2
 ; AVX2-FCP-NEXT: vmovaps (%r9), %xmm3
 ; AVX2-FCP-NEXT: vmovaps (%r10), %xmm4
-; AVX2-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm5
 ; AVX2-FCP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FCP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
-; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,1]
+; AVX2-FCP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm6
+; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
 ; AVX2-FCP-NEXT: vbroadcastsd %xmm4, %ymm6
-; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
 ; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
 ; AVX2-FCP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
-; AVX2-FCP-NEXT: vmovaps %xmm1, 96(%rax)
-; AVX2-FCP-NEXT: vmovaps %ymm0, (%rax)
+; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX2-FCP-NEXT: vmovaps %xmm3, 96(%rax)
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%rax)
 ; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%rax)
-; AVX2-FCP-NEXT: vmovaps %ymm5, 32(%rax)
+; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%rax)
 ; AVX2-FCP-NEXT: vzeroupper
 ; AVX2-FCP-NEXT: retq
 ;
