[X86] Fold VPERMV3(WIDEN(X),M,WIDEN(Y)) -> VPERMV(CONCAT(X,Y),M') iff the CONCAT is free #122750

Merged 2 commits on Jan 14, 2025
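As a sanity check on the equivalence in the title, here is a minimal standalone model (illustrative assumptions only: fixed 8-lane vectors, zeroes standing in for the undef upper halves, none of this is LLVM's actual code). VPERMV3 on two widened half-width sources matches VPERMV on their concatenation once every second-source index is shifted down by half the element count.

```cpp
// Standalone model of the fold; sizes and values are illustrative.
#include <array>
#include <cassert>
#include <cstdio>

constexpr int N = 8;     // elements in the full-width vector type
constexpr int H = N / 2; // elements in each half-width source

using Vec = std::array<int, N>;
using Half = std::array<int, H>;
using Mask = std::array<int, N>;

// WIDEN(X): insert X into the low half of an otherwise-undef vector
// (zero stands in for the undef upper half here).
Vec widen(const Half &X) {
  Vec V{};
  for (int I = 0; I < H; ++I)
    V[I] = X[I];
  return V;
}

// VPERMV3: mask indices [0,N) select from V1, [N,2N) select from V2.
Vec vpermv3(const Vec &V1, const Mask &M, const Vec &V2) {
  Vec R{};
  for (int I = 0; I < N; ++I)
    R[I] = M[I] < N ? V1[M[I]] : V2[M[I] - N];
  return R;
}

// VPERMV: all mask indices select from the single source.
Vec vpermv(const Mask &M, const Vec &V) {
  Vec R{};
  for (int I = 0; I < N; ++I)
    R[I] = V[M[I]];
  return R;
}

int main() {
  Half X{10, 11, 12, 13}, Y{20, 21, 22, 23};

  // A mask that only references the defined low halves of the widened
  // operands: [0,H) hits X, [N,N+H) hits Y (an interleave, for example).
  Mask M{0, 8, 1, 9, 2, 10, 3, 11};

  // Original form: VPERMV3(WIDEN(X), M, WIDEN(Y)).
  Vec Before = vpermv3(widen(X), M, widen(Y));

  // Folded form: CONCAT(X, Y), with V2 indices shifted down by N / 2,
  // since Y now starts at lane H instead of lane N.
  Vec Concat{};
  for (int I = 0; I < H; ++I) {
    Concat[I] = X[I];
    Concat[H + I] = Y[I];
  }
  Mask M2{};
  for (int I = 0; I < N; ++I)
    M2[I] = M[I] < N ? M[I] : M[I] - N / 2; // same remap as the patch
  Vec After = vpermv(M2, Concat);

  assert(Before == After);
  std::printf("VPERMV3 and folded VPERMV agree\n");
}
```

The shift by NumElts / 2 is exactly the mask rewrite the patch performs below.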
21 changes: 20 additions & 1 deletion llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -41711,9 +41711,10 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
                                    SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  using namespace SDPatternMatch;

  MVT VT = N.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<int, 4> Mask;
  unsigned Opcode = N.getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -42436,6 +42437,24 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
                                     /*IsMask=*/true);
      return DAG.getNode(X86ISD::VPERMV, DL, VT, NewMask, N.getOperand(0));
    }
    // If sources are half width, then concat and use VPERMV with adjusted
    // mask.
Contributor commented on lines +42440 to +42441:

How do we check that the CONCAT is free?

Collaborator (author) replied:

combineConcatVectorOps only combines the ops if it results in further concatenated combines (sequential loads/broadcasts, subvector/shuffle simplifications, concatenation of additional instructions, etc.); it never just returns a CONCAT_VECTORS node, as that is likely to break apart again in later folds.
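For a concrete sketch of the mask adjustment the new code applies (a hypothetical 8-element case; the values are illustrative, not taken from the patch's tests):

```cpp
#include <cstdio>

int main() {
  // Hypothetical NumElts = 8 case: VPERMV3 indices in [0,8) select
  // V1 = WIDEN(X) and [8,16) select V2 = WIDEN(Y); only [0,4) and [8,12)
  // reference defined lanes. After CONCAT(X,Y), Y begins at lane 4, so
  // every V2 index drops by NumElts / 2 = 4, the same remap as the patch.
  int Mask[8] = {0, 8, 1, 9, 2, 10, 3, 11};
  for (int &M : Mask)
    M = M < 8 ? M : M - 4;
  for (int M : Mask)
    std::printf("%d ", M); // prints: 0 4 1 5 2 6 3 7
  std::printf("\n");
  return 0;
}
```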

    SDValue Ops[2];
    MVT HalfVT = VT.getHalfNumVectorElementsVT();
    if (sd_match(V1,
                 m_InsertSubvector(m_Undef(), m_Value(Ops[0]), m_Zero())) &&
        sd_match(V2,
                 m_InsertSubvector(m_Undef(), m_Value(Ops[1]), m_Zero())) &&
        Ops[0].getValueType() == HalfVT && Ops[1].getValueType() == HalfVT) {
      if (SDValue ConcatSrc =
              combineConcatVectorOps(DL, VT, Ops, DAG, DCI, Subtarget)) {
        for (int &M : Mask)
          M = (M < (int)NumElts ? M : (M - (NumElts / 2)));
        SDValue NewMask = getConstVector(Mask, MaskVT, DAG, DL,
                                         /*IsMask=*/true);
        return DAG.getNode(X86ISD::VPERMV, DL, VT, NewMask, ConcatSrc);
      }
    }
    // Commute foldable source to the RHS.
    if (isShuffleFoldableLoad(N.getOperand(0)) &&
        !isShuffleFoldableLoad(N.getOperand(2))) {
69 changes: 12 additions & 57 deletions llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -2,14 +2,14 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL,AVX512VL-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL,AVX512VL-FAST-PERLANE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW,AVX512BW-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW,AVX512BW-FAST-PERLANE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-PERLANE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMIVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMIVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMI

; PR31551
; Pairs of shufflevector:trunc functions with functional equivalence.
@@ -74,13 +74,6 @@ define void @shuffle_v64i8_to_v32i8(ptr %L, ptr %S) nounwind {
; AVX512VBMI-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512VBMI-NEXT: vzeroupper
; AVX512VBMI-NEXT: retq
;
; AVX512VBMIVL-LABEL: shuffle_v64i8_to_v32i8:
; AVX512VBMIVL: # %bb.0:
; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512VBMIVL-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512VBMIVL-NEXT: vzeroupper
; AVX512VBMIVL-NEXT: retq
%vec = load <64 x i8>, ptr %L
%strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
store <32 x i8> %strided.vec, ptr %S
@@ -126,13 +119,6 @@ define void @trunc_v32i16_to_v32i8(ptr %L, ptr %S) nounwind {
; AVX512VBMI-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512VBMI-NEXT: vzeroupper
; AVX512VBMI-NEXT: retq
;
; AVX512VBMIVL-LABEL: trunc_v32i16_to_v32i8:
; AVX512VBMIVL: # %bb.0:
; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512VBMIVL-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512VBMIVL-NEXT: vzeroupper
; AVX512VBMIVL-NEXT: retq
%vec = load <64 x i8>, ptr %L
%bc = bitcast <64 x i8> %vec to <32 x i16>
%strided.vec = trunc <32 x i16> %bc to <32 x i8>
@@ -346,14 +332,6 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
; AVX512VBMI-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VBMI-NEXT: vzeroupper
; AVX512VBMI-NEXT: retq
;
; AVX512VBMIVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
; AVX512VBMIVL: # %bb.0:
; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} xmm1 = [1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,62]
; AVX512VBMIVL-NEXT: vpermb %zmm0, %zmm1, %zmm0
; AVX512VBMIVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VBMIVL-NEXT: vzeroupper
; AVX512VBMIVL-NEXT: retq
%res = shufflevector <64 x i8> %x, <64 x i8> %x, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 62>
ret <16 x i8> %res
}
@@ -406,12 +384,6 @@ define <32 x i8> @trunc_shuffle_v32i16_v32i8_ofs1(<32 x i16> %a0) {
; AVX512VBMI-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VBMI-NEXT: vpmovwb %zmm0, %ymm0
; AVX512VBMI-NEXT: retq
;
; AVX512VBMIVL-LABEL: trunc_shuffle_v32i16_v32i8_ofs1:
; AVX512VBMIVL: # %bb.0:
; AVX512VBMIVL-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VBMIVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512VBMIVL-NEXT: retq
%bc = bitcast <32 x i16> %a0 to <64 x i8>
%res = shufflevector <64 x i8> %bc, <64 x i8> poison, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
ret <32 x i8> %res
@@ -442,11 +414,9 @@ define <4 x double> @PR34175(ptr %p) {
;
; AVX512BW-LABEL: PR34175:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = [0,8,32,40,0,0,0,0]
; AVX512BW-NEXT: vmovdqu (%rdi), %ymm1
; AVX512BW-NEXT: vmovdqu 32(%rdi), %ymm2
; AVX512BW-NEXT: vpermt2w %zmm2, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
; AVX512BW-NEXT: vpermw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512BW-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512BW-NEXT: retq
;
@@ -460,21 +430,11 @@ define <4 x double> @PR34175(ptr %p) {
;
; AVX512VBMI-LABEL: PR34175:
; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vmovq {{.*#+}} xmm0 = [0,8,32,40,0,0,0,0]
; AVX512VBMI-NEXT: vmovdqu (%rdi), %ymm1
; AVX512VBMI-NEXT: vmovdqu 32(%rdi), %ymm2
; AVX512VBMI-NEXT: vpermt2w %zmm2, %zmm0, %zmm1
; AVX512VBMI-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX512VBMI-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
; AVX512VBMI-NEXT: vpermw (%rdi), %zmm0, %zmm0
; AVX512VBMI-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VBMI-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512VBMI-NEXT: retq
;
; AVX512VBMIVL-LABEL: PR34175:
; AVX512VBMIVL: # %bb.0:
; AVX512VBMIVL-NEXT: vmovq {{.*#+}} xmm0 = [0,8,16,24,0,0,0,0]
; AVX512VBMIVL-NEXT: vpermw (%rdi), %zmm0, %zmm0
; AVX512VBMIVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VBMIVL-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512VBMIVL-NEXT: retq
%v = load <32 x i16>, ptr %p, align 2
%shuf = shufflevector <32 x i16> %v, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
%tofp = uitofp <4 x i16> %shuf to <4 x double>
@@ -492,8 +452,3 @@ define <16 x i8> @trunc_v8i64_to_v8i8_return_v16i8(<8 x i64> %vec) nounwind {
ret <16 x i8> %result
}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX512BW-FAST-ALL: {{.*}}
; AVX512BW-FAST-PERLANE: {{.*}}
; AVX512BWVL-FAST-ALL: {{.*}}
; AVX512BWVL-FAST-PERLANE: {{.*}}
76 changes: 32 additions & 44 deletions llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
@@ -339,17 +339,14 @@ define void @store_i64_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm1
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm2
; AVX512-FCP-NEXT: vmovdqa (%rcx), %ymm3
; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX512-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,2,10,1,9,3,11]
; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm2, %zmm4
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 64(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%r8)
; AVX512-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512-FCP-NEXT: vinserti64x4 $1, (%rdx), %zmm0, %zmm0
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,4,12,1,9,5,13]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [2,10,6,14,3,11,7,15]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -378,17 +375,14 @@ define void @store_i64_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %ymm3
; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX512DQ-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,2,10,1,9,3,11]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm2, %zmm4
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 64(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%r8)
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, (%rdx), %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,4,12,1,9,5,13]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [2,10,6,14,3,11,7,15]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -417,17 +411,14 @@ define void @store_i64_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm1
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm3
; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX512BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,2,10,1,9,3,11]
; AVX512BW-FCP-NEXT: vpermt2q %zmm5, %zmm2, %zmm4
; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, (%r8)
; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rdx), %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,4,12,1,9,5,13]
; AVX512BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [2,10,6,14,3,11,7,15]
; AVX512BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -456,17 +447,14 @@ define void @store_i64_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm3
; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX512DQ-BW-FCP-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,2,10,1,9,3,11]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm5, %zmm2, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, (%r8)
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rdx), %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,8,4,12,1,9,5,13]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [2,10,6,14,3,11,7,15]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <4 x i64>, ptr %in.vecptr0, align 64